diff options
Diffstat (limited to 'vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s')
-rw-r--r-- | vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s | 71547 |
1 files changed, 71547 insertions, 0 deletions
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s new file mode 100644 index 000000000..77729c530 --- /dev/null +++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s @@ -0,0 +1,71547 @@ + .text + .file "<stdin>" + .globl makeNIST_P192Lbmi2 + .align 16, 0x90 + .type makeNIST_P192Lbmi2,@function +makeNIST_P192Lbmi2: # @makeNIST_P192Lbmi2 +# BB#0: + movl 4(%esp), %eax + movl $-1, 20(%eax) + movl $-1, 16(%eax) + movl $-1, 12(%eax) + movl $-2, 8(%eax) + movl $-1, 4(%eax) + movl $-1, (%eax) + retl $4 +.Lfunc_end0: + .size makeNIST_P192Lbmi2, .Lfunc_end0-makeNIST_P192Lbmi2 + + .globl mcl_fpDbl_mod_NIST_P192Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mod_NIST_P192Lbmi2,@function +mcl_fpDbl_mod_NIST_P192Lbmi2: # @mcl_fpDbl_mod_NIST_P192Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %eax + movl 32(%eax), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 28(%eax), %edi + movl %edi, 16(%esp) # 4-byte Spill + xorl %edx, %edx + movl (%eax), %ebx + addl %ecx, %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 4(%eax), %ecx + adcl %edi, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 8(%eax), %ebp + adcl %esi, %ebp + movl 36(%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 12(%eax), %esi + adcl %ecx, %esi + movl 40(%eax), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + movl 16(%eax), %ecx + adcl %ebx, %ecx + movl 44(%eax), %edi + movl %edi, (%esp) # 4-byte Spill + movl 20(%eax), %eax + adcl %edi, %eax + adcl $0, %edx + sbbl %edi, %edi + andl $1, %edi + addl %ebx, 24(%esp) # 4-byte Folded Spill + movl (%esp), %ebx # 4-byte Reload + adcl %ebx, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + adcl $0, %edx + adcl $0, %edi + 
addl 4(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + adcl $0, %ecx + adcl $0, %eax + adcl $0, %edx + adcl $0, %edi + addl %edx, 24(%esp) # 4-byte Folded Spill + adcl %edi, 28(%esp) # 4-byte Folded Spill + adcl %ebp, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl %esi, %edi + adcl $0, %ecx + adcl $0, %eax + sbbl %ebx, %ebx + andl $1, %ebx + movl 24(%esp), %esi # 4-byte Reload + addl $1, %esi + movl 28(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $1, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %edi, %edx + adcl $0, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %ecx, %edx + adcl $0, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %edx + adcl $0, %edx + adcl $-1, %ebx + andl $1, %ebx + jne .LBB1_2 +# BB#1: + movl %edx, %eax +.LBB1_2: + testb %bl, %bl + movl 24(%esp), %edx # 4-byte Reload + jne .LBB1_4 +# BB#3: + movl %esi, %edx +.LBB1_4: + movl 52(%esp), %esi + movl %edx, (%esi) + movl 20(%esp), %edx # 4-byte Reload + movl 28(%esp), %ebx # 4-byte Reload + jne .LBB1_6 +# BB#5: + movl %ebp, %ebx +.LBB1_6: + movl %ebx, 4(%esi) + jne .LBB1_8 +# BB#7: + movl 8(%esp), %edx # 4-byte Reload +.LBB1_8: + movl %edx, 8(%esi) + jne .LBB1_10 +# BB#9: + movl 12(%esp), %edi # 4-byte Reload +.LBB1_10: + movl %edi, 12(%esi) + jne .LBB1_12 +# BB#11: + movl 16(%esp), %ecx # 4-byte Reload +.LBB1_12: + movl %ecx, 16(%esi) + movl %eax, 20(%esi) + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end1: + .size mcl_fpDbl_mod_NIST_P192Lbmi2, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192Lbmi2 + + .globl mcl_fp_sqr_NIST_P192Lbmi2 + .align 16, 0x90 + .type mcl_fp_sqr_NIST_P192Lbmi2,@function +mcl_fp_sqr_NIST_P192Lbmi2: # @mcl_fp_sqr_NIST_P192Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $92, %esp + calll .L2$pb +.L2$pb: + popl %ebx +.Ltmp0: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L2$pb), %ebx + movl 116(%esp), %eax + movl %eax, 4(%esp) + leal 44(%esp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_sqrPre6Lbmi2@PLT + xorl 
%edi, %edi + movl 76(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 72(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 44(%esp), %esi + addl %eax, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax + adcl %edx, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 52(%esp), %ebp + adcl %ecx, %ebp + movl 80(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 56(%esp), %esi + adcl %eax, %esi + movl 84(%esp), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %ecx + adcl %ebx, %ecx + movl 88(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 64(%esp), %edx + adcl %eax, %edx + adcl $0, %edi + sbbl %eax, %eax + andl $1, %eax + addl %ebx, 36(%esp) # 4-byte Folded Spill + movl 12(%esp), %ebx # 4-byte Reload + adcl %ebx, 40(%esp) # 4-byte Folded Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %edx # 4-byte Folded Reload + adcl $0, %edi + adcl $0, %eax + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + adcl $0, %ecx + adcl $0, %edx + adcl $0, %edi + adcl $0, %eax + addl %edi, 36(%esp) # 4-byte Folded Spill + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl %ebp, %edi + adcl %esi, %eax + adcl $0, %ecx + adcl $0, %edx + sbbl %ebx, %ebx + andl $1, %ebx + movl 36(%esp), %esi # 4-byte Reload + addl $1, %esi + movl 40(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl %edi, %ebp + adcl $1, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl $0, %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl %edx, %ebp + adcl $0, %ebp + adcl $-1, %ebx + andl $1, %ebx + jne .LBB2_2 +# BB#1: + movl %ebp, %edx +.LBB2_2: + testb %bl, %bl + movl 36(%esp), %ebx # 4-byte Reload + jne .LBB2_4 +# BB#3: + movl %esi, %ebx +.LBB2_4: + movl 112(%esp), %esi + movl 
%ebx, (%esi) + movl 40(%esp), %ebx # 4-byte Reload + jne .LBB2_6 +# BB#5: + movl 20(%esp), %ebx # 4-byte Reload +.LBB2_6: + movl %ebx, 4(%esi) + jne .LBB2_8 +# BB#7: + movl 24(%esp), %edi # 4-byte Reload +.LBB2_8: + movl %edi, 8(%esi) + jne .LBB2_10 +# BB#9: + movl 28(%esp), %eax # 4-byte Reload +.LBB2_10: + movl %eax, 12(%esi) + jne .LBB2_12 +# BB#11: + movl 32(%esp), %ecx # 4-byte Reload +.LBB2_12: + movl %ecx, 16(%esi) + movl %edx, 20(%esi) + addl $92, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end2: + .size mcl_fp_sqr_NIST_P192Lbmi2, .Lfunc_end2-mcl_fp_sqr_NIST_P192Lbmi2 + + .globl mcl_fp_mulNIST_P192Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulNIST_P192Lbmi2,@function +mcl_fp_mulNIST_P192Lbmi2: # @mcl_fp_mulNIST_P192Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $92, %esp + calll .L3$pb +.L3$pb: + popl %ebx +.Ltmp1: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L3$pb), %ebx + movl 120(%esp), %eax + movl %eax, 8(%esp) + movl 116(%esp), %eax + movl %eax, 4(%esp) + leal 44(%esp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre6Lbmi2@PLT + xorl %edi, %edi + movl 76(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 72(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 44(%esp), %esi + addl %eax, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax + adcl %edx, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 52(%esp), %ebp + adcl %ecx, %ebp + movl 80(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 56(%esp), %esi + adcl %eax, %esi + movl 84(%esp), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %ecx + adcl %ebx, %ecx + movl 88(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 64(%esp), %edx + adcl %eax, %edx + adcl $0, %edi + sbbl %eax, %eax + andl $1, %eax + addl %ebx, 36(%esp) # 4-byte Folded Spill + movl 12(%esp), %ebx # 4-byte Reload + adcl %ebx, 40(%esp) # 4-byte Folded Spill + adcl 32(%esp), %ebp # 4-byte Folded 
Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %edx # 4-byte Folded Reload + adcl $0, %edi + adcl $0, %eax + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + adcl $0, %ecx + adcl $0, %edx + adcl $0, %edi + adcl $0, %eax + addl %edi, 36(%esp) # 4-byte Folded Spill + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl %ebp, %edi + adcl %esi, %eax + adcl $0, %ecx + adcl $0, %edx + sbbl %ebx, %ebx + andl $1, %ebx + movl 36(%esp), %esi # 4-byte Reload + addl $1, %esi + movl 40(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl %edi, %ebp + adcl $1, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl $0, %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl %edx, %ebp + adcl $0, %ebp + adcl $-1, %ebx + andl $1, %ebx + jne .LBB3_2 +# BB#1: + movl %ebp, %edx +.LBB3_2: + testb %bl, %bl + movl 36(%esp), %ebx # 4-byte Reload + jne .LBB3_4 +# BB#3: + movl %esi, %ebx +.LBB3_4: + movl 112(%esp), %esi + movl %ebx, (%esi) + movl 40(%esp), %ebx # 4-byte Reload + jne .LBB3_6 +# BB#5: + movl 20(%esp), %ebx # 4-byte Reload +.LBB3_6: + movl %ebx, 4(%esi) + jne .LBB3_8 +# BB#7: + movl 24(%esp), %edi # 4-byte Reload +.LBB3_8: + movl %edi, 8(%esi) + jne .LBB3_10 +# BB#9: + movl 28(%esp), %eax # 4-byte Reload +.LBB3_10: + movl %eax, 12(%esi) + jne .LBB3_12 +# BB#11: + movl 32(%esp), %ecx # 4-byte Reload +.LBB3_12: + movl %ecx, 16(%esi) + movl %edx, 20(%esi) + addl $92, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end3: + .size mcl_fp_mulNIST_P192Lbmi2, .Lfunc_end3-mcl_fp_mulNIST_P192Lbmi2 + + .globl mcl_fpDbl_mod_NIST_P521Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mod_NIST_P521Lbmi2,@function +mcl_fpDbl_mod_NIST_P521Lbmi2: # @mcl_fpDbl_mod_NIST_P521Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %ecx + movl 124(%ecx), %edx 
+ movl 128(%ecx), %esi + movl %esi, %eax + shldl $23, %edx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 120(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 116(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 112(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 108(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 104(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 100(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 96(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 92(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 88(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 84(%ecx), %edi + shldl $23, %edi, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 80(%ecx), %edx + shldl $23, %edx, %edi + movl 76(%ecx), %eax + shldl $23, %eax, %edx + movl 72(%ecx), %ebx + shldl $23, %ebx, %eax + movl 68(%ecx), %ebp + shldl $23, %ebp, %ebx + shrl $9, %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 64(%ecx), %esi + shldl $23, %esi, %ebp + andl $511, %esi # imm = 0x1FF + addl (%ecx), %ebp + adcl 4(%ecx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + adcl 8(%ecx), %eax + adcl 12(%ecx), %edx + adcl 16(%ecx), %edi + movl 28(%esp), %ebx # 4-byte Reload + adcl 20(%ecx), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebx # 4-byte Reload + adcl 24(%ecx), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + adcl 28(%ecx), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 40(%esp), %ebx # 4-byte Reload + adcl 32(%ecx), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ebx # 4-byte Reload + adcl 36(%ecx), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 48(%esp), %ebx # 4-byte Reload + adcl 40(%ecx), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 24(%esp), %ebx 
# 4-byte Reload + adcl 44(%ecx), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl 48(%ecx), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 20(%esp), %ebx # 4-byte Reload + adcl 52(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload + adcl 56(%ecx), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + adcl 60(%ecx), %ebx + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + movl %esi, %ecx + shrl $9, %ecx + andl $1, %ecx + addl %ebp, %ecx + adcl $0, 16(%esp) # 4-byte Folded Spill + adcl $0, %eax + movl %eax, (%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl %edi, %esi + adcl $0, 28(%esp) # 4-byte Folded Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + adcl $0, 20(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ebx, %ebp + adcl $0, %ebp + movl 12(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl %ecx, %edi + andl %eax, %edi + andl %edx, %edi + andl %esi, %edi + andl 28(%esp), %edi # 4-byte Folded Reload + andl 32(%esp), %edi # 4-byte Folded Reload + andl 36(%esp), %edi # 4-byte Folded Reload + andl 40(%esp), %edi # 4-byte Folded Reload + andl 44(%esp), %edi # 4-byte Folded Reload + andl 48(%esp), %edi # 4-byte Folded Reload + andl 24(%esp), %edi # 4-byte Folded Reload + andl 52(%esp), %edi # 4-byte Folded Reload + movl 20(%esp), %esi # 4-byte Reload + andl %esi, %edi + andl 56(%esp), %edi # 4-byte Folded Reload + movl %ebx, %edx + movl 16(%esp), %ebx # 4-byte Reload + andl %ebp, %edi + movl %ebp, %eax + movl %edx, %ebp + orl $-512, %ebp # imm = 0xFFFFFFFFFFFFFE00 + andl %edi, 
%ebp + andl %ebx, %ebp + cmpl $-1, %ebp + movl 80(%esp), %edi + je .LBB4_1 +# BB#3: # %nonzero + movl %ecx, (%edi) + movl %ebx, 4(%edi) + movl (%esp), %ecx # 4-byte Reload + movl %ecx, 8(%edi) + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%edi) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%edi) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%edi) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%edi) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%edi) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%edi) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%edi) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%edi) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%edi) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%edi) + movl %esi, 52(%edi) + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%edi) + movl %eax, 60(%edi) + andl $511, %edx # imm = 0x1FF + movl %edx, 64(%edi) + jmp .LBB4_2 +.LBB4_1: # %zero + xorl %eax, %eax + movl $17, %ecx + rep;stosl +.LBB4_2: # %zero + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end4: + .size mcl_fpDbl_mod_NIST_P521Lbmi2, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521Lbmi2 + + .globl mcl_fp_mulUnitPre1Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre1Lbmi2,@function +mcl_fp_mulUnitPre1Lbmi2: # @mcl_fp_mulUnitPre1Lbmi2 +# BB#0: + movl 8(%esp), %eax + movl (%eax), %edx + mulxl 12(%esp), %ecx, %eax + movl 4(%esp), %edx + movl %ecx, (%edx) + movl %eax, 4(%edx) + retl +.Lfunc_end5: + .size mcl_fp_mulUnitPre1Lbmi2, .Lfunc_end5-mcl_fp_mulUnitPre1Lbmi2 + + .globl mcl_fpDbl_mulPre1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre1Lbmi2,@function +mcl_fpDbl_mulPre1Lbmi2: # @mcl_fpDbl_mulPre1Lbmi2 +# BB#0: + movl 12(%esp), %eax + movl (%eax), %edx + movl 8(%esp), %eax + mulxl (%eax), %ecx, %eax + movl 4(%esp), %edx + movl %ecx, (%edx) + movl %eax, 4(%edx) + retl +.Lfunc_end6: + .size mcl_fpDbl_mulPre1Lbmi2, .Lfunc_end6-mcl_fpDbl_mulPre1Lbmi2 + + .globl 
mcl_fpDbl_sqrPre1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre1Lbmi2,@function +mcl_fpDbl_sqrPre1Lbmi2: # @mcl_fpDbl_sqrPre1Lbmi2 +# BB#0: + movl 8(%esp), %eax + movl (%eax), %edx + mulxl %edx, %ecx, %eax + movl 4(%esp), %edx + movl %ecx, (%edx) + movl %eax, 4(%edx) + retl +.Lfunc_end7: + .size mcl_fpDbl_sqrPre1Lbmi2, .Lfunc_end7-mcl_fpDbl_sqrPre1Lbmi2 + + .globl mcl_fp_mont1Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont1Lbmi2,@function +mcl_fp_mont1Lbmi2: # @mcl_fp_mont1Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %eax + movl (%eax), %edx + movl 20(%esp), %eax + mulxl (%eax), %esi, %ecx + movl 24(%esp), %eax + movl -4(%eax), %edx + imull %esi, %edx + movl (%eax), %edi + mulxl %edi, %edx, %eax + addl %esi, %edx + adcl %ecx, %eax + sbbl %edx, %edx + andl $1, %edx + movl %eax, %ecx + subl %edi, %ecx + sbbl $0, %edx + testb $1, %dl + jne .LBB8_2 +# BB#1: + movl %ecx, %eax +.LBB8_2: + movl 12(%esp), %ecx + movl %eax, (%ecx) + popl %esi + popl %edi + retl +.Lfunc_end8: + .size mcl_fp_mont1Lbmi2, .Lfunc_end8-mcl_fp_mont1Lbmi2 + + .globl mcl_fp_montNF1Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF1Lbmi2,@function +mcl_fp_montNF1Lbmi2: # @mcl_fp_montNF1Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %eax + movl (%eax), %edx + movl 20(%esp), %eax + mulxl (%eax), %esi, %ecx + movl 24(%esp), %eax + movl -4(%eax), %edx + imull %esi, %edx + movl (%eax), %edi + mulxl %edi, %edx, %eax + addl %esi, %edx + adcl %ecx, %eax + movl %eax, %ecx + subl %edi, %ecx + js .LBB9_2 +# BB#1: + movl %ecx, %eax +.LBB9_2: + movl 12(%esp), %ecx + movl %eax, (%ecx) + popl %esi + popl %edi + retl +.Lfunc_end9: + .size mcl_fp_montNF1Lbmi2, .Lfunc_end9-mcl_fp_montNF1Lbmi2 + + .globl mcl_fp_montRed1Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed1Lbmi2,@function +mcl_fp_montRed1Lbmi2: # @mcl_fp_montRed1Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %ecx + movl (%ecx), %esi + movl 20(%esp), %eax + movl -4(%eax), %edx + imull %esi, %edx + movl (%eax), %edi + mulxl %edi, 
%edx, %eax + addl %esi, %edx + adcl 4(%ecx), %eax + sbbl %edx, %edx + andl $1, %edx + movl %eax, %ecx + subl %edi, %ecx + sbbl $0, %edx + testb $1, %dl + jne .LBB10_2 +# BB#1: + movl %ecx, %eax +.LBB10_2: + movl 12(%esp), %ecx + movl %eax, (%ecx) + popl %esi + popl %edi + retl +.Lfunc_end10: + .size mcl_fp_montRed1Lbmi2, .Lfunc_end10-mcl_fp_montRed1Lbmi2 + + .globl mcl_fp_addPre1Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre1Lbmi2,@function +mcl_fp_addPre1Lbmi2: # @mcl_fp_addPre1Lbmi2 +# BB#0: + movl 12(%esp), %eax + movl (%eax), %eax + movl 4(%esp), %ecx + movl 8(%esp), %edx + addl (%edx), %eax + movl %eax, (%ecx) + sbbl %eax, %eax + andl $1, %eax + retl +.Lfunc_end11: + .size mcl_fp_addPre1Lbmi2, .Lfunc_end11-mcl_fp_addPre1Lbmi2 + + .globl mcl_fp_subPre1Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre1Lbmi2,@function +mcl_fp_subPre1Lbmi2: # @mcl_fp_subPre1Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + xorl %eax, %eax + movl 8(%esp), %edx + movl 16(%esp), %esi + subl (%esi), %ecx + movl %ecx, (%edx) + sbbl $0, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end12: + .size mcl_fp_subPre1Lbmi2, .Lfunc_end12-mcl_fp_subPre1Lbmi2 + + .globl mcl_fp_shr1_1Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_1Lbmi2,@function +mcl_fp_shr1_1Lbmi2: # @mcl_fp_shr1_1Lbmi2 +# BB#0: + movl 8(%esp), %eax + movl (%eax), %eax + shrl %eax + movl 4(%esp), %ecx + movl %eax, (%ecx) + retl +.Lfunc_end13: + .size mcl_fp_shr1_1Lbmi2, .Lfunc_end13-mcl_fp_shr1_1Lbmi2 + + .globl mcl_fp_add1Lbmi2 + .align 16, 0x90 + .type mcl_fp_add1Lbmi2,@function +mcl_fp_add1Lbmi2: # @mcl_fp_add1Lbmi2 +# BB#0: + pushl %esi + movl 16(%esp), %eax + movl (%eax), %eax + movl 8(%esp), %ecx + movl 12(%esp), %edx + addl (%edx), %eax + movl %eax, (%ecx) + sbbl %edx, %edx + andl $1, %edx + movl 20(%esp), %esi + subl (%esi), %eax + sbbl $0, %edx + testb $1, %dl + jne .LBB14_2 +# BB#1: # %nocarry + movl %eax, (%ecx) +.LBB14_2: # %carry + popl %esi + retl +.Lfunc_end14: + .size mcl_fp_add1Lbmi2, 
.Lfunc_end14-mcl_fp_add1Lbmi2 + + .globl mcl_fp_addNF1Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF1Lbmi2,@function +mcl_fp_addNF1Lbmi2: # @mcl_fp_addNF1Lbmi2 +# BB#0: + movl 12(%esp), %eax + movl (%eax), %eax + movl 8(%esp), %ecx + addl (%ecx), %eax + movl 16(%esp), %edx + movl %eax, %ecx + subl (%edx), %ecx + js .LBB15_2 +# BB#1: + movl %ecx, %eax +.LBB15_2: + movl 4(%esp), %ecx + movl %eax, (%ecx) + retl +.Lfunc_end15: + .size mcl_fp_addNF1Lbmi2, .Lfunc_end15-mcl_fp_addNF1Lbmi2 + + .globl mcl_fp_sub1Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub1Lbmi2,@function +mcl_fp_sub1Lbmi2: # @mcl_fp_sub1Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %eax + xorl %edx, %edx + movl 8(%esp), %ecx + movl 16(%esp), %esi + subl (%esi), %eax + movl %eax, (%ecx) + sbbl $0, %edx + testb $1, %dl + jne .LBB16_2 +# BB#1: # %nocarry + popl %esi + retl +.LBB16_2: # %carry + movl 20(%esp), %edx + addl (%edx), %eax + movl %eax, (%ecx) + popl %esi + retl +.Lfunc_end16: + .size mcl_fp_sub1Lbmi2, .Lfunc_end16-mcl_fp_sub1Lbmi2 + + .globl mcl_fp_subNF1Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF1Lbmi2,@function +mcl_fp_subNF1Lbmi2: # @mcl_fp_subNF1Lbmi2 +# BB#0: + movl 8(%esp), %eax + movl (%eax), %eax + movl 12(%esp), %ecx + subl (%ecx), %eax + movl %eax, %ecx + sarl $31, %ecx + movl 16(%esp), %edx + andl (%edx), %ecx + addl %eax, %ecx + movl 4(%esp), %eax + movl %ecx, (%eax) + retl +.Lfunc_end17: + .size mcl_fp_subNF1Lbmi2, .Lfunc_end17-mcl_fp_subNF1Lbmi2 + + .globl mcl_fpDbl_add1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add1Lbmi2,@function +mcl_fpDbl_add1Lbmi2: # @mcl_fpDbl_add1Lbmi2 +# BB#0: + pushl %ebx + pushl %esi + movl 20(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %eax + movl 16(%esp), %esi + addl (%esi), %edx + movl 12(%esp), %ecx + adcl 4(%esi), %eax + movl %edx, (%ecx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 24(%esp), %esi + movl %eax, %edx + subl (%esi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB18_2 +# BB#1: + movl %edx, %eax +.LBB18_2: + movl %eax, 
4(%ecx) + popl %esi + popl %ebx + retl +.Lfunc_end18: + .size mcl_fpDbl_add1Lbmi2, .Lfunc_end18-mcl_fpDbl_add1Lbmi2 + + .globl mcl_fpDbl_sub1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub1Lbmi2,@function +mcl_fpDbl_sub1Lbmi2: # @mcl_fpDbl_sub1Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %eax + xorl %ecx, %ecx + movl 16(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %eax + movl 8(%esp), %edx + movl %esi, (%edx) + sbbl $0, %ecx + andl $1, %ecx + je .LBB19_2 +# BB#1: + movl 20(%esp), %ecx + movl (%ecx), %ecx +.LBB19_2: + addl %eax, %ecx + movl %ecx, 4(%edx) + popl %esi + retl +.Lfunc_end19: + .size mcl_fpDbl_sub1Lbmi2, .Lfunc_end19-mcl_fpDbl_sub1Lbmi2 + + .globl mcl_fp_mulUnitPre2Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre2Lbmi2,@function +mcl_fp_mulUnitPre2Lbmi2: # @mcl_fp_mulUnitPre2Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 20(%esp), %edx + movl 16(%esp), %eax + mulxl 4(%eax), %ecx, %esi + mulxl (%eax), %eax, %edx + movl 12(%esp), %edi + movl %eax, (%edi) + addl %ecx, %edx + movl %edx, 4(%edi) + adcl $0, %esi + movl %esi, 8(%edi) + popl %esi + popl %edi + retl +.Lfunc_end20: + .size mcl_fp_mulUnitPre2Lbmi2, .Lfunc_end20-mcl_fp_mulUnitPre2Lbmi2 + + .globl mcl_fpDbl_mulPre2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre2Lbmi2,@function +mcl_fpDbl_mulPre2Lbmi2: # @mcl_fpDbl_mulPre2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %ecx + movl 28(%esp), %esi + movl (%esi), %edi + movl %ecx, %edx + mulxl %edi, %ebx, %ebp + movl %eax, %edx + mulxl %edi, %edx, %edi + addl %ebx, %edi + movl 20(%esp), %ebx + movl %edx, (%ebx) + adcl $0, %ebp + movl 4(%esi), %esi + movl %eax, %edx + mulxl %esi, %eax, %ebx + addl %edi, %eax + movl %ecx, %edx + mulxl %esi, %edx, %ecx + adcl %ebp, %edx + sbbl %esi, %esi + andl $1, %esi + addl %ebx, %edx + movl 20(%esp), %edi + movl %eax, 4(%edi) + movl %edx, 8(%edi) + adcl %ecx, %esi + movl %esi, 12(%edi) + popl %esi + 
popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end21: + .size mcl_fpDbl_mulPre2Lbmi2, .Lfunc_end21-mcl_fpDbl_mulPre2Lbmi2 + + .globl mcl_fpDbl_sqrPre2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre2Lbmi2,@function +mcl_fpDbl_sqrPre2Lbmi2: # @mcl_fpDbl_sqrPre2Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 20(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %ecx + movl 16(%esp), %esi + movl %eax, %edx + mulxl %eax, %edx, %edi + movl %edx, (%esi) + movl %ecx, %edx + mulxl %eax, %edx, %eax + addl %edx, %edi + movl %eax, %ebx + adcl $0, %ebx + addl %edx, %edi + movl %ecx, %edx + mulxl %ecx, %edx, %ecx + adcl %ebx, %edx + sbbl %ebx, %ebx + andl $1, %ebx + addl %eax, %edx + movl %edi, 4(%esi) + movl %edx, 8(%esi) + adcl %ecx, %ebx + movl %ebx, 12(%esi) + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end22: + .size mcl_fpDbl_sqrPre2Lbmi2, .Lfunc_end22-mcl_fpDbl_sqrPre2Lbmi2 + + .globl mcl_fp_mont2Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont2Lbmi2,@function +mcl_fp_mont2Lbmi2: # @mcl_fp_mont2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 48(%esp), %eax + movl (%eax), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 52(%esp), %eax + movl (%eax), %eax + mulxl %eax, %ecx, %esi + movl %edi, %edx + mulxl %eax, %edx, %edi + movl %edx, (%esp) # 4-byte Spill + addl %ecx, %edi + adcl $0, %esi + movl 56(%esp), %eax + movl -4(%eax), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + imull %ecx, %edx + movl (%eax), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 4(%eax), %eax + movl %eax, 20(%esp) # 4-byte Spill + mulxl %eax, %ebp, %ecx + mulxl %ebx, %edx, %eax + addl %ebp, %eax + adcl $0, %ecx + addl (%esp), %edx # 4-byte Folded Reload + adcl %edi, %eax + adcl %esi, %ecx + movl 52(%esp), %edx + movl 4(%edx), %edx + sbbl %ebx, %ebx + andl $1, %ebx + mulxl 4(%esp), %esi, %ebp # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + mulxl 8(%esp), %edi, %esi # 4-byte Folded 
Reload + addl 4(%esp), %esi # 4-byte Folded Reload + adcl $0, %ebp + addl %eax, %edi + adcl %ecx, %esi + adcl %ebx, %ebp + sbbl %ecx, %ecx + movl 12(%esp), %edx # 4-byte Reload + imull %edi, %edx + movl %edx, %eax + mulxl 16(%esp), %ebx, %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, %edx + mulxl 20(%esp), %edx, %eax # 4-byte Folded Reload + addl 12(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + andl $1, %ecx + addl %edi, %ebx + adcl %esi, %edx + adcl %ebp, %eax + adcl $0, %ecx + movl %edx, %ebp + subl 16(%esp), %ebp # 4-byte Folded Reload + movl %eax, %esi + sbbl 20(%esp), %esi # 4-byte Folded Reload + sbbl $0, %ecx + andl $1, %ecx + jne .LBB23_2 +# BB#1: + movl %ebp, %edx +.LBB23_2: + movl 44(%esp), %edi + movl %edx, (%edi) + testb %cl, %cl + jne .LBB23_4 +# BB#3: + movl %esi, %eax +.LBB23_4: + movl %eax, 4(%edi) + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end23: + .size mcl_fp_mont2Lbmi2, .Lfunc_end23-mcl_fp_mont2Lbmi2 + + .globl mcl_fp_montNF2Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF2Lbmi2,@function +mcl_fp_montNF2Lbmi2: # @mcl_fp_montNF2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 44(%esp), %eax + movl (%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 48(%esp), %eax + movl (%eax), %eax + mulxl %eax, %edi, %ebp + movl %ecx, %edx + mulxl %eax, %ecx, %esi + addl %edi, %esi + adcl $0, %ebp + movl 52(%esp), %eax + movl -4(%eax), %ebx + movl %ecx, %edx + imull %ebx, %edx + movl (%eax), %eax + movl %eax, 16(%esp) # 4-byte Spill + mulxl %eax, %edi, %eax + movl %eax, (%esp) # 4-byte Spill + addl %ecx, %edi + movl 52(%esp), %eax + movl 4(%eax), %eax + movl %eax, 12(%esp) # 4-byte Spill + mulxl %eax, %edi, %edx + adcl %esi, %edi + adcl $0, %ebp + addl (%esp), %edi # 4-byte Folded Reload + adcl %edx, %ebp + movl 48(%esp), %eax + movl 4(%eax), %edx + mulxl 4(%esp), %eax, %esi # 4-byte 
Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + mulxl 8(%esp), %eax, %ecx # 4-byte Folded Reload + addl 4(%esp), %ecx # 4-byte Folded Reload + adcl $0, %esi + addl %edi, %eax + adcl %ebp, %ecx + adcl $0, %esi + imull %eax, %ebx + movl %ebx, %edx + movl 16(%esp), %ebp # 4-byte Reload + mulxl %ebp, %edx, %edi + addl %eax, %edx + movl %ebx, %edx + movl 12(%esp), %ebx # 4-byte Reload + mulxl %ebx, %eax, %edx + adcl %ecx, %eax + adcl $0, %esi + addl %edi, %eax + adcl %edx, %esi + movl %eax, %edx + subl %ebp, %edx + movl %esi, %ecx + sbbl %ebx, %ecx + testl %ecx, %ecx + js .LBB24_2 +# BB#1: + movl %edx, %eax +.LBB24_2: + movl 40(%esp), %edx + movl %eax, (%edx) + js .LBB24_4 +# BB#3: + movl %ecx, %esi +.LBB24_4: + movl %esi, 4(%edx) + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end24: + .size mcl_fp_montNF2Lbmi2, .Lfunc_end24-mcl_fp_montNF2Lbmi2 + + .globl mcl_fp_montRed2Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed2Lbmi2,@function +mcl_fp_montRed2Lbmi2: # @mcl_fp_montRed2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 44(%esp), %esi + movl -4(%esi), %ecx + movl (%esi), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 40(%esp), %eax + movl (%eax), %ebx + movl %ebx, %edx + imull %ecx, %edx + movl 4(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill + mulxl %eax, %ebp, %esi + mulxl %edi, %edx, %eax + addl %ebp, %eax + adcl $0, %esi + addl %ebx, %edx + movl 40(%esp), %edi + movl 12(%edi), %edx + adcl 4(%edi), %eax + adcl 8(%edi), %esi + adcl $0, %edx + movl %edx, 4(%esp) # 4-byte Spill + sbbl %ebx, %ebx + imull %eax, %ecx + movl %ecx, %edx + mulxl 8(%esp), %edi, %edx # 4-byte Folded Reload + movl %edx, (%esp) # 4-byte Spill + movl %ecx, %edx + mulxl 12(%esp), %edx, %ebp # 4-byte Folded Reload + addl (%esp), %edx # 4-byte Folded Reload + adcl $0, %ebp + andl $1, %ebx + addl %eax, %edi + adcl %esi, %edx + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl $0, %ebx + movl %edx, %edi + subl 8(%esp), 
%edi # 4-byte Folded Reload + movl %ebp, %ecx + sbbl 12(%esp), %ecx # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB25_2 +# BB#1: + movl %edi, %edx +.LBB25_2: + movl 36(%esp), %esi + movl %edx, (%esi) + testb %bl, %bl + jne .LBB25_4 +# BB#3: + movl %ecx, %ebp +.LBB25_4: + movl %ebp, 4(%esi) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end25: + .size mcl_fp_montRed2Lbmi2, .Lfunc_end25-mcl_fp_montRed2Lbmi2 + + .globl mcl_fp_addPre2Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre2Lbmi2,@function +mcl_fp_addPre2Lbmi2: # @mcl_fp_addPre2Lbmi2 +# BB#0: + pushl %esi + movl 16(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + movl 12(%esp), %edx + addl (%edx), %ecx + movl 8(%esp), %esi + adcl 4(%edx), %eax + movl %ecx, (%esi) + movl %eax, 4(%esi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end26: + .size mcl_fp_addPre2Lbmi2, .Lfunc_end26-mcl_fp_addPre2Lbmi2 + + .globl mcl_fp_subPre2Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre2Lbmi2,@function +mcl_fp_subPre2Lbmi2: # @mcl_fp_subPre2Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + xorl %eax, %eax + movl 16(%esp), %esi + subl (%esi), %ecx + sbbl 4(%esi), %edx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl %edx, 4(%esi) + sbbl $0, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end27: + .size mcl_fp_subPre2Lbmi2, .Lfunc_end27-mcl_fp_subPre2Lbmi2 + + .globl mcl_fp_shr1_2Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_2Lbmi2,@function +mcl_fp_shr1_2Lbmi2: # @mcl_fp_shr1_2Lbmi2 +# BB#0: + movl 8(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + shrdl $1, %eax, %ecx + movl 4(%esp), %edx + movl %ecx, (%edx) + shrl %eax + movl %eax, 4(%edx) + retl +.Lfunc_end28: + .size mcl_fp_shr1_2Lbmi2, .Lfunc_end28-mcl_fp_shr1_2Lbmi2 + + .globl mcl_fp_add2Lbmi2 + .align 16, 0x90 + .type mcl_fp_add2Lbmi2,@function +mcl_fp_add2Lbmi2: # @mcl_fp_add2Lbmi2 +# BB#0: + pushl %ebx + pushl %esi + movl 20(%esp), %ecx + movl (%ecx), %eax + 
movl 4(%ecx), %ecx + movl 16(%esp), %esi + addl (%esi), %eax + movl 12(%esp), %edx + adcl 4(%esi), %ecx + movl %eax, (%edx) + movl %ecx, 4(%edx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 24(%esp), %esi + subl (%esi), %eax + sbbl 4(%esi), %ecx + sbbl $0, %ebx + testb $1, %bl + jne .LBB29_2 +# BB#1: # %nocarry + movl %eax, (%edx) + movl %ecx, 4(%edx) +.LBB29_2: # %carry + popl %esi + popl %ebx + retl +.Lfunc_end29: + .size mcl_fp_add2Lbmi2, .Lfunc_end29-mcl_fp_add2Lbmi2 + + .globl mcl_fp_addNF2Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF2Lbmi2,@function +mcl_fp_addNF2Lbmi2: # @mcl_fp_addNF2Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 20(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + movl 16(%esp), %edx + addl (%edx), %ecx + adcl 4(%edx), %eax + movl 24(%esp), %edi + movl %ecx, %esi + subl (%edi), %esi + movl %eax, %edx + sbbl 4(%edi), %edx + testl %edx, %edx + js .LBB30_2 +# BB#1: + movl %esi, %ecx +.LBB30_2: + movl 12(%esp), %esi + movl %ecx, (%esi) + js .LBB30_4 +# BB#3: + movl %edx, %eax +.LBB30_4: + movl %eax, 4(%esi) + popl %esi + popl %edi + retl +.Lfunc_end30: + .size mcl_fp_addNF2Lbmi2, .Lfunc_end30-mcl_fp_addNF2Lbmi2 + + .globl mcl_fp_sub2Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub2Lbmi2,@function +mcl_fp_sub2Lbmi2: # @mcl_fp_sub2Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 20(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + xorl %ebx, %ebx + movl 24(%esp), %edx + subl (%edx), %ecx + sbbl 4(%edx), %eax + movl 16(%esp), %edx + movl %ecx, (%edx) + movl %eax, 4(%edx) + sbbl $0, %ebx + testb $1, %bl + je .LBB31_2 +# BB#1: # %carry + movl 28(%esp), %esi + movl 4(%esi), %edi + addl (%esi), %ecx + movl %ecx, (%edx) + adcl %eax, %edi + movl %edi, 4(%edx) +.LBB31_2: # %nocarry + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end31: + .size mcl_fp_sub2Lbmi2, .Lfunc_end31-mcl_fp_sub2Lbmi2 + + .globl mcl_fp_subNF2Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF2Lbmi2,@function +mcl_fp_subNF2Lbmi2: # @mcl_fp_subNF2Lbmi2 +# BB#0: + pushl %edi 
+ pushl %esi + movl 16(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + movl 20(%esp), %edx + subl (%edx), %ecx + sbbl 4(%edx), %eax + movl %eax, %edx + sarl $31, %edx + movl 24(%esp), %esi + movl 4(%esi), %edi + andl %edx, %edi + andl (%esi), %edx + addl %ecx, %edx + movl 12(%esp), %ecx + movl %edx, (%ecx) + adcl %eax, %edi + movl %edi, 4(%ecx) + popl %esi + popl %edi + retl +.Lfunc_end32: + .size mcl_fp_subNF2Lbmi2, .Lfunc_end32-mcl_fp_subNF2Lbmi2 + + .globl mcl_fpDbl_add2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add2Lbmi2,@function +mcl_fpDbl_add2Lbmi2: # @mcl_fpDbl_add2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %edx + movl 12(%edx), %esi + movl 24(%esp), %edi + movl 12(%edi), %eax + movl 8(%edx), %ecx + movl (%edx), %ebx + movl 4(%edx), %ebp + addl (%edi), %ebx + adcl 4(%edi), %ebp + movl 20(%esp), %edx + adcl 8(%edi), %ecx + movl %ebx, (%edx) + movl %ebp, 4(%edx) + adcl %esi, %eax + sbbl %ebx, %ebx + andl $1, %ebx + movl 32(%esp), %ebp + movl %ecx, %esi + subl (%ebp), %esi + movl %eax, %edi + sbbl 4(%ebp), %edi + sbbl $0, %ebx + andl $1, %ebx + jne .LBB33_2 +# BB#1: + movl %edi, %eax +.LBB33_2: + testb %bl, %bl + jne .LBB33_4 +# BB#3: + movl %esi, %ecx +.LBB33_4: + movl %ecx, 8(%edx) + movl %eax, 12(%edx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end33: + .size mcl_fpDbl_add2Lbmi2, .Lfunc_end33-mcl_fpDbl_add2Lbmi2 + + .globl mcl_fpDbl_sub2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub2Lbmi2,@function +mcl_fpDbl_sub2Lbmi2: # @mcl_fpDbl_sub2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %ebx, %ebx + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %eax + sbbl 8(%edx), %eax + movl 12(%edx), %ebp + movl 12(%ecx), %edx + movl 20(%esp), %ecx + movl %esi, (%ecx) + movl %edi, 4(%ecx) + sbbl %ebp, %edx + movl 32(%esp), %edi + movl (%edi), %esi + sbbl $0, %ebx + andl $1, %ebx + jne 
.LBB34_1 +# BB#2: + xorl %edi, %edi + jmp .LBB34_3 +.LBB34_1: + movl 4(%edi), %edi +.LBB34_3: + testb %bl, %bl + jne .LBB34_5 +# BB#4: + xorl %esi, %esi +.LBB34_5: + addl %eax, %esi + movl %esi, 8(%ecx) + adcl %edx, %edi + movl %edi, 12(%ecx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end34: + .size mcl_fpDbl_sub2Lbmi2, .Lfunc_end34-mcl_fpDbl_sub2Lbmi2 + + .globl mcl_fp_mulUnitPre3Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre3Lbmi2,@function +mcl_fp_mulUnitPre3Lbmi2: # @mcl_fp_mulUnitPre3Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %edx + movl 20(%esp), %eax + mulxl 4(%eax), %esi, %ecx + mulxl (%eax), %edi, %ebx + addl %esi, %ebx + mulxl 8(%eax), %eax, %edx + movl 16(%esp), %esi + movl %edi, (%esi) + movl %ebx, 4(%esi) + adcl %ecx, %eax + movl %eax, 8(%esi) + adcl $0, %edx + movl %edx, 12(%esi) + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end35: + .size mcl_fp_mulUnitPre3Lbmi2, .Lfunc_end35-mcl_fp_mulUnitPre3Lbmi2 + + .globl mcl_fpDbl_mulPre3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre3Lbmi2,@function +mcl_fpDbl_mulPre3Lbmi2: # @mcl_fpDbl_mulPre3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 40(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 44(%esp), %esi + movl (%esi), %edi + mulxl %edi, %ebx, %ebp + movl %eax, %edx + movl %eax, %esi + mulxl %edi, %edx, %eax + movl %edx, 4(%esp) # 4-byte Spill + addl %ebx, %eax + movl 8(%ecx), %edx + movl %edx, 8(%esp) # 4-byte Spill + mulxl %edi, %ebx, %edi + adcl %ebp, %ebx + movl 36(%esp), %ecx + movl 4(%esp), %edx # 4-byte Reload + movl %edx, (%ecx) + adcl $0, %edi + movl 44(%esp), %ecx + movl 4(%ecx), %ebp + movl %esi, %edx + mulxl %ebp, %ecx, %edx + movl %edx, 4(%esp) # 4-byte Spill + addl %eax, %ecx + movl 12(%esp), %edx # 4-byte Reload + mulxl %ebp, %eax, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebx, %eax + movl 8(%esp), %edx # 4-byte Reload + mulxl %ebp, %ebx, 
%edx + adcl %edi, %ebx + sbbl %edi, %edi + andl $1, %edi + addl 4(%esp), %eax # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + adcl %edx, %edi + movl 36(%esp), %edx + movl %ecx, 4(%edx) + movl 44(%esp), %ecx + movl 8(%ecx), %ecx + movl %esi, %edx + mulxl %ecx, %ebp, %edx + movl %edx, 4(%esp) # 4-byte Spill + addl %eax, %ebp + movl 12(%esp), %edx # 4-byte Reload + mulxl %ecx, %eax, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %ebx, %eax + movl 8(%esp), %edx # 4-byte Reload + mulxl %ecx, %edx, %ecx + adcl %edi, %edx + sbbl %esi, %esi + andl $1, %esi + addl 4(%esp), %eax # 4-byte Folded Reload + adcl 12(%esp), %edx # 4-byte Folded Reload + movl 36(%esp), %edi + movl %ebp, 8(%edi) + movl %eax, 12(%edi) + movl %edx, 16(%edi) + adcl %ecx, %esi + movl %esi, 20(%edi) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end36: + .size mcl_fpDbl_mulPre3Lbmi2, .Lfunc_end36-mcl_fpDbl_mulPre3Lbmi2 + + .globl mcl_fpDbl_sqrPre3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre3Lbmi2,@function +mcl_fpDbl_sqrPre3Lbmi2: # @mcl_fpDbl_sqrPre3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 44(%esp), %edx + movl 8(%edx), %edi + movl %edi, (%esp) # 4-byte Spill + movl (%edx), %ecx + movl 4(%edx), %esi + movl 40(%esp), %eax + movl %ecx, %edx + mulxl %ecx, %edx, %ebx + movl %edx, (%eax) + movl %esi, %edx + mulxl %ecx, %ebp, %eax + movl %eax, 8(%esp) # 4-byte Spill + addl %ebp, %ebx + movl %edi, %edx + mulxl %ecx, %edx, %ecx + movl %edx, 12(%esp) # 4-byte Spill + movl %ecx, 16(%esp) # 4-byte Spill + movl %eax, %edi + adcl %edx, %edi + adcl $0, %ecx + addl %ebp, %ebx + movl %esi, %edx + mulxl %esi, %ebp, %eax + movl %eax, 4(%esp) # 4-byte Spill + adcl %edi, %ebp + movl (%esp), %eax # 4-byte Reload + movl %eax, %edx + mulxl %esi, %edx, %esi + adcl %edx, %ecx + sbbl %edi, %edi + andl $1, %edi + addl 8(%esp), %ebp # 4-byte Folded Reload + adcl 4(%esp), %ecx # 4-byte Folded Reload + adcl %esi, %edi + addl 
12(%esp), %ebp # 4-byte Folded Reload + adcl %edx, %ecx + movl %eax, %edx + mulxl %eax, %edx, %eax + adcl %edi, %edx + sbbl %edi, %edi + andl $1, %edi + addl 16(%esp), %ecx # 4-byte Folded Reload + adcl %esi, %edx + movl 40(%esp), %esi + movl %ebx, 4(%esi) + movl %ebp, 8(%esi) + movl %ecx, 12(%esi) + movl %edx, 16(%esi) + adcl %eax, %edi + movl %edi, 20(%esi) + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end37: + .size mcl_fpDbl_sqrPre3Lbmi2, .Lfunc_end37-mcl_fpDbl_sqrPre3Lbmi2 + + .globl mcl_fp_mont3Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont3Lbmi2,@function +mcl_fp_mont3Lbmi2: # @mcl_fp_mont3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 68(%esp), %eax + movl 8(%eax), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 72(%esp), %ecx + movl (%ecx), %ecx + mulxl %ecx, %edx, %edi + movl %edx, 40(%esp) # 4-byte Spill + movl (%eax), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 8(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ebp + movl %esi, %edx + mulxl %ecx, %edx, %ebx + movl %edx, 4(%esp) # 4-byte Spill + addl %eax, %ebx + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 76(%esp), %esi + movl -4(%esi), %eax + movl %eax, 20(%esp) # 4-byte Spill + imull %eax, %edx + movl (%esi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 4(%esi), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %edi + movl %edi, (%esp) # 4-byte Spill + mulxl %eax, %ebp, %edi + addl %ecx, %edi + movl 8(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + mulxl %eax, %ecx, %esi + adcl (%esp), %ecx # 4-byte Folded Reload + adcl $0, %esi + addl 4(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %edi + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl 4(%eax), %edx 
+ mulxl 16(%esp), %ebx, %eax # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + mulxl 8(%esp), %ebx, %eax # 4-byte Folded Reload + movl %ebx, (%esp) # 4-byte Spill + mulxl 12(%esp), %ebx, %ebp # 4-byte Folded Reload + addl (%esp), %ebp # 4-byte Folded Reload + movl %eax, %edx + adcl 4(%esp), %edx # 4-byte Folded Reload + movl 28(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %edi, %ebx + adcl %ecx, %ebp + adcl %esi, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + movl %eax, %edx + imull 20(%esp), %edx # 4-byte Folded Reload + mulxl 40(%esp), %ecx, %esi # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + mulxl 36(%esp), %esi, %ebx # 4-byte Folded Reload + addl %ecx, %ebx + mulxl 32(%esp), %ecx, %edi # 4-byte Folded Reload + adcl (%esp), %ecx # 4-byte Folded Reload + adcl $0, %edi + movl 24(%esp), %edx # 4-byte Reload + andl $1, %edx + addl %eax, %esi + adcl %ebp, %ebx + adcl 4(%esp), %ecx # 4-byte Folded Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 72(%esp), %edx + movl 8(%edx), %edx + mulxl 16(%esp), %esi, %eax # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + mulxl 8(%esp), %eax, %ebp # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + mulxl 12(%esp), %eax, %esi # 4-byte Folded Reload + addl 8(%esp), %esi # 4-byte Folded Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl 28(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebx, %eax + movl %eax, 16(%esp) # 4-byte Spill + adcl %ecx, %esi + adcl %edi, %ebp + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + sbbl %ebx, %ebx + movl 20(%esp), %edx # 4-byte Reload + imull %eax, %edx + mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 
20(%esp) # 4-byte Spill + movl %edx, %eax + mulxl 40(%esp), %edi, %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + addl 20(%esp), %edi # 4-byte Folded Reload + movl %eax, %edx + mulxl 32(%esp), %edx, %eax # 4-byte Folded Reload + adcl 24(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + andl $1, %ebx + addl 16(%esp), %ecx # 4-byte Folded Reload + adcl %esi, %edi + adcl %ebp, %edx + adcl 28(%esp), %eax # 4-byte Folded Reload + adcl $0, %ebx + movl %edi, %ebp + subl 36(%esp), %ebp # 4-byte Folded Reload + movl %edx, %esi + sbbl 40(%esp), %esi # 4-byte Folded Reload + movl %eax, %ecx + sbbl 32(%esp), %ecx # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB38_2 +# BB#1: + movl %ebp, %edi +.LBB38_2: + movl 64(%esp), %ebp + movl %edi, (%ebp) + testb %bl, %bl + jne .LBB38_4 +# BB#3: + movl %esi, %edx +.LBB38_4: + movl %edx, 4(%ebp) + jne .LBB38_6 +# BB#5: + movl %ecx, %eax +.LBB38_6: + movl %eax, 8(%ebp) + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end38: + .size mcl_fp_mont3Lbmi2, .Lfunc_end38-mcl_fp_mont3Lbmi2 + + .globl mcl_fp_montNF3Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF3Lbmi2,@function +mcl_fp_montNF3Lbmi2: # @mcl_fp_montNF3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 60(%esp), %eax + movl (%eax), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 64(%esp), %ecx + movl (%ecx), %ecx + mulxl %ecx, %esi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %edi, %edx + mulxl %ecx, %edi, %ebp + addl %esi, %ebp + movl 8(%eax), %edx + movl %edx, 8(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ebx + adcl 32(%esp), %eax # 4-byte Folded Reload + adcl $0, %ebx + movl 68(%esp), %esi + movl -4(%esi), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl %edi, %edx + imull %ecx, %edx + movl (%esi), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + mulxl %ecx, %esi, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + addl %edi, %esi 
+ movl 68(%esp), %esi + movl 4(%esi), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + mulxl %ecx, %edi, %ecx + adcl %ebp, %edi + movl 8(%esi), %esi + movl %esi, 24(%esp) # 4-byte Spill + mulxl %esi, %ebp, %edx + adcl %eax, %ebp + adcl $0, %ebx + addl 4(%esp), %edi # 4-byte Folded Reload + adcl %ecx, %ebp + adcl %edx, %ebx + movl 64(%esp), %eax + movl 4(%eax), %edx + mulxl 12(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + mulxl 16(%esp), %esi, %ecx # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + addl %eax, %ecx + mulxl 8(%esp), %esi, %eax # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + adcl $0, %eax + movl 4(%esp), %edx # 4-byte Reload + addl %edi, %edx + adcl %ebp, %ecx + adcl %ebx, %esi + adcl $0, %eax + movl %edx, %ebp + imull 20(%esp), %edx # 4-byte Folded Reload + mulxl 32(%esp), %ebx, %edi # 4-byte Folded Reload + addl %ebp, %ebx + mulxl 28(%esp), %ebp, %ebx # 4-byte Folded Reload + adcl %ecx, %ebp + mulxl 24(%esp), %ecx, %edx # 4-byte Folded Reload + adcl %esi, %ecx + adcl $0, %eax + addl %edi, %ebp + adcl %ebx, %ecx + adcl %edx, %eax + movl 64(%esp), %edx + movl 8(%edx), %edx + mulxl 12(%esp), %esi, %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + mulxl 16(%esp), %ebx, %edi # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + addl %esi, %edi + mulxl 8(%esp), %ebx, %esi # 4-byte Folded Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl $0, %esi + addl %ebp, 16(%esp) # 4-byte Folded Spill + adcl %ecx, %edi + adcl %eax, %ebx + adcl $0, %esi + movl 20(%esp), %edx # 4-byte Reload + movl 16(%esp), %ecx # 4-byte Reload + imull %ecx, %edx + mulxl 32(%esp), %eax, %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + addl %ecx, %eax + movl %edx, %eax + mulxl 28(%esp), %ecx, %ebp # 4-byte Folded Reload + adcl %edi, %ecx + mulxl 24(%esp), %eax, %edx # 4-byte Folded Reload + adcl %ebx, %eax + adcl $0, %esi + addl 20(%esp), %ecx # 4-byte Folded Reload + adcl %ebp, 
%eax + adcl %edx, %esi + movl %ecx, %ebp + subl 32(%esp), %ebp # 4-byte Folded Reload + movl %eax, %edi + sbbl 28(%esp), %edi # 4-byte Folded Reload + movl %esi, %edx + sbbl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, %ebx + sarl $31, %ebx + testl %ebx, %ebx + js .LBB39_2 +# BB#1: + movl %ebp, %ecx +.LBB39_2: + movl 56(%esp), %ebx + movl %ecx, (%ebx) + js .LBB39_4 +# BB#3: + movl %edi, %eax +.LBB39_4: + movl %eax, 4(%ebx) + js .LBB39_6 +# BB#5: + movl %edx, %esi +.LBB39_6: + movl %esi, 8(%ebx) + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end39: + .size mcl_fp_montNF3Lbmi2, .Lfunc_end39-mcl_fp_montNF3Lbmi2 + + .globl mcl_fp_montRed3Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed3Lbmi2,@function +mcl_fp_montRed3Lbmi2: # @mcl_fp_montRed3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 64(%esp), %ecx + movl -4(%ecx), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl (%ecx), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax + movl (%eax), %ebx + movl %ebx, %edx + imull %edi, %edx + movl 8(%ecx), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 4(%ecx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + mulxl %edi, %edi, %eax + movl %edi, 16(%esp) # 4-byte Spill + mulxl %ecx, %ebp, %edi + mulxl %esi, %edx, %ecx + addl %ebp, %ecx + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl $0, %eax + addl %ebx, %edx + movl 60(%esp), %edx + adcl 4(%edx), %ecx + adcl 8(%edx), %edi + adcl 12(%edx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 20(%edx), %eax + movl 16(%edx), %edx + adcl $0, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl %ebx, %ebx + andl $1, %ebx + movl %ecx, %edx + imull 20(%esp), %edx # 4-byte Folded Reload + mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + mulxl 24(%esp), %ebp, %eax # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + addl %esi, %eax + mulxl 32(%esp), %esi, 
%ebp # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + adcl $0, %ebp + addl %ecx, 4(%esp) # 4-byte Folded Spill + adcl %edi, %eax + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl $0, 16(%esp) # 4-byte Folded Spill + adcl $0, %ebx + movl 20(%esp), %edx # 4-byte Reload + imull %eax, %edx + mulxl 24(%esp), %ecx, %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + movl %ecx, 20(%esp) # 4-byte Spill + movl %edx, %ecx + mulxl 28(%esp), %edi, %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + addl 8(%esp), %edi # 4-byte Folded Reload + movl %ecx, %edx + mulxl 32(%esp), %ecx, %edx # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + adcl $0, %edx + addl %eax, 20(%esp) # 4-byte Folded Spill + adcl %esi, %edi + adcl %ebp, %ecx + adcl 16(%esp), %edx # 4-byte Folded Reload + adcl $0, %ebx + movl %edi, %ebp + subl 24(%esp), %ebp # 4-byte Folded Reload + movl %ecx, %esi + sbbl 28(%esp), %esi # 4-byte Folded Reload + movl %edx, %eax + sbbl 32(%esp), %eax # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB40_2 +# BB#1: + movl %ebp, %edi +.LBB40_2: + movl 56(%esp), %ebp + movl %edi, (%ebp) + testb %bl, %bl + jne .LBB40_4 +# BB#3: + movl %esi, %ecx +.LBB40_4: + movl %ecx, 4(%ebp) + jne .LBB40_6 +# BB#5: + movl %eax, %edx +.LBB40_6: + movl %edx, 8(%ebp) + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end40: + .size mcl_fp_montRed3Lbmi2, .Lfunc_end40-mcl_fp_montRed3Lbmi2 + + .globl mcl_fp_addPre3Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre3Lbmi2,@function +mcl_fp_addPre3Lbmi2: # @mcl_fp_addPre3Lbmi2 +# BB#0: + pushl %esi + movl 16(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 12(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 8(%eax), %eax + adcl 8(%esi), %eax + movl 8(%esp), %esi + movl %ecx, (%esi) + movl %edx, 4(%esi) + movl %eax, 8(%esi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + retl 
+.Lfunc_end41: + .size mcl_fp_addPre3Lbmi2, .Lfunc_end41-mcl_fp_addPre3Lbmi2 + + .globl mcl_fp_subPre3Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre3Lbmi2,@function +mcl_fp_subPre3Lbmi2: # @mcl_fp_subPre3Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + xorl %eax, %eax + movl 20(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %esi + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl 12(%esp), %edi + movl %edx, (%edi) + movl %esi, 4(%edi) + movl %ecx, 8(%edi) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + retl +.Lfunc_end42: + .size mcl_fp_subPre3Lbmi2, .Lfunc_end42-mcl_fp_subPre3Lbmi2 + + .globl mcl_fp_shr1_3Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_3Lbmi2,@function +mcl_fp_shr1_3Lbmi2: # @mcl_fp_shr1_3Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl 8(%eax), %ecx + movl (%eax), %edx + movl 4(%eax), %eax + shrdl $1, %eax, %edx + movl 8(%esp), %esi + movl %edx, (%esi) + shrdl $1, %ecx, %eax + movl %eax, 4(%esi) + shrl %ecx + movl %ecx, 8(%esi) + popl %esi + retl +.Lfunc_end43: + .size mcl_fp_shr1_3Lbmi2, .Lfunc_end43-mcl_fp_shr1_3Lbmi2 + + .globl mcl_fp_add3Lbmi2 + .align 16, 0x90 + .type mcl_fp_add3Lbmi2,@function +mcl_fp_add3Lbmi2: # @mcl_fp_add3Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %ecx + movl 20(%esp), %esi + addl (%esi), %eax + adcl 4(%esi), %ecx + movl 8(%edx), %edx + adcl 8(%esi), %edx + movl 16(%esp), %esi + movl %eax, (%esi) + movl %ecx, 4(%esi) + movl %edx, 8(%esi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 28(%esp), %edi + subl (%edi), %eax + sbbl 4(%edi), %ecx + sbbl 8(%edi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB44_2 +# BB#1: # %nocarry + movl %eax, (%esi) + movl %ecx, 4(%esi) + movl %edx, 8(%esi) +.LBB44_2: # %carry + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end44: + .size mcl_fp_add3Lbmi2, .Lfunc_end44-mcl_fp_add3Lbmi2 + + .globl mcl_fp_addNF3Lbmi2 + .align 16, 0x90 + .type 
mcl_fp_addNF3Lbmi2,@function +mcl_fp_addNF3Lbmi2: # @mcl_fp_addNF3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 24(%esp), %esi + addl (%esi), %edx + adcl 4(%esi), %ecx + movl 8(%eax), %eax + adcl 8(%esi), %eax + movl 32(%esp), %ebp + movl %edx, %ebx + subl (%ebp), %ebx + movl %ecx, %edi + sbbl 4(%ebp), %edi + movl %eax, %esi + sbbl 8(%ebp), %esi + movl %esi, %ebp + sarl $31, %ebp + testl %ebp, %ebp + js .LBB45_2 +# BB#1: + movl %ebx, %edx +.LBB45_2: + movl 20(%esp), %ebx + movl %edx, (%ebx) + js .LBB45_4 +# BB#3: + movl %edi, %ecx +.LBB45_4: + movl %ecx, 4(%ebx) + js .LBB45_6 +# BB#5: + movl %esi, %eax +.LBB45_6: + movl %eax, 8(%ebx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end45: + .size mcl_fp_addNF3Lbmi2, .Lfunc_end45-mcl_fp_addNF3Lbmi2 + + .globl mcl_fp_sub3Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub3Lbmi2,@function +mcl_fp_sub3Lbmi2: # @mcl_fp_sub3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %edx + movl (%edx), %ecx + movl 4(%edx), %eax + xorl %ebx, %ebx + movl 28(%esp), %esi + subl (%esi), %ecx + sbbl 4(%esi), %eax + movl 8(%edx), %edx + sbbl 8(%esi), %edx + movl 20(%esp), %esi + movl %ecx, (%esi) + movl %eax, 4(%esi) + movl %edx, 8(%esi) + sbbl $0, %ebx + testb $1, %bl + je .LBB46_2 +# BB#1: # %carry + movl 32(%esp), %edi + movl 4(%edi), %ebx + movl 8(%edi), %ebp + addl (%edi), %ecx + movl %ecx, (%esi) + adcl %eax, %ebx + movl %ebx, 4(%esi) + adcl %edx, %ebp + movl %ebp, 8(%esi) +.LBB46_2: # %nocarry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end46: + .size mcl_fp_sub3Lbmi2, .Lfunc_end46-mcl_fp_sub3Lbmi2 + + .globl mcl_fp_subNF3Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF3Lbmi2,@function +mcl_fp_subNF3Lbmi2: # @mcl_fp_subNF3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 28(%esp), %esi + subl (%esi), 
%ecx + sbbl 4(%esi), %edx + movl 8(%eax), %eax + sbbl 8(%esi), %eax + movl %eax, %esi + sarl $31, %esi + movl %esi, %edi + shldl $1, %eax, %edi + movl 32(%esp), %ebx + andl (%ebx), %edi + movl 8(%ebx), %ebp + andl %esi, %ebp + andl 4(%ebx), %esi + addl %ecx, %edi + adcl %edx, %esi + movl 20(%esp), %ecx + movl %edi, (%ecx) + movl %esi, 4(%ecx) + adcl %eax, %ebp + movl %ebp, 8(%ecx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end47: + .size mcl_fp_subNF3Lbmi2, .Lfunc_end47-mcl_fp_subNF3Lbmi2 + + .globl mcl_fpDbl_add3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add3Lbmi2,@function +mcl_fpDbl_add3Lbmi2: # @mcl_fpDbl_add3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + pushl %eax + movl 32(%esp), %esi + movl 20(%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 16(%esi), %edi + movl 12(%esi), %ebx + movl (%esi), %edx + movl 28(%esp), %eax + addl (%eax), %edx + movl 24(%esp), %ecx + movl %edx, (%ecx) + movl 8(%esi), %edx + movl 4(%esi), %esi + adcl 4(%eax), %esi + adcl 8(%eax), %edx + movl %esi, 4(%ecx) + movl 20(%eax), %ebp + movl %edx, 8(%ecx) + movl 12(%eax), %esi + movl 16(%eax), %edx + adcl %ebx, %esi + adcl %edi, %edx + adcl (%esp), %ebp # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + movl 36(%esp), %ecx + movl %esi, %ebx + subl (%ecx), %ebx + movl %edx, %edi + sbbl 4(%ecx), %edi + movl %edi, (%esp) # 4-byte Spill + movl %ebp, %ecx + movl 36(%esp), %edi + sbbl 8(%edi), %ecx + sbbl $0, %eax + andl $1, %eax + jne .LBB48_2 +# BB#1: + movl %ecx, %ebp +.LBB48_2: + testb %al, %al + jne .LBB48_4 +# BB#3: + movl %ebx, %esi +.LBB48_4: + movl 24(%esp), %eax + movl %esi, 12(%eax) + jne .LBB48_6 +# BB#5: + movl (%esp), %edx # 4-byte Reload +.LBB48_6: + movl %edx, 16(%eax) + movl %ebp, 20(%eax) + addl $4, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end48: + .size mcl_fpDbl_add3Lbmi2, .Lfunc_end48-mcl_fpDbl_add3Lbmi2 + + .globl mcl_fpDbl_sub3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub3Lbmi2,@function 
+mcl_fpDbl_sub3Lbmi2: # @mcl_fpDbl_sub3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + movl 28(%esp), %ebx + subl (%ebx), %edx + sbbl 4(%ebx), %esi + movl 8(%ecx), %ebp + sbbl 8(%ebx), %ebp + movl 20(%esp), %eax + movl %edx, (%eax) + movl 12(%ecx), %edi + sbbl 12(%ebx), %edi + movl %esi, 4(%eax) + movl 16(%ecx), %esi + sbbl 16(%ebx), %esi + movl 20(%ebx), %ebx + movl 20(%ecx), %edx + movl %ebp, 8(%eax) + sbbl %ebx, %edx + movl $0, %ecx + sbbl $0, %ecx + andl $1, %ecx + movl 32(%esp), %ebp + jne .LBB49_1 +# BB#2: + xorl %ebx, %ebx + jmp .LBB49_3 +.LBB49_1: + movl 8(%ebp), %ebx +.LBB49_3: + testb %cl, %cl + movl $0, %eax + jne .LBB49_4 +# BB#5: + xorl %ecx, %ecx + jmp .LBB49_6 +.LBB49_4: + movl (%ebp), %ecx + movl 4(%ebp), %eax +.LBB49_6: + addl %edi, %ecx + adcl %esi, %eax + movl 20(%esp), %esi + movl %ecx, 12(%esi) + movl %eax, 16(%esi) + adcl %edx, %ebx + movl %ebx, 20(%esi) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end49: + .size mcl_fpDbl_sub3Lbmi2, .Lfunc_end49-mcl_fpDbl_sub3Lbmi2 + + .globl mcl_fp_mulUnitPre4Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre4Lbmi2,@function +mcl_fp_mulUnitPre4Lbmi2: # @mcl_fp_mulUnitPre4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %edx + movl 24(%esp), %eax + mulxl 4(%eax), %esi, %ecx + mulxl (%eax), %edi, %ebx + addl %esi, %ebx + mulxl 8(%eax), %ebp, %esi + adcl %ecx, %ebp + mulxl 12(%eax), %eax, %ecx + movl 20(%esp), %edx + movl %edi, (%edx) + movl %ebx, 4(%edx) + movl %ebp, 8(%edx) + adcl %esi, %eax + movl %eax, 12(%edx) + adcl $0, %ecx + movl %ecx, 16(%edx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end50: + .size mcl_fp_mulUnitPre4Lbmi2, .Lfunc_end50-mcl_fp_mulUnitPre4Lbmi2 + + .globl mcl_fpDbl_mulPre4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre4Lbmi2,@function +mcl_fpDbl_mulPre4Lbmi2: # @mcl_fpDbl_mulPre4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi 
+ pushl %esi + subl $28, %esp + movl 52(%esp), %eax + movl (%eax), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 56(%esp), %ecx + movl (%ecx), %ebp + mulxl %ebp, %esi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl %ebx, %edx + movl %ebx, %ecx + mulxl %ebp, %edx, %ebx + movl %edx, 8(%esp) # 4-byte Spill + addl %esi, %ebx + movl 8(%eax), %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %esi + mulxl %ebp, %eax, %edi + adcl 16(%esp), %eax # 4-byte Folded Reload + movl 12(%esi), %edx + movl %edx, 16(%esp) # 4-byte Spill + mulxl %ebp, %ebp, %esi + adcl %edi, %ebp + movl 48(%esp), %edx + movl 8(%esp), %edi # 4-byte Reload + movl %edi, (%edx) + adcl $0, %esi + movl 56(%esp), %edx + movl 4(%edx), %edi + movl %ecx, %edx + mulxl %edi, %ecx, %edx + movl %edx, 8(%esp) # 4-byte Spill + addl %ebx, %ecx + movl 24(%esp), %edx # 4-byte Reload + mulxl %edi, %ebx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %eax, %ebx + movl 20(%esp), %edx # 4-byte Reload + mulxl %edi, %eax, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebp, %eax + movl 16(%esp), %edx # 4-byte Reload + mulxl %edi, %edi, %edx + adcl %esi, %edi + sbbl %ebp, %ebp + andl $1, %ebp + addl 8(%esp), %ebx # 4-byte Folded Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + adcl (%esp), %edi # 4-byte Folded Reload + adcl %edx, %ebp + movl 48(%esp), %edx + movl %ecx, 4(%edx) + movl 56(%esp), %ecx + movl 8(%ecx), %ecx + movl 12(%esp), %edx # 4-byte Reload + mulxl %ecx, %edx, %esi + movl %esi, 8(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + mulxl %ecx, %ebx, %edx + movl %edx, 24(%esp) # 4-byte Spill + adcl %eax, %ebx + movl 20(%esp), %edx # 4-byte Reload + mulxl %ecx, %esi, %eax + movl %eax, 20(%esp) # 4-byte Spill + adcl %edi, %esi + movl 16(%esp), %edx # 4-byte Reload + mulxl %ecx, %edi, %eax + adcl %ebp, %edi + sbbl %ebp, %ebp + andl $1, %ebp + addl 8(%esp), %ebx # 4-byte Folded 
Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + adcl %eax, %ebp + movl 48(%esp), %eax + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 56(%esp), %eax + movl 12(%eax), %edx + movl 52(%esp), %eax + mulxl (%eax), %ecx, %eax + movl %eax, 20(%esp) # 4-byte Spill + addl %ebx, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 52(%esp), %ebx + mulxl 4(%ebx), %ecx, %eax + movl %eax, 16(%esp) # 4-byte Spill + adcl %esi, %ecx + mulxl 8(%ebx), %eax, %esi + adcl %edi, %eax + mulxl 12(%ebx), %edi, %edx + adcl %ebp, %edi + sbbl %ebp, %ebp + andl $1, %ebp + addl 20(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + adcl %esi, %edi + movl 48(%esp), %esi + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%esi) + movl %ecx, 16(%esi) + movl %eax, 20(%esi) + movl %edi, 24(%esi) + adcl %edx, %ebp + movl %ebp, 28(%esi) + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end51: + .size mcl_fpDbl_mulPre4Lbmi2, .Lfunc_end51-mcl_fpDbl_mulPre4Lbmi2 + + .globl mcl_fpDbl_sqrPre4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre4Lbmi2,@function +mcl_fpDbl_sqrPre4Lbmi2: # @mcl_fpDbl_sqrPre4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 60(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edi + movl 56(%esp), %ebx + movl %esi, %edx + mulxl %esi, %eax, %ebp + movl %eax, (%ebx) + movl %edi, %edx + mulxl %esi, %edx, %ecx + movl %edx, 28(%esp) # 4-byte Spill + movl %ecx, 16(%esp) # 4-byte Spill + movl %ebp, %eax + addl %edx, %eax + movl 60(%esp), %edx + movl 8(%edx), %edx + movl %edx, (%esp) # 4-byte Spill + mulxl %esi, %edx, %ebx + movl %edx, 20(%esp) # 4-byte Spill + movl %ebx, 24(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl %edx, %ebp + movl 60(%esp), %ecx + movl 12(%ecx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %esi, %esi, %ecx + adcl %ebx, %esi + adcl $0, %ecx + addl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 
28(%esp) # 4-byte Spill + movl %edi, %edx + mulxl %edi, %ebx, %eax + movl %eax, 8(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 32(%esp), %edx # 4-byte Reload + mulxl %edi, %ebp, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl (%esp), %edx # 4-byte Reload + mulxl %edi, %edi, %eax + movl %eax, 12(%esp) # 4-byte Spill + adcl %edi, %esi + adcl %ecx, %ebp + sbbl %ecx, %ecx + andl $1, %ecx + addl 16(%esp), %ebx # 4-byte Folded Reload + adcl 8(%esp), %esi # 4-byte Folded Reload + adcl %eax, %ebp + adcl 4(%esp), %ecx # 4-byte Folded Reload + addl 20(%esp), %ebx # 4-byte Folded Reload + adcl %edi, %esi + mulxl %edx, %edi, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %edx, %eax + adcl %ebp, %edi + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %ebp, %edx + adcl %ecx, %ebp + sbbl %eax, %eax + andl $1, %eax + addl 24(%esp), %esi # 4-byte Folded Reload + adcl 12(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + movl %ebx, 8(%eax) + movl 60(%esp), %eax + movl 12(%eax), %edx + mulxl (%eax), %ebx, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + addl %esi, %ebx + mulxl 4(%eax), %esi, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl %edi, %esi + mulxl 8(%eax), %ecx, %edi + adcl %ebp, %ecx + mulxl %edx, %ebp, %edx + adcl 24(%esp), %ebp # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 32(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + adcl %edi, %ebp + movl 56(%esp), %edi + movl %ebx, 12(%edi) + movl %esi, 16(%edi) + movl %ecx, 20(%edi) + movl %ebp, 24(%edi) + adcl %edx, %eax + movl %eax, 28(%edi) + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end52: + .size mcl_fpDbl_sqrPre4Lbmi2, .Lfunc_end52-mcl_fpDbl_sqrPre4Lbmi2 + + .globl mcl_fp_mont4Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont4Lbmi2,@function +mcl_fp_mont4Lbmi2: # 
@mcl_fp_mont4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $64, %esp + movl 88(%esp), %eax + movl 12(%eax), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 92(%esp), %ecx + movl (%ecx), %ecx + movl 8(%eax), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl (%eax), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 4(%eax), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ebp + movl %eax, 60(%esp) # 4-byte Spill + movl %esi, %edx + mulxl %ecx, %edx, %eax + movl %edx, 56(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl %ecx, %ebx, %esi + movl %edi, %edx + mulxl %ecx, %edx, %ecx + movl %edx, 8(%esp) # 4-byte Spill + addl %ebx, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 96(%esp), %ebx + movl -4(%ebx), %eax + movl %eax, 44(%esp) # 4-byte Spill + imull %eax, %edx + movl (%ebx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 4(%ebx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + mulxl %ecx, %esi, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + addl %esi, %eax + movl %eax, %ebp + movl 8(%ebx), %eax + movl %eax, 52(%esp) # 4-byte Spill + mulxl %eax, %edi, %eax + movl %eax, 4(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl 12(%ebx), %eax + movl %eax, 48(%esp) # 4-byte Spill + mulxl %eax, %esi, %ebx + adcl 4(%esp), %esi # 4-byte Folded Reload + adcl $0, %ebx + addl 8(%esp), %ecx # 4-byte Folded Reload + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 4(%eax), %edx + mulxl 40(%esp), %ecx, %eax # 4-byte Folded 
Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + mulxl 28(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + mulxl 32(%esp), %ecx, %ebp # 4-byte Folded Reload + addl (%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + movl 20(%esp), %edx # 4-byte Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + movl 24(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl %edi, %ebp + adcl %esi, 12(%esp) # 4-byte Folded Spill + adcl %ebx, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %edx + imull 44(%esp), %edx # 4-byte Folded Reload + mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 56(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + addl %ecx, %esi + mulxl 52(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %eax, %ebx + mulxl 48(%esp), %edi, %eax # 4-byte Folded Reload + adcl %ecx, %edi + adcl $0, %eax + movl 16(%esp), %ecx # 4-byte Reload + andl $1, %ecx + movl 4(%esp), %edx # 4-byte Reload + addl 8(%esp), %edx # 4-byte Folded Reload + adcl %ebp, %esi + movl %esi, 8(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 92(%esp), %edx + movl 8(%edx), %edx + mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 
20(%esp) # 4-byte Spill + mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload + mulxl 32(%esp), %ebp, %ecx # 4-byte Folded Reload + addl %esi, %ecx + movl %ecx, %esi + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + movl 20(%esp), %eax # 4-byte Reload + adcl 12(%esp), %eax # 4-byte Folded Reload + movl 24(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 8(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + movl %esi, 12(%esp) # 4-byte Spill + adcl %edi, %ecx + movl %ecx, %edi + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl %ebp, %edx + imull 44(%esp), %edx # 4-byte Folded Reload + mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 56(%esp), %esi, %ebx # 4-byte Folded Reload + movl %esi, 8(%esp) # 4-byte Spill + addl %ecx, %ebx + mulxl 52(%esp), %ecx, %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + adcl %eax, %ecx + mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + adcl $0, %esi + movl 16(%esp), %edx # 4-byte Reload + andl $1, %edx + addl %ebp, 8(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl %edi, %ecx + adcl 20(%esp), %eax # 4-byte Folded Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 92(%esp), %edx + movl 12(%edx), %edx + mulxl 28(%esp), %ebp, %edi # 4-byte Folded Reload + movl %ebp, 28(%esp) # 4-byte Spill + movl %edi, 24(%esp) # 4-byte Spill + mulxl 32(%esp), %edi, %ebp # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + addl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 28(%esp) # 4-byte Spill + mulxl 40(%esp), %ebp, %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + mulxl 36(%esp), %edi, %edx # 4-byte Folded Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + adcl %ebp, 
%edx + movl %edx, 36(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 32(%esp), %ebp # 4-byte Reload + addl %ebx, %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl %eax, %edi + adcl %esi, 36(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + sbbl %ecx, %ecx + movl 44(%esp), %edx # 4-byte Reload + imull %ebp, %edx + mulxl 56(%esp), %eax, %esi # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %ebx + mulxl 60(%esp), %ebp, %eax # 4-byte Folded Reload + addl %esi, %ebp + mulxl 52(%esp), %esi, %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl %eax, %esi + movl %ebx, %edx + mulxl 48(%esp), %edx, %eax # 4-byte Folded Reload + adcl 24(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + andl $1, %ecx + movl 44(%esp), %ebx # 4-byte Reload + addl 32(%esp), %ebx # 4-byte Folded Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + adcl %edi, %esi + adcl 36(%esp), %edx # 4-byte Folded Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + adcl $0, %ecx + movl %ebp, %edi + subl 56(%esp), %edi # 4-byte Folded Reload + movl %esi, %ebx + sbbl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl %edx, %ebx + sbbl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl %eax, %ebx + sbbl 48(%esp), %ebx # 4-byte Folded Reload + sbbl $0, %ecx + andl $1, %ecx + jne .LBB53_2 +# BB#1: + movl %edi, %ebp +.LBB53_2: + movl 84(%esp), %edi + movl %ebp, (%edi) + testb %cl, %cl + jne .LBB53_4 +# BB#3: + movl 56(%esp), %esi # 4-byte Reload +.LBB53_4: + movl %esi, 4(%edi) + jne .LBB53_6 +# BB#5: + movl 60(%esp), %edx # 4-byte Reload +.LBB53_6: + movl %edx, 8(%edi) + jne .LBB53_8 +# BB#7: + movl %ebx, %eax +.LBB53_8: + movl %eax, 12(%edi) + addl $64, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end53: + .size mcl_fp_mont4Lbmi2, 
.Lfunc_end53-mcl_fp_mont4Lbmi2 + + .globl mcl_fp_montNF4Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF4Lbmi2,@function +mcl_fp_montNF4Lbmi2: # @mcl_fp_montNF4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 76(%esp), %esi + movl (%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 4(%esi), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 80(%esp), %ecx + movl (%ecx), %ecx + mulxl %ecx, %edi, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, %edx + mulxl %ecx, %ebp, %eax + movl %ebp, 40(%esp) # 4-byte Spill + addl %edi, %eax + movl 8(%esi), %edx + movl %edx, 16(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl 12(%esi), %edx + movl %edx, 20(%esp) # 4-byte Spill + mulxl %ecx, %esi, %edi + adcl 44(%esp), %esi # 4-byte Folded Reload + adcl $0, %edi + movl 84(%esp), %ecx + movl -4(%ecx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl %ebp, %edx + imull %ecx, %edx + movl 84(%esp), %ecx + movl (%ecx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %ebp + movl %ebp, 8(%esp) # 4-byte Spill + addl 40(%esp), %ecx # 4-byte Folded Reload + movl 84(%esp), %ecx + movl 4(%ecx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl %eax, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 8(%eax), %eax + movl %eax, 36(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + adcl %ebx, %ecx + movl %ecx, %ebp + movl 84(%esp), %ecx + movl 12(%ecx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %edx + adcl %esi, %ebx + adcl $0, %edi + movl 8(%esp), %ecx # 4-byte Reload + addl %ecx, 12(%esp) # 4-byte Folded Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + adcl %eax, %ebx + adcl %edx, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl 4(%eax), %edx + mulxl 24(%esp), %esi, %edi # 4-byte Folded Reload + 
mulxl 28(%esp), %ecx, %eax # 4-byte Folded Reload + addl %esi, %eax + mulxl 16(%esp), %ebp, %esi # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + adcl %edi, %ebp + mulxl 20(%esp), %edi, %esi # 4-byte Folded Reload + adcl (%esp), %edi # 4-byte Folded Reload + adcl $0, %esi + addl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl 4(%esp), %eax # 4-byte Folded Reload + adcl %ebx, %ebp + adcl 8(%esp), %edi # 4-byte Folded Reload + adcl $0, %esi + movl %ecx, %edx + imull 48(%esp), %edx # 4-byte Folded Reload + mulxl 44(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + addl (%esp), %ebx # 4-byte Folded Reload + mulxl 40(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl %eax, %ebx + movl %ebx, %eax + mulxl 36(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %ebp, %ebx + movl %ebx, %ebp + mulxl 32(%esp), %ebx, %edx # 4-byte Folded Reload + adcl %edi, %ebx + adcl $0, %esi + addl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + adcl %ecx, %ebx + movl %ebx, 8(%esp) # 4-byte Spill + adcl %edx, %esi + movl 80(%esp), %ecx + movl 8(%ecx), %edx + mulxl 24(%esp), %ecx, %ebx # 4-byte Folded Reload + mulxl 28(%esp), %eax, %ebp # 4-byte Folded Reload + addl %ecx, %ebp + mulxl 16(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl %ebx, %edi + mulxl 20(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + adcl $0, %ecx + addl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %edi # 4-byte Folded Reload + adcl %esi, %ebx + adcl $0, %ecx + movl %eax, %edx + imull 48(%esp), %edx # 4-byte Folded Reload + mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + addl (%esp), %eax # 4-byte Folded Reload + 
mulxl 40(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + adcl %ebp, %eax + mulxl 36(%esp), %ebp, %esi # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + adcl %edi, %ebp + mulxl 32(%esp), %esi, %edx # 4-byte Folded Reload + adcl %ebx, %esi + adcl $0, %ecx + addl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + adcl (%esp), %esi # 4-byte Folded Reload + adcl %edx, %ecx + movl 80(%esp), %eax + movl 12(%eax), %edx + mulxl 24(%esp), %ebx, %ebp # 4-byte Folded Reload + mulxl 28(%esp), %edi, %eax # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + addl %ebx, %eax + mulxl 16(%esp), %edi, %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl %ebp, %edi + mulxl 20(%esp), %ebp, %ebx # 4-byte Folded Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl $0, %ebx + movl 28(%esp), %edx # 4-byte Reload + addl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + adcl %esi, %edi + adcl %ecx, %ebp + adcl $0, %ebx + movl 48(%esp), %edx # 4-byte Reload + imull 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + mulxl 44(%esp), %ecx, %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + addl 28(%esp), %ecx # 4-byte Folded Reload + mulxl 40(%esp), %esi, %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl %eax, %esi + movl 48(%esp), %edx # 4-byte Reload + mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + adcl %edi, %ecx + movl 48(%esp), %edx # 4-byte Reload + mulxl 32(%esp), %eax, %edx # 4-byte Folded Reload + adcl %ebp, %eax + adcl $0, %ebx + addl 24(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + adcl %edx, %ebx + movl %esi, %edi + subl 44(%esp), %edi # 4-byte Folded 
Reload + movl %ecx, %ebp + sbbl 40(%esp), %ebp # 4-byte Folded Reload + movl %eax, %edx + sbbl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %ebx, %edx + sbbl 32(%esp), %edx # 4-byte Folded Reload + testl %edx, %edx + js .LBB54_2 +# BB#1: + movl %edi, %esi +.LBB54_2: + movl 72(%esp), %edi + movl %esi, (%edi) + js .LBB54_4 +# BB#3: + movl %ebp, %ecx +.LBB54_4: + movl %ecx, 4(%edi) + js .LBB54_6 +# BB#5: + movl 48(%esp), %eax # 4-byte Reload +.LBB54_6: + movl %eax, 8(%edi) + js .LBB54_8 +# BB#7: + movl %edx, %ebx +.LBB54_8: + movl %ebx, 12(%edi) + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end54: + .size mcl_fp_montNF4Lbmi2, .Lfunc_end54-mcl_fp_montNF4Lbmi2 + + .globl mcl_fp_montRed4Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed4Lbmi2,@function +mcl_fp_montRed4Lbmi2: # @mcl_fp_montRed4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 80(%esp), %ecx + movl -4(%ecx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl (%ecx), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 76(%esp), %ebp + movl (%ebp), %edx + movl %edx, 36(%esp) # 4-byte Spill + imull %eax, %edx + movl 12(%ecx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 8(%ecx), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 4(%ecx), %eax + movl %eax, 32(%esp) # 4-byte Spill + mulxl %esi, %esi, %ecx + movl %esi, 16(%esp) # 4-byte Spill + movl %ecx, 24(%esp) # 4-byte Spill + mulxl %ebx, %esi, %ecx + movl %esi, 12(%esp) # 4-byte Spill + movl %ecx, 20(%esp) # 4-byte Spill + mulxl %eax, %ebx, %ecx + mulxl %edi, %edx, %esi + addl %ebx, %esi + movl %ecx, %edi + adcl 12(%esp), %edi # 4-byte Folded Reload + movl 20(%esp), %ebx # 4-byte Reload + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl 24(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 36(%esp), %edx # 4-byte Folded Reload + adcl 4(%ebp), %esi + adcl 8(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + adcl 12(%ebp), %ebx + movl %ebx, 20(%esp) # 
4-byte Spill + adcl 16(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 28(%ebp), %ecx + movl 24(%ebp), %edx + movl 20(%ebp), %edi + adcl $0, %edi + movl %edi, 8(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + sbbl %ebx, %ebx + andl $1, %ebx + movl %esi, %edx + imull 40(%esp), %edx # 4-byte Folded Reload + mulxl %eax, %ebp, %edi + mulxl 44(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + addl %ebp, %eax + mulxl 48(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl %edi, %ebp + movl 28(%esp), %ecx # 4-byte Reload + mulxl %ecx, %edi, %edx + adcl (%esp), %edi # 4-byte Folded Reload + adcl $0, %edx + addl %esi, 4(%esp) # 4-byte Folded Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl $0, 16(%esp) # 4-byte Folded Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + adcl $0, %ebx + movl %eax, %edx + imull 40(%esp), %edx # 4-byte Folded Reload + mulxl %ecx, %esi, %ecx + movl %esi, 20(%esp) # 4-byte Spill + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 32(%esp), %esi, %ecx # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + movl %ecx, 4(%esp) # 4-byte Spill + mulxl 44(%esp), %esi, %ecx # 4-byte Folded Reload + movl %esi, 8(%esp) # 4-byte Spill + addl (%esp), %ecx # 4-byte Folded Reload + mulxl 48(%esp), %esi, %edx # 4-byte Folded Reload + adcl 4(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %eax, 8(%esp) # 4-byte Folded Spill + adcl %ebp, %ecx + adcl %edi, %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 20(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edx # 4-byte Folded 
Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + adcl $0, %ebx + movl 40(%esp), %edx # 4-byte Reload + imull %ecx, %edx + mulxl 44(%esp), %esi, %eax # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + mulxl 32(%esp), %ebp, %esi # 4-byte Folded Reload + addl %eax, %ebp + movl %edx, %eax + mulxl 48(%esp), %edi, %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl %esi, %edi + movl %eax, %edx + mulxl 28(%esp), %edx, %esi # 4-byte Folded Reload + adcl 16(%esp), %edx # 4-byte Folded Reload + adcl $0, %esi + addl %ecx, 40(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %edx # 4-byte Folded Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + adcl $0, %ebx + movl %ebp, %ecx + subl 44(%esp), %ecx # 4-byte Folded Reload + movl %edi, %eax + sbbl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %eax + sbbl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl %esi, %eax + sbbl 28(%esp), %eax # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB55_2 +# BB#1: + movl %ecx, %ebp +.LBB55_2: + movl 72(%esp), %ecx + movl %ebp, (%ecx) + testb %bl, %bl + jne .LBB55_4 +# BB#3: + movl 44(%esp), %edi # 4-byte Reload +.LBB55_4: + movl %edi, 4(%ecx) + jne .LBB55_6 +# BB#5: + movl 48(%esp), %edx # 4-byte Reload +.LBB55_6: + movl %edx, 8(%ecx) + jne .LBB55_8 +# BB#7: + movl %eax, %esi +.LBB55_8: + movl %esi, 12(%ecx) + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end55: + .size mcl_fp_montRed4Lbmi2, .Lfunc_end55-mcl_fp_montRed4Lbmi2 + + .globl mcl_fp_addPre4Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre4Lbmi2,@function +mcl_fp_addPre4Lbmi2: # @mcl_fp_addPre4Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 20(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), 
%edx + movl 12(%eax), %edi + movl 8(%eax), %eax + adcl 8(%esi), %eax + movl 12(%esi), %esi + movl 16(%esp), %ebx + movl %ecx, (%ebx) + movl %edx, 4(%ebx) + movl %eax, 8(%ebx) + adcl %edi, %esi + movl %esi, 12(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end56: + .size mcl_fp_addPre4Lbmi2, .Lfunc_end56-mcl_fp_addPre4Lbmi2 + + .globl mcl_fp_subPre4Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre4Lbmi2,@function +mcl_fp_subPre4Lbmi2: # @mcl_fp_subPre4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + xorl %eax, %eax + movl 28(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %esi + movl 8(%ecx), %ebx + sbbl 8(%edi), %ebx + movl 12(%edi), %edi + movl 12(%ecx), %ecx + movl 20(%esp), %ebp + movl %edx, (%ebp) + movl %esi, 4(%ebp) + movl %ebx, 8(%ebp) + sbbl %edi, %ecx + movl %ecx, 12(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end57: + .size mcl_fp_subPre4Lbmi2, .Lfunc_end57-mcl_fp_subPre4Lbmi2 + + .globl mcl_fp_shr1_4Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_4Lbmi2,@function +mcl_fp_shr1_4Lbmi2: # @mcl_fp_shr1_4Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %eax + movl 12(%eax), %ecx + movl 8(%eax), %edx + movl (%eax), %esi + movl 4(%eax), %eax + shrdl $1, %eax, %esi + movl 12(%esp), %edi + movl %esi, (%edi) + shrdl $1, %edx, %eax + movl %eax, 4(%edi) + shrdl $1, %ecx, %edx + movl %edx, 8(%edi) + shrl %ecx + movl %ecx, 12(%edi) + popl %esi + popl %edi + retl +.Lfunc_end58: + .size mcl_fp_shr1_4Lbmi2, .Lfunc_end58-mcl_fp_shr1_4Lbmi2 + + .globl mcl_fp_add4Lbmi2 + .align 16, 0x90 + .type mcl_fp_add4Lbmi2,@function +mcl_fp_add4Lbmi2: # @mcl_fp_add4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + movl 24(%esp), %esi + addl (%esi), %eax + adcl 4(%esi), %ecx + movl 8(%edi), %edx + adcl 8(%esi), %edx + movl 
12(%esi), %esi + adcl 12(%edi), %esi + movl 20(%esp), %edi + movl %eax, (%edi) + movl %ecx, 4(%edi) + movl %edx, 8(%edi) + movl %esi, 12(%edi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 32(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %ecx + sbbl 8(%ebp), %edx + sbbl 12(%ebp), %esi + sbbl $0, %ebx + testb $1, %bl + jne .LBB59_2 +# BB#1: # %nocarry + movl %eax, (%edi) + movl %ecx, 4(%edi) + movl %edx, 8(%edi) + movl %esi, 12(%edi) +.LBB59_2: # %carry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end59: + .size mcl_fp_add4Lbmi2, .Lfunc_end59-mcl_fp_add4Lbmi2 + + .globl mcl_fp_addNF4Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF4Lbmi2,@function +mcl_fp_addNF4Lbmi2: # @mcl_fp_addNF4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $8, %esp + movl 36(%esp), %edx + movl (%edx), %esi + movl 4(%edx), %ecx + movl 32(%esp), %edi + addl (%edi), %esi + adcl 4(%edi), %ecx + movl 12(%edx), %ebp + movl 8(%edx), %edx + adcl 8(%edi), %edx + adcl 12(%edi), %ebp + movl 40(%esp), %eax + movl %esi, %ebx + subl (%eax), %ebx + movl %ecx, %edi + sbbl 4(%eax), %edi + movl %edi, (%esp) # 4-byte Spill + movl %edx, %edi + movl 40(%esp), %eax + sbbl 8(%eax), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %ebp, %edi + movl 40(%esp), %eax + sbbl 12(%eax), %edi + testl %edi, %edi + js .LBB60_2 +# BB#1: + movl %ebx, %esi +.LBB60_2: + movl 28(%esp), %ebx + movl %esi, (%ebx) + js .LBB60_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload +.LBB60_4: + movl %ecx, 4(%ebx) + js .LBB60_6 +# BB#5: + movl 4(%esp), %edx # 4-byte Reload +.LBB60_6: + movl %edx, 8(%ebx) + js .LBB60_8 +# BB#7: + movl %edi, %ebp +.LBB60_8: + movl %ebp, 12(%ebx) + addl $8, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end60: + .size mcl_fp_addNF4Lbmi2, .Lfunc_end60-mcl_fp_addNF4Lbmi2 + + .globl mcl_fp_sub4Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub4Lbmi2,@function +mcl_fp_sub4Lbmi2: # @mcl_fp_sub4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 
24(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 28(%esp), %edi + subl (%edi), %eax + sbbl 4(%edi), %ecx + movl 8(%esi), %edx + sbbl 8(%edi), %edx + movl 12(%esi), %esi + sbbl 12(%edi), %esi + movl 20(%esp), %edi + movl %eax, (%edi) + movl %ecx, 4(%edi) + movl %edx, 8(%edi) + movl %esi, 12(%edi) + sbbl $0, %ebx + testb $1, %bl + je .LBB61_2 +# BB#1: # %carry + movl 32(%esp), %ebx + addl (%ebx), %eax + movl 8(%ebx), %ebp + adcl 4(%ebx), %ecx + movl 12(%ebx), %ebx + movl %eax, (%edi) + movl %ecx, 4(%edi) + adcl %edx, %ebp + movl %ebp, 8(%edi) + adcl %esi, %ebx + movl %ebx, 12(%edi) +.LBB61_2: # %nocarry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end61: + .size mcl_fp_sub4Lbmi2, .Lfunc_end61-mcl_fp_sub4Lbmi2 + + .globl mcl_fp_subNF4Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF4Lbmi2,@function +mcl_fp_subNF4Lbmi2: # @mcl_fp_subNF4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $8, %esp + movl 32(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 36(%esp), %esi + subl (%esi), %edx + movl %edx, (%esp) # 4-byte Spill + sbbl 4(%esi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 12(%eax), %edi + movl 8(%eax), %edx + sbbl 8(%esi), %edx + sbbl 12(%esi), %edi + movl %edi, %esi + sarl $31, %esi + movl 40(%esp), %eax + movl 12(%eax), %ebp + andl %esi, %ebp + movl 8(%eax), %ecx + andl %esi, %ecx + movl 40(%esp), %eax + movl 4(%eax), %eax + andl %esi, %eax + movl 40(%esp), %ebx + andl (%ebx), %esi + addl (%esp), %esi # 4-byte Folded Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + movl 28(%esp), %ebx + movl %esi, (%ebx) + adcl %edx, %ecx + movl %eax, 4(%ebx) + movl %ecx, 8(%ebx) + adcl %edi, %ebp + movl %ebp, 12(%ebx) + addl $8, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end62: + .size mcl_fp_subNF4Lbmi2, .Lfunc_end62-mcl_fp_subNF4Lbmi2 + + .globl mcl_fpDbl_add4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add4Lbmi2,@function +mcl_fpDbl_add4Lbmi2: # 
@mcl_fpDbl_add4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $12, %esp + movl 40(%esp), %eax + movl (%eax), %edi + movl 4(%eax), %edx + movl 36(%esp), %esi + addl (%esi), %edi + adcl 4(%esi), %edx + movl 8(%eax), %ebx + adcl 8(%esi), %ebx + movl 12(%esi), %ebp + movl 32(%esp), %ecx + movl %edi, (%ecx) + movl 16(%esi), %edi + adcl 12(%eax), %ebp + adcl 16(%eax), %edi + movl %edx, 4(%ecx) + movl 28(%eax), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %ebx, 8(%ecx) + movl 24(%eax), %ebx + movl 20(%eax), %eax + movl %ebp, 12(%ecx) + movl 20(%esi), %edx + adcl %eax, %edx + movl 28(%esi), %ecx + movl 24(%esi), %ebp + adcl %ebx, %ebp + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + sbbl %ebx, %ebx + andl $1, %ebx + movl 44(%esp), %eax + movl %edi, %esi + subl (%eax), %esi + movl %esi, (%esp) # 4-byte Spill + movl %edx, %esi + sbbl 4(%eax), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl %ebp, %esi + sbbl 8(%eax), %esi + sbbl 12(%eax), %ecx + sbbl $0, %ebx + andl $1, %ebx + jne .LBB63_2 +# BB#1: + movl %esi, %ebp +.LBB63_2: + testb %bl, %bl + jne .LBB63_4 +# BB#3: + movl (%esp), %edi # 4-byte Reload +.LBB63_4: + movl 32(%esp), %eax + movl %edi, 16(%eax) + jne .LBB63_6 +# BB#5: + movl 4(%esp), %edx # 4-byte Reload +.LBB63_6: + movl %edx, 20(%eax) + movl %ebp, 24(%eax) + movl 8(%esp), %edx # 4-byte Reload + jne .LBB63_8 +# BB#7: + movl %ecx, %edx +.LBB63_8: + movl %edx, 28(%eax) + addl $12, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end63: + .size mcl_fpDbl_add4Lbmi2, .Lfunc_end63-mcl_fpDbl_add4Lbmi2 + + .globl mcl_fpDbl_sub4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub4Lbmi2,@function +mcl_fpDbl_sub4Lbmi2: # @mcl_fpDbl_sub4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + pushl %eax + movl 28(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 32(%esp), %ebp + subl (%ebp), %edx + sbbl 4(%ebp), %esi + movl 8(%eax), %ebx + sbbl 8(%ebp), %ebx + movl 24(%esp), %ecx 
+ movl %edx, (%ecx) + movl 12(%eax), %edx + sbbl 12(%ebp), %edx + movl %esi, 4(%ecx) + movl 16(%eax), %edi + sbbl 16(%ebp), %edi + movl %ebx, 8(%ecx) + movl 20(%ebp), %esi + movl %edx, 12(%ecx) + movl 20(%eax), %ebx + sbbl %esi, %ebx + movl 24(%ebp), %edx + movl 24(%eax), %esi + sbbl %edx, %esi + movl 28(%ebp), %edx + movl 28(%eax), %eax + sbbl %edx, %eax + movl %eax, (%esp) # 4-byte Spill + movl $0, %edx + sbbl $0, %edx + andl $1, %edx + movl 36(%esp), %ecx + movl (%ecx), %eax + jne .LBB64_1 +# BB#2: + xorl %ebp, %ebp + jmp .LBB64_3 +.LBB64_1: + movl 4(%ecx), %ebp +.LBB64_3: + testb %dl, %dl + jne .LBB64_5 +# BB#4: + movl $0, %eax +.LBB64_5: + jne .LBB64_6 +# BB#7: + movl $0, %edx + jmp .LBB64_8 +.LBB64_6: + movl 12(%ecx), %edx +.LBB64_8: + jne .LBB64_9 +# BB#10: + xorl %ecx, %ecx + jmp .LBB64_11 +.LBB64_9: + movl 8(%ecx), %ecx +.LBB64_11: + addl %edi, %eax + adcl %ebx, %ebp + movl 24(%esp), %edi + movl %eax, 16(%edi) + adcl %esi, %ecx + movl %ebp, 20(%edi) + movl %ecx, 24(%edi) + adcl (%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%edi) + addl $4, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end64: + .size mcl_fpDbl_sub4Lbmi2, .Lfunc_end64-mcl_fpDbl_sub4Lbmi2 + + .globl mcl_fp_mulUnitPre5Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre5Lbmi2,@function +mcl_fp_mulUnitPre5Lbmi2: # @mcl_fp_mulUnitPre5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $8, %esp + movl 36(%esp), %edx + movl 32(%esp), %ecx + mulxl 4(%ecx), %esi, %eax + mulxl (%ecx), %edi, %ebx + movl %edi, 4(%esp) # 4-byte Spill + addl %esi, %ebx + mulxl 8(%ecx), %ebp, %esi + adcl %eax, %ebp + mulxl 12(%ecx), %eax, %edi + movl %edi, (%esp) # 4-byte Spill + adcl %esi, %eax + mulxl 16(%ecx), %ecx, %edx + movl 28(%esp), %esi + movl 4(%esp), %edi # 4-byte Reload + movl %edi, (%esi) + movl %ebx, 4(%esi) + movl %ebp, 8(%esi) + movl %eax, 12(%esi) + adcl (%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esi) + adcl $0, %edx + movl %edx, 20(%esi) + addl $8, 
%esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end65: + .size mcl_fp_mulUnitPre5Lbmi2, .Lfunc_end65-mcl_fp_mulUnitPre5Lbmi2 + + .globl mcl_fpDbl_mulPre5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre5Lbmi2,@function +mcl_fpDbl_mulPre5Lbmi2: # @mcl_fpDbl_mulPre5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 68(%esp), %eax + movl (%eax), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %ecx + movl 72(%esp), %eax + movl (%eax), %ebp + mulxl %ebp, %esi, %edi + movl %ebx, %edx + mulxl %ebp, %edx, %eax + movl %edx, 20(%esp) # 4-byte Spill + addl %esi, %eax + movl 8(%ecx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %ebp, %esi, %ebx + adcl %edi, %esi + movl 12(%ecx), %edx + movl %edx, 28(%esp) # 4-byte Spill + mulxl %ebp, %edi, %ecx + adcl %ebx, %edi + movl 68(%esp), %edx + movl 16(%edx), %edx + movl %edx, 24(%esp) # 4-byte Spill + mulxl %ebp, %ebp, %edx + adcl %ecx, %ebp + movl 64(%esp), %ecx + movl 20(%esp), %ebx # 4-byte Reload + movl %ebx, (%ecx) + adcl $0, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 72(%esp), %ecx + movl 4(%ecx), %ebx + movl 36(%esp), %edx # 4-byte Reload + mulxl %ebx, %ecx, %edx + movl %edx, 20(%esp) # 4-byte Spill + addl %eax, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %ebx, %ecx, %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl %esi, %ecx + movl 32(%esp), %edx # 4-byte Reload + mulxl %ebx, %esi, %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl %edi, %esi + movl 28(%esp), %edx # 4-byte Reload + mulxl %ebx, %edi, %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl %ebp, %edi + movl 24(%esp), %edx # 4-byte Reload + mulxl %ebx, %eax, %edx + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebx + sbbl %eax, %eax + andl $1, %eax + addl 20(%esp), %ecx # 4-byte Folded Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + 
adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl %edx, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl 36(%esp), %edx # 4-byte Reload + movl %edx, 4(%eax) + movl 68(%esp), %ebx + movl (%ebx), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl 8(%eax), %eax + mulxl %eax, %edx, %ebp + movl %ebp, 12(%esp) # 4-byte Spill + addl %ecx, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 4(%ebx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %edx, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + adcl %esi, %edx + movl %edx, %ebp + movl 8(%ebx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %edi, %ecx + movl 12(%ebx), %edx + movl %edx, 28(%esp) # 4-byte Spill + mulxl %eax, %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl 16(%ebx), %edx + movl %edx, 24(%esp) # 4-byte Spill + mulxl %eax, %edi, %edx + adcl 16(%esp), %edi # 4-byte Folded Reload + sbbl %ebx, %ebx + andl $1, %ebx + addl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + adcl 4(%esp), %esi # 4-byte Folded Reload + adcl (%esp), %edi # 4-byte Folded Reload + adcl %edx, %ebx + movl 64(%esp), %eax + movl 20(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + movl 72(%esp), %eax + movl 12(%eax), %eax + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + addl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %ebp, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %ecx, %ebp + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl %esi, %ecx + movl 28(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 28(%esp) # 4-byte Spill + adcl %edi, %edx + movl %edx, %esi + movl 
24(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + adcl %ebx, %edi + sbbl %eax, %eax + andl $1, %eax + addl 20(%esp), %ebp # 4-byte Folded Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl 40(%esp), %edx # 4-byte Reload + movl %edx, 12(%eax) + movl 72(%esp), %eax + movl 16(%eax), %edx + movl 68(%esp), %eax + mulxl (%eax), %esi, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + addl %ebp, %esi + movl %esi, 40(%esp) # 4-byte Spill + mulxl 4(%eax), %ebx, %esi + movl %esi, 28(%esp) # 4-byte Spill + adcl %ecx, %ebx + mulxl 8(%eax), %esi, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + mulxl 12(%eax), %ecx, %ebp + adcl %edi, %ecx + mulxl 16(%eax), %edi, %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 36(%esp), %ebx # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + adcl %ebp, %edi + movl 64(%esp), %ebp + movl 40(%esp), %edx # 4-byte Reload + movl %edx, 16(%ebp) + movl %ebx, 20(%ebp) + movl %esi, 24(%ebp) + movl %ecx, 28(%ebp) + movl %edi, 32(%ebp) + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ebp) + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end66: + .size mcl_fpDbl_mulPre5Lbmi2, .Lfunc_end66-mcl_fpDbl_mulPre5Lbmi2 + + .globl mcl_fpDbl_sqrPre5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre5Lbmi2,@function +mcl_fpDbl_sqrPre5Lbmi2: # @mcl_fpDbl_sqrPre5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 64(%esp), %ecx + movl (%ecx), %edi + movl 4(%ecx), %esi + movl %esi, %edx + mulxl %edi, %ebp, %ebx + movl %ebp, 24(%esp) # 4-byte Spill + movl %ebx, 28(%esp) # 4-byte Spill + movl %edi, %edx + 
mulxl %edi, %edx, %eax + movl %edx, 16(%esp) # 4-byte Spill + addl %ebp, %eax + movl 8(%ecx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %edi, %ebp, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl %ebx, %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 12(%ecx), %edx + movl %edx, 20(%esp) # 4-byte Spill + mulxl %edi, %ecx, %ebx + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl 64(%esp), %edx + movl 16(%edx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %edi, %edi, %edx + adcl %ebx, %edi + movl 16(%esp), %ebx # 4-byte Reload + movl 60(%esp), %ebp + movl %ebx, (%ebp) + adcl $0, %edx + movl %edx, 8(%esp) # 4-byte Spill + addl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %edx + mulxl %esi, %ebx, %eax + movl %eax, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + mulxl %esi, %ebp, %eax + movl %eax, 12(%esp) # 4-byte Spill + adcl %ecx, %ebp + movl 20(%esp), %edx # 4-byte Reload + mulxl %esi, %ecx, %eax + movl %eax, 20(%esp) # 4-byte Spill + adcl %edi, %ecx + movl 32(%esp), %edx # 4-byte Reload + mulxl %esi, %edi, %edx + adcl 8(%esp), %edi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 28(%esp), %ebx # 4-byte Folded Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 60(%esp), %eax + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 4(%eax) + movl 64(%esp), %eax + movl (%eax), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + mulxl %esi, %edx, %eax + movl %eax, 16(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl 4(%eax), %edx + movl %edx, 20(%esp) # 4-byte Spill + mulxl %esi, %ebx, %eax + movl %eax, 8(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %esi, %edx + mulxl %esi, %ebp, %edx + movl %edx, 4(%esp) # 
4-byte Spill + movl %esi, %eax + adcl %ecx, %ebp + movl 64(%esp), %ecx + movl 12(%ecx), %esi + movl %esi, %edx + mulxl %eax, %eax, %ecx + movl %ecx, (%esp) # 4-byte Spill + adcl %edi, %eax + movl 32(%esp), %edx # 4-byte Reload + mulxl 36(%esp), %ecx, %edx # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + sbbl %edi, %edi + andl $1, %edi + addl 16(%esp), %ebx # 4-byte Folded Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl (%esp), %ecx # 4-byte Folded Reload + adcl %edx, %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + mulxl %esi, %edx, %edi + movl %edi, 24(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 20(%esp), %edx # 4-byte Reload + mulxl %esi, %edx, %edi + movl %edi, 20(%esp) # 4-byte Spill + adcl %ebp, %edx + movl %edx, %edi + movl 60(%esp), %eax + movl 28(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + movl 64(%esp), %eax + movl 8(%eax), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %esi, %ebx, %edx + movl %edx, 28(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %esi, %edx + mulxl %esi, %ebp, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %ecx, %ebp + movl 16(%eax), %ecx + movl %ecx, %edx + mulxl %esi, %esi, %edx + adcl 16(%esp), %esi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 24(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 60(%esp), %edx + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 12(%edx) + movl %ecx, %edx + movl 64(%esp), %eax + mulxl (%eax), %edx, %eax + movl %eax, 28(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %ecx, %edx + movl 64(%esp), %eax + mulxl 4(%eax), %edi, %edx + movl %edx, 24(%esp) # 
4-byte Spill + adcl %ebx, %edi + movl 36(%esp), %edx # 4-byte Reload + mulxl %ecx, %ebx, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %ecx, %edx + mulxl 12(%eax), %ebp, %eax + movl %eax, 12(%esp) # 4-byte Spill + adcl %esi, %ebp + movl %ecx, %edx + mulxl %ecx, %edx, %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 28(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %edx # 4-byte Folded Reload + movl 60(%esp), %esi + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%esi) + movl %edi, 20(%esi) + movl %ebx, 24(%esi) + movl %ebp, 28(%esi) + movl %edx, 32(%esi) + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esi) + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end67: + .size mcl_fpDbl_sqrPre5Lbmi2, .Lfunc_end67-mcl_fpDbl_sqrPre5Lbmi2 + + .globl mcl_fp_mont5Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont5Lbmi2,@function +mcl_fp_mont5Lbmi2: # @mcl_fp_mont5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $84, %esp + movl 108(%esp), %eax + movl 16(%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 112(%esp), %ecx + movl (%ecx), %ecx + movl 12(%eax), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 8(%eax), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl (%eax), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 4(%eax), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + mulxl %ecx, %edx, %eax + movl %edx, 80(%esp) # 4-byte Spill + movl %esi, %edx + mulxl %ecx, %edx, %esi + movl %edx, 76(%esp) # 4-byte Spill + movl %edi, %edx + mulxl %ecx, %edx, %edi + movl %edx, 72(%esp) # 4-byte Spill + movl %ebp, %edx + mulxl %ecx, %edx, %ebp + movl %edx, 68(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl %ecx, %edx, %ecx + movl %edx, 16(%esp) # 4-byte Spill + addl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte 
Spill + adcl 72(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl 76(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 80(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 116(%esp), %ebp + movl -4(%ebp), %eax + movl %eax, 60(%esp) # 4-byte Spill + imull %eax, %edx + movl (%ebp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 4(%ebp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %ebx + mulxl %eax, %esi, %edi + movl %esi, 12(%esp) # 4-byte Spill + addl %ecx, %edi + movl 8(%ebp), %eax + movl %eax, 72(%esp) # 4-byte Spill + mulxl %eax, %esi, %ecx + adcl %ebx, %esi + movl 12(%ebp), %eax + movl %eax, 68(%esp) # 4-byte Spill + mulxl %eax, %eax, %ebx + movl %ebx, 8(%esp) # 4-byte Spill + adcl %ecx, %eax + movl %eax, %ecx + movl 16(%ebp), %eax + movl %eax, 64(%esp) # 4-byte Spill + mulxl %eax, %ebx, %eax + adcl 8(%esp), %ebx # 4-byte Folded Reload + adcl $0, %eax + movl 12(%esp), %edx # 4-byte Reload + addl 16(%esp), %edx # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + sbbl %edi, %edi + andl $1, %edi + movl 112(%esp), %edx + movl 4(%edx), %edx + mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + mulxl 40(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 44(%esp), %esi, %ebp # 4-byte Folded Reload + addl %eax, %ebp + mulxl 56(%esp), %ecx, %eax 
# 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + movl 32(%esp), %eax # 4-byte Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 8(%esp) # 4-byte Spill + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + movl 24(%esp), %ebp # 4-byte Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl %edi, %edx + movl %edx, 36(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, %edi + mulxl 72(%esp), %esi, %ebx # 4-byte Folded Reload + adcl %eax, %esi + mulxl 68(%esp), %ecx, %ebp # 4-byte Folded Reload + adcl %ebx, %ecx + mulxl 64(%esp), %edx, %eax # 4-byte Folded Reload + adcl %ebp, %edx + movl %edx, %ebx + adcl $0, %eax + movl 28(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 12(%esp), %ebp # 4-byte Reload + addl 8(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %esi + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 112(%esp), %edx + movl 8(%edx), %edx + mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload + movl 
%ecx, 4(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + mulxl 40(%esp), %edi, %ebx # 4-byte Folded Reload + mulxl 44(%esp), %eax, %ecx # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + addl %edi, %ecx + mulxl 56(%esp), %eax, %edx # 4-byte Folded Reload + adcl %ebx, %eax + movl %eax, %edi + adcl (%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + movl 32(%esp), %ebx # 4-byte Reload + adcl 4(%esp), %ebx # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 24(%esp), %ebp # 4-byte Reload + addl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl %esi, %edi + movl %edi, 4(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 32(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + addl %ecx, %esi + mulxl 72(%esp), %ecx, %ebp # 4-byte Folded Reload + adcl %eax, %ecx + mulxl 68(%esp), %eax, %edi # 4-byte Folded Reload + adcl %ebp, %eax + mulxl 64(%esp), %ebx, %ebp # 4-byte Folded Reload + adcl %edi, %ebx + adcl $0, %ebp + movl 28(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 12(%esp), %edi # 4-byte Reload + addl 24(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 
16(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 112(%esp), %edx + movl 12(%edx), %edx + mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + mulxl 40(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + mulxl 44(%esp), %esi, %edi # 4-byte Folded Reload + addl %eax, %edi + mulxl 56(%esp), %eax, %edx # 4-byte Folded Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl %ecx, %edx + movl %edx, %ecx + movl 32(%esp), %eax # 4-byte Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 8(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, %ebp + mulxl 72(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, %eax + mulxl 68(%esp), %ebx, %edi # 4-byte Folded Reload + adcl %ecx, %ebx + mulxl 64(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %edi, %ecx + adcl $0, %esi + movl 28(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 12(%esp), %edi # 4-byte Reload + 
addl 8(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 16(%esp), %ebx # 4-byte Folded Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 112(%esp), %edx + movl 16(%edx), %edx + mulxl 40(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + mulxl 44(%esp), %eax, %edi # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + addl %ebp, %edi + mulxl 48(%esp), %ebp, %eax # 4-byte Folded Reload + movl %ebp, 36(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + mulxl 52(%esp), %ebp, %eax # 4-byte Folded Reload + movl %ebp, 32(%esp) # 4-byte Spill + mulxl 56(%esp), %ebp, %edx # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 52(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + movl 48(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 44(%esp), %eax # 4-byte Reload + addl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl %ebx, 52(%esp) # 4-byte Folded Spill + adcl %ecx, 56(%esp) # 4-byte Folded Spill + adcl %esi, %ebp + movl %ebp, 36(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + sbbl %ebx, %ebx + movl 60(%esp), %edx # 4-byte Reload + imull %eax, %edx + mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + mulxl 80(%esp), %ebp, %ecx # 4-byte Folded Reload + addl %eax, %ebp + mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload + adcl %ecx, %edi + movl %edx, %ecx + mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload + movl %edx, 
32(%esp) # 4-byte Spill + adcl %eax, %esi + movl %ecx, %edx + mulxl 64(%esp), %edx, %ecx # 4-byte Folded Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + adcl $0, %ecx + andl $1, %ebx + movl 60(%esp), %eax # 4-byte Reload + addl 44(%esp), %eax # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + adcl $0, %ebx + subl 76(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 76(%esp) # 4-byte Spill + movl %edi, %eax + sbbl 80(%esp), %eax # 4-byte Folded Reload + movl %esi, %ebp + sbbl 72(%esp), %ebp # 4-byte Folded Reload + sbbl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl %ecx, %edx + sbbl 64(%esp), %edx # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB68_2 +# BB#1: + movl %eax, %edi +.LBB68_2: + testb %bl, %bl + movl 44(%esp), %ebx # 4-byte Reload + jne .LBB68_4 +# BB#3: + movl 76(%esp), %ebx # 4-byte Reload +.LBB68_4: + movl 104(%esp), %eax + movl %ebx, (%eax) + movl %edi, 4(%eax) + jne .LBB68_6 +# BB#5: + movl %ebp, %esi +.LBB68_6: + movl %esi, 8(%eax) + movl 60(%esp), %esi # 4-byte Reload + jne .LBB68_8 +# BB#7: + movl 80(%esp), %esi # 4-byte Reload +.LBB68_8: + movl %esi, 12(%eax) + jne .LBB68_10 +# BB#9: + movl %edx, %ecx +.LBB68_10: + movl %ecx, 16(%eax) + addl $84, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end68: + .size mcl_fp_mont5Lbmi2, .Lfunc_end68-mcl_fp_mont5Lbmi2 + + .globl mcl_fp_montNF5Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF5Lbmi2,@function +mcl_fp_montNF5Lbmi2: # @mcl_fp_montNF5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $68, %esp + movl 92(%esp), %edi + movl (%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 4(%edi), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 
96(%esp), %ecx + movl (%ecx), %ebx + mulxl %ebx, %ecx, %esi + movl %eax, %edx + mulxl %ebx, %edx, %eax + movl %edx, 60(%esp) # 4-byte Spill + addl %ecx, %eax + movl %eax, %ecx + movl 8(%edi), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %ebx, %eax, %ebp + adcl %esi, %eax + movl %eax, %esi + movl 12(%edi), %edx + movl %edx, 28(%esp) # 4-byte Spill + mulxl %ebx, %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 16(%edi), %edx + movl %edx, 24(%esp) # 4-byte Spill + mulxl %ebx, %edx, %eax + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 100(%esp), %ebx + movl -4(%ebx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + movl %edi, %edx + imull %eax, %edx + movl (%ebx), %eax + movl %eax, 64(%esp) # 4-byte Spill + mulxl %eax, %ebp, %eax + movl %eax, 12(%esp) # 4-byte Spill + addl %edi, %ebp + movl 4(%ebx), %eax + movl %eax, 60(%esp) # 4-byte Spill + mulxl %eax, %eax, %edi + movl %edi, 8(%esp) # 4-byte Spill + adcl %ecx, %eax + movl %eax, %edi + movl 8(%ebx), %eax + movl %eax, 56(%esp) # 4-byte Spill + mulxl %eax, %eax, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + adcl %esi, %eax + movl %eax, %esi + movl 12(%ebx), %eax + movl %eax, 52(%esp) # 4-byte Spill + mulxl %eax, %ecx, %ebp + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl 16(%ebx), %eax + movl %eax, 48(%esp) # 4-byte Spill + mulxl %eax, %ebx, %edx + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl 20(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl %ebp, %ebx + adcl %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl 4(%eax), %edx + mulxl 36(%esp), %ecx, %esi # 
4-byte Folded Reload + mulxl 40(%esp), %edi, %eax # 4-byte Folded Reload + addl %ecx, %eax + mulxl 32(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl %esi, %ebp + mulxl 28(%esp), %esi, %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl (%esp), %esi # 4-byte Folded Reload + mulxl 24(%esp), %edx, %ecx # 4-byte Folded Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + adcl $0, %ecx + addl 16(%esp), %edi # 4-byte Folded Reload + adcl 12(%esp), %eax # 4-byte Folded Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl %edi, %edx + imull 44(%esp), %edx # 4-byte Folded Reload + mulxl 64(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + addl %edi, %ebx + mulxl 60(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl %eax, %edi + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl %ebp, %ecx + mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload + adcl %esi, %eax + mulxl 48(%esp), %ebx, %edx # 4-byte Folded Reload + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl 20(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %ebx, 8(%esp) # 4-byte Spill + adcl %edx, %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl 8(%eax), %edx + mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 40(%esp), %ebp, %esi # 4-byte Folded Reload + addl %ecx, %esi + mulxl 32(%esp), %edi, %ecx # 4-byte Folded Reload + adcl %eax, %edi + mulxl 28(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte 
Spill + adcl %ecx, %ebx + mulxl 24(%esp), %ecx, %eax # 4-byte Folded Reload + adcl (%esp), %ecx # 4-byte Folded Reload + adcl $0, %eax + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl 4(%esp), %edi # 4-byte Folded Reload + adcl 8(%esp), %ebx # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %ebp, %eax + movl %eax, %edx + imull 44(%esp), %edx # 4-byte Folded Reload + mulxl 64(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + addl %eax, %ebp + mulxl 60(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl %esi, %ebp + movl %ebp, %esi + mulxl 56(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl %edi, %ebp + movl %ebp, %eax + mulxl 52(%esp), %ebp, %edi # 4-byte Folded Reload + adcl %ebx, %ebp + movl %ebp, %ebx + mulxl 48(%esp), %ebp, %edx # 4-byte Folded Reload + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 4(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + adcl %edi, %ebp + movl %ebp, 8(%esp) # 4-byte Spill + adcl %edx, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl 12(%eax), %edx + mulxl 36(%esp), %ecx, %esi # 4-byte Folded Reload + mulxl 40(%esp), %ebx, %ebp # 4-byte Folded Reload + addl %ecx, %ebp + mulxl 32(%esp), %ecx, %edi # 4-byte Folded Reload + adcl %esi, %ecx + mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + adcl %edi, %esi + mulxl 24(%esp), %edi, %eax # 4-byte Folded Reload + adcl (%esp), %edi # 4-byte Folded Reload + adcl $0, %eax + addl 16(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 
4-byte Spill + adcl 12(%esp), %ebp # 4-byte Folded Reload + adcl 4(%esp), %ecx # 4-byte Folded Reload + adcl 8(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + adcl $0, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %ebx, %edx + imull 44(%esp), %edx # 4-byte Folded Reload + mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + addl 16(%esp), %ebx # 4-byte Folded Reload + mulxl 60(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %ebx, 16(%esp) # 4-byte Spill + mulxl 56(%esp), %eax, %ebx # 4-byte Folded Reload + adcl %ecx, %eax + mulxl 52(%esp), %ecx, %ebp # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, %esi + mulxl 48(%esp), %ecx, %edx # 4-byte Folded Reload + adcl %edi, %ecx + movl 20(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 12(%esp), %edi # 4-byte Reload + addl %edi, 16(%esp) # 4-byte Folded Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl %ebx, %esi + movl %esi, 8(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + adcl %edx, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl 16(%eax), %edx + mulxl 36(%esp), %eax, %ebp # 4-byte Folded Reload + mulxl 40(%esp), %edi, %ebx # 4-byte Folded Reload + addl %eax, %ebx + mulxl 32(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + adcl %ebp, %eax + mulxl 28(%esp), %ebp, %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + mulxl 24(%esp), %edx, %esi # 4-byte Folded Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + adcl $0, %esi + addl 16(%esp), %edi # 4-byte Folded Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 
4-byte Spill + adcl $0, %esi + movl 44(%esp), %edx # 4-byte Reload + imull %edi, %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + addl %edi, %ecx + mulxl 60(%esp), %edi, %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl %ebx, %edi + movl %edx, %eax + mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl %ebp, %ecx + movl %eax, %edx + mulxl 48(%esp), %ebp, %edx # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl $0, %esi + addl 44(%esp), %edi # 4-byte Folded Reload + adcl 36(%esp), %ebx # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl %edx, %esi + movl %edi, %eax + subl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + sbbl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + sbbl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl %ebp, %eax + sbbl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl %esi, %edx + sbbl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + sarl $31, %edx + testl %edx, %edx + js .LBB69_2 +# BB#1: + movl 40(%esp), %edi # 4-byte Reload +.LBB69_2: + movl 88(%esp), %edx + movl %edi, (%edx) + js .LBB69_4 +# BB#3: + movl 44(%esp), %ebx # 4-byte Reload +.LBB69_4: + movl %ebx, 4(%edx) + js .LBB69_6 +# BB#5: + movl 56(%esp), %ecx # 4-byte Reload +.LBB69_6: + movl %ecx, 8(%edx) + js .LBB69_8 +# BB#7: + movl 60(%esp), %ebp # 4-byte Reload +.LBB69_8: + movl %ebp, 12(%edx) + js .LBB69_10 +# BB#9: + movl 64(%esp), %esi # 4-byte Reload +.LBB69_10: + movl %esi, 16(%edx) + addl $68, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl 
+.Lfunc_end69: + .size mcl_fp_montNF5Lbmi2, .Lfunc_end69-mcl_fp_montNF5Lbmi2 + + .globl mcl_fp_montRed5Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed5Lbmi2,@function +mcl_fp_montRed5Lbmi2: # @mcl_fp_montRed5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $68, %esp + movl 96(%esp), %eax + movl -4(%eax), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl (%eax), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 92(%esp), %ecx + movl (%ecx), %edx + movl %edx, 44(%esp) # 4-byte Spill + imull %esi, %edx + movl 16(%eax), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 12(%eax), %ebx + movl %ebx, 64(%esp) # 4-byte Spill + movl 8(%eax), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 4(%eax), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + mulxl %esi, %esi, %eax + movl %esi, 36(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + mulxl %ebx, %esi, %eax + movl %esi, 24(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + mulxl %ebp, %ebp, %eax + mulxl %ecx, %esi, %ecx + mulxl %edi, %edx, %ebx + addl %esi, %ebx + adcl %ebp, %ecx + adcl 24(%esp), %eax # 4-byte Folded Reload + movl 28(%esp), %edi # 4-byte Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + movl 32(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 44(%esp), %edx # 4-byte Folded Reload + movl 92(%esp), %ebp + adcl 4(%ebp), %ebx + adcl 8(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + adcl 12(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + adcl 16(%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + adcl 20(%ebp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 36(%ebp), %edx + movl 32(%ebp), %esi + movl 28(%ebp), %edi + movl 24(%ebp), %eax + adcl $0, %eax + movl %eax, 8(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 24(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 44(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, %esi + movl %ebx, %edx + imull 48(%esp), %edx # 4-byte Folded Reload + mulxl 
60(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + mulxl 56(%esp), %ebp, %eax # 4-byte Folded Reload + mulxl 52(%esp), %edi, %ecx # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + addl %ebp, %ecx + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + mulxl 64(%esp), %eax, %edi # 4-byte Folded Reload + movl %edi, (%esp) # 4-byte Spill + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + mulxl %eax, %edi, %edx + adcl (%esp), %edi # 4-byte Folded Reload + adcl $0, %edx + addl %ebx, 4(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + movl 28(%esp), %ebx # 4-byte Reload + adcl %ebx, 20(%esp) # 4-byte Folded Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %ecx, %edx + imull 48(%esp), %edx # 4-byte Folded Reload + mulxl %eax, %edi, %eax + movl %edi, 4(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + mulxl 56(%esp), %edi, %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + addl %edi, %ebp + mulxl 60(%esp), %ebx, %eax # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + mulxl 64(%esp), %edi, %edx # 4-byte Folded Reload + adcl %eax, %edi + adcl 4(%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + movl 32(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ecx, 8(%esp) # 4-byte Folded Spill + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 12(%esp), %edi # 4-byte Folded Reload + 
adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 16(%esp) # 4-byte Spill + movl %ebp, %edx + imull 48(%esp), %edx # 4-byte Folded Reload + mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 52(%esp), %eax, %esi # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + addl %ecx, %esi + mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 64(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + movl %edx, %eax + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl 28(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebp, 12(%esp) # 4-byte Folded Spill + adcl %ebx, %esi + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 16(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl 48(%esp), %edx # 4-byte Reload + imull %esi, %edx + mulxl 52(%esp), %eax, %ecx # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + mulxl 56(%esp), %edi, %eax # 4-byte Folded Reload + addl %ecx, %edi + movl %edx, %ebp + mulxl 60(%esp), %ecx, %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl %eax, %ecx + movl %ebp, %edx + mulxl 64(%esp), %eax, %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ebp, %edx + mulxl 40(%esp), %ebp, %edx # 
4-byte Folded Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edx + addl %esi, 48(%esp) # 4-byte Folded Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 48(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %ebx, %esi + adcl $0, %esi + movl %edi, %ebx + subl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + movl %ecx, %ebx + sbbl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + sbbl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebx + sbbl 64(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 64(%esp) # 4-byte Spill + movl %edx, %ebp + sbbl 40(%esp), %ebp # 4-byte Folded Reload + sbbl $0, %esi + andl $1, %esi + jne .LBB70_2 +# BB#1: + movl 56(%esp), %ecx # 4-byte Reload +.LBB70_2: + movl %esi, %eax + testb %al, %al + jne .LBB70_4 +# BB#3: + movl 52(%esp), %edi # 4-byte Reload +.LBB70_4: + movl 88(%esp), %esi + movl %edi, (%esi) + movl %ecx, 4(%esi) + movl 48(%esp), %eax # 4-byte Reload + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB70_6 +# BB#5: + movl %ebx, %ecx +.LBB70_6: + movl %ecx, 8(%esi) + jne .LBB70_8 +# BB#7: + movl 64(%esp), %eax # 4-byte Reload +.LBB70_8: + movl %eax, 12(%esi) + jne .LBB70_10 +# BB#9: + movl %ebp, %edx +.LBB70_10: + movl %edx, 16(%esi) + addl $68, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end70: + .size mcl_fp_montRed5Lbmi2, .Lfunc_end70-mcl_fp_montRed5Lbmi2 + + .globl mcl_fp_addPre5Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre5Lbmi2,@function +mcl_fp_addPre5Lbmi2: # @mcl_fp_addPre5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 24(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 8(%eax), %edi + adcl 8(%esi), %edi + movl 12(%esi), %ebx 
+ movl 16(%esi), %esi + adcl 12(%eax), %ebx + movl 16(%eax), %eax + movl 20(%esp), %ebp + movl %ecx, (%ebp) + movl %edx, 4(%ebp) + movl %edi, 8(%ebp) + movl %ebx, 12(%ebp) + adcl %esi, %eax + movl %eax, 16(%ebp) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end71: + .size mcl_fp_addPre5Lbmi2, .Lfunc_end71-mcl_fp_addPre5Lbmi2 + + .globl mcl_fp_subPre5Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre5Lbmi2,@function +mcl_fp_subPre5Lbmi2: # @mcl_fp_subPre5Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %ecx + movl (%ecx), %edx + xorl %eax, %eax + movl 20(%esp), %esi + subl (%esi), %edx + movl 12(%esp), %edi + movl %edx, (%edi) + movl 4(%ecx), %edx + sbbl 4(%esi), %edx + movl %edx, 4(%edi) + movl 8(%ecx), %edx + sbbl 8(%esi), %edx + movl %edx, 8(%edi) + movl 12(%ecx), %edx + sbbl 12(%esi), %edx + movl %edx, 12(%edi) + movl 16(%esi), %edx + movl 16(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 16(%edi) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + retl +.Lfunc_end72: + .size mcl_fp_subPre5Lbmi2, .Lfunc_end72-mcl_fp_subPre5Lbmi2 + + .globl mcl_fp_shr1_5Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_5Lbmi2,@function +mcl_fp_shr1_5Lbmi2: # @mcl_fp_shr1_5Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 20(%esp), %eax + movl 16(%eax), %ecx + movl 12(%eax), %edx + movl 8(%eax), %esi + movl (%eax), %edi + movl 4(%eax), %eax + shrdl $1, %eax, %edi + movl 16(%esp), %ebx + movl %edi, (%ebx) + shrdl $1, %esi, %eax + movl %eax, 4(%ebx) + shrdl $1, %edx, %esi + movl %esi, 8(%ebx) + shrdl $1, %ecx, %edx + movl %edx, 12(%ebx) + shrl %ecx + movl %ecx, 16(%ebx) + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end73: + .size mcl_fp_shr1_5Lbmi2, .Lfunc_end73-mcl_fp_shr1_5Lbmi2 + + .globl mcl_fp_add5Lbmi2 + .align 16, 0x90 + .type mcl_fp_add5Lbmi2,@function +mcl_fp_add5Lbmi2: # @mcl_fp_add5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %ebx + movl (%ebx), %eax + movl 4(%ebx), 
%ecx + movl 24(%esp), %edi + addl (%edi), %eax + adcl 4(%edi), %ecx + movl 8(%ebx), %edx + adcl 8(%edi), %edx + movl 12(%edi), %esi + movl 16(%edi), %edi + adcl 12(%ebx), %esi + adcl 16(%ebx), %edi + movl 20(%esp), %ebx + movl %eax, (%ebx) + movl %ecx, 4(%ebx) + movl %edx, 8(%ebx) + movl %esi, 12(%ebx) + movl %edi, 16(%ebx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 32(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %ecx + sbbl 8(%ebp), %edx + sbbl 12(%ebp), %esi + sbbl 16(%ebp), %edi + sbbl $0, %ebx + testb $1, %bl + jne .LBB74_2 +# BB#1: # %nocarry + movl 20(%esp), %ebx + movl %eax, (%ebx) + movl %ecx, 4(%ebx) + movl %edx, 8(%ebx) + movl %esi, 12(%ebx) + movl %edi, 16(%ebx) +.LBB74_2: # %carry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end74: + .size mcl_fp_add5Lbmi2, .Lfunc_end74-mcl_fp_add5Lbmi2 + + .globl mcl_fp_addNF5Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF5Lbmi2,@function +mcl_fp_addNF5Lbmi2: # @mcl_fp_addNF5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %esi + movl (%esi), %ebx + movl 4(%esi), %eax + movl 44(%esp), %edi + addl (%edi), %ebx + adcl 4(%edi), %eax + movl 16(%esi), %ecx + movl 12(%esi), %edx + movl 8(%esi), %ebp + adcl 8(%edi), %ebp + adcl 12(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl 16(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 52(%esp), %edi + movl %ebx, %esi + subl (%edi), %esi + movl %esi, (%esp) # 4-byte Spill + movl %eax, %esi + sbbl 4(%edi), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl %ebp, %esi + sbbl 8(%edi), %esi + sbbl 12(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %ecx, %edx + sbbl 16(%edi), %edx + movl %edx, %edi + sarl $31, %edi + testl %edi, %edi + js .LBB75_2 +# BB#1: + movl (%esp), %ebx # 4-byte Reload +.LBB75_2: + movl 40(%esp), %edi + movl %ebx, (%edi) + js .LBB75_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB75_4: + movl %eax, 4(%edi) + movl 12(%esp), %ecx # 4-byte Reload + js .LBB75_6 +# BB#5: + movl 
%esi, %ebp +.LBB75_6: + movl %ebp, 8(%edi) + movl 16(%esp), %eax # 4-byte Reload + js .LBB75_8 +# BB#7: + movl 8(%esp), %ecx # 4-byte Reload +.LBB75_8: + movl %ecx, 12(%edi) + js .LBB75_10 +# BB#9: + movl %edx, %eax +.LBB75_10: + movl %eax, 16(%edi) + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end75: + .size mcl_fp_addNF5Lbmi2, .Lfunc_end75-mcl_fp_addNF5Lbmi2 + + .globl mcl_fp_sub5Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub5Lbmi2,@function +mcl_fp_sub5Lbmi2: # @mcl_fp_sub5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + xorl %ebx, %ebx + movl 28(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %ecx + movl 8(%edi), %edx + sbbl 8(%ebp), %edx + movl 12(%edi), %esi + sbbl 12(%ebp), %esi + movl 16(%edi), %edi + sbbl 16(%ebp), %edi + movl 20(%esp), %ebp + movl %eax, (%ebp) + movl %ecx, 4(%ebp) + movl %edx, 8(%ebp) + movl %esi, 12(%ebp) + movl %edi, 16(%ebp) + sbbl $0, %ebx + testb $1, %bl + je .LBB76_2 +# BB#1: # %carry + movl 32(%esp), %ebx + addl (%ebx), %eax + movl %eax, (%ebp) + adcl 4(%ebx), %ecx + movl %ecx, 4(%ebp) + adcl 8(%ebx), %edx + movl %edx, 8(%ebp) + movl 12(%ebx), %eax + adcl %esi, %eax + movl %eax, 12(%ebp) + movl 16(%ebx), %eax + adcl %edi, %eax + movl %eax, 16(%ebp) +.LBB76_2: # %nocarry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end76: + .size mcl_fp_sub5Lbmi2, .Lfunc_end76-mcl_fp_sub5Lbmi2 + + .globl mcl_fp_subNF5Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF5Lbmi2,@function +mcl_fp_subNF5Lbmi2: # @mcl_fp_subNF5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 40(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 44(%esp), %ebx + subl (%ebx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + sbbl 4(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 16(%edi), %esi + movl 12(%edi), %eax + movl 8(%edi), %ecx + sbbl 8(%ebx), %ecx + movl %ecx, (%esp) # 4-byte Spill + sbbl 
12(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 16(%ebx), %esi + movl %esi, %ebx + sarl $31, %ebx + movl %ebx, %ebp + shldl $1, %esi, %ebp + movl 48(%esp), %edi + movl 4(%edi), %ecx + andl %ebp, %ecx + andl (%edi), %ebp + movl 16(%edi), %edx + andl %ebx, %edx + rorxl $31, %ebx, %eax + andl 12(%edi), %ebx + andl 8(%edi), %eax + addl 4(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl 36(%esp), %edi + movl %ebp, (%edi) + adcl (%esp), %eax # 4-byte Folded Reload + movl %ecx, 4(%edi) + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl %eax, 8(%edi) + movl %ebx, 12(%edi) + adcl %esi, %edx + movl %edx, 16(%edi) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end77: + .size mcl_fp_subNF5Lbmi2, .Lfunc_end77-mcl_fp_subNF5Lbmi2 + + .globl mcl_fpDbl_add5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add5Lbmi2,@function +mcl_fpDbl_add5Lbmi2: # @mcl_fpDbl_add5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl 56(%esp), %edx + movl 52(%esp), %ecx + movl 12(%ecx), %ebx + movl 16(%ecx), %ebp + movl 8(%edx), %esi + movl (%edx), %edi + addl (%ecx), %edi + movl 48(%esp), %eax + movl %edi, (%eax) + movl 4(%edx), %edi + adcl 4(%ecx), %edi + adcl 8(%ecx), %esi + adcl 12(%edx), %ebx + adcl 16(%edx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl %edi, 4(%eax) + movl 28(%edx), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl %esi, 8(%eax) + movl 20(%edx), %esi + movl %ebx, 12(%eax) + movl 20(%ecx), %ebp + adcl %esi, %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 24(%edx), %esi + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 16(%eax) + movl 24(%ecx), %ebx + adcl %esi, %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 28(%ecx), %edi + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl 32(%ecx), %esi + adcl %eax, %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 36(%edx), %eax + movl 36(%ecx), %edx + adcl %eax, %edx + 
sbbl %eax, %eax + andl $1, %eax + movl %ebp, %ecx + movl 60(%esp), %ebp + subl (%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + sbbl 4(%ebp), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + sbbl 8(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl %esi, %ebx + movl %edx, %esi + sbbl 12(%ebp), %ebx + sbbl 16(%ebp), %edx + sbbl $0, %eax + andl $1, %eax + jne .LBB78_2 +# BB#1: + movl %edx, %esi +.LBB78_2: + testb %al, %al + movl 12(%esp), %ebp # 4-byte Reload + jne .LBB78_4 +# BB#3: + movl (%esp), %ebp # 4-byte Reload +.LBB78_4: + movl 48(%esp), %eax + movl %ebp, 20(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl 20(%esp), %edx # 4-byte Reload + movl 16(%esp), %edi # 4-byte Reload + jne .LBB78_6 +# BB#5: + movl 4(%esp), %edi # 4-byte Reload +.LBB78_6: + movl %edi, 24(%eax) + jne .LBB78_8 +# BB#7: + movl 8(%esp), %edx # 4-byte Reload +.LBB78_8: + movl %edx, 28(%eax) + jne .LBB78_10 +# BB#9: + movl %ebx, %ecx +.LBB78_10: + movl %ecx, 32(%eax) + movl %esi, 36(%eax) + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end78: + .size mcl_fpDbl_add5Lbmi2, .Lfunc_end78-mcl_fpDbl_add5Lbmi2 + + .globl mcl_fpDbl_sub5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub5Lbmi2,@function +mcl_fpDbl_sub5Lbmi2: # @mcl_fpDbl_sub5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 40(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edi + movl 44(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%eax), %ebx + sbbl 8(%edx), %ebx + movl 36(%esp), %ecx + movl %esi, (%ecx) + movl 12(%eax), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ecx) + movl 16(%eax), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ecx) + movl 20(%edx), %ebx + movl %esi, 12(%ecx) + movl 20(%eax), %esi + sbbl %ebx, %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 24(%edx), %esi + movl %edi, 16(%ecx) + movl 24(%eax), %ebp + sbbl %esi, %ebp + movl 28(%edx), %esi + movl 28(%eax), %edi + sbbl %esi, %edi + movl %edi, (%esp) # 4-byte Spill + movl 32(%edx), %esi 
+ movl 32(%eax), %edi + sbbl %esi, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 36(%edx), %edx + movl 36(%eax), %eax + sbbl %edx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl $0, %edx + sbbl $0, %edx + andl $1, %edx + movl 48(%esp), %ebx + jne .LBB79_1 +# BB#2: + xorl %eax, %eax + jmp .LBB79_3 +.LBB79_1: + movl 16(%ebx), %eax +.LBB79_3: + testb %dl, %dl + jne .LBB79_4 +# BB#5: + movl $0, %edx + movl $0, %esi + jmp .LBB79_6 +.LBB79_4: + movl (%ebx), %esi + movl 4(%ebx), %edx +.LBB79_6: + jne .LBB79_7 +# BB#8: + movl $0, %edi + jmp .LBB79_9 +.LBB79_7: + movl 12(%ebx), %edi +.LBB79_9: + jne .LBB79_10 +# BB#11: + xorl %ebx, %ebx + jmp .LBB79_12 +.LBB79_10: + movl 8(%ebx), %ebx +.LBB79_12: + addl 4(%esp), %esi # 4-byte Folded Reload + adcl %ebp, %edx + movl %esi, 20(%ecx) + adcl (%esp), %ebx # 4-byte Folded Reload + movl %edx, 24(%ecx) + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %ebx, 28(%ecx) + movl %edi, 32(%ecx) + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end79: + .size mcl_fpDbl_sub5Lbmi2, .Lfunc_end79-mcl_fpDbl_sub5Lbmi2 + + .globl mcl_fp_mulUnitPre6Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre6Lbmi2,@function +mcl_fp_mulUnitPre6Lbmi2: # @mcl_fp_mulUnitPre6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $12, %esp + movl 40(%esp), %edx + movl 36(%esp), %esi + mulxl 4(%esi), %ecx, %edi + mulxl (%esi), %eax, %ebx + movl %eax, 8(%esp) # 4-byte Spill + addl %ecx, %ebx + movl %ebx, 4(%esp) # 4-byte Spill + mulxl 8(%esi), %ebp, %eax + adcl %edi, %ebp + mulxl 12(%esi), %ecx, %edi + adcl %eax, %ecx + mulxl 16(%esi), %eax, %ebx + movl %ebx, (%esp) # 4-byte Spill + adcl %edi, %eax + mulxl 20(%esi), %edx, %esi + movl 32(%esp), %edi + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, (%edi) + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%edi) + movl %ebp, 8(%edi) + movl %ecx, 12(%edi) + movl %eax, 16(%edi) + adcl (%esp), %edx # 
4-byte Folded Reload + movl %edx, 20(%edi) + adcl $0, %esi + movl %esi, 24(%edi) + addl $12, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end80: + .size mcl_fp_mulUnitPre6Lbmi2, .Lfunc_end80-mcl_fp_mulUnitPre6Lbmi2 + + .globl mcl_fpDbl_mulPre6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre6Lbmi2,@function +mcl_fpDbl_mulPre6Lbmi2: # @mcl_fpDbl_mulPre6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 80(%esp), %ebp + movl (%ebp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 4(%ebp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl (%eax), %eax + mulxl %eax, %esi, %edi + movl %ecx, %edx + mulxl %eax, %edx, %ecx + movl %edx, 28(%esp) # 4-byte Spill + addl %esi, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 8(%ebp), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %eax, %esi, %ebx + adcl %edi, %esi + movl 12(%ebp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %ebp, %ecx + mulxl %eax, %edi, %ebp + adcl %ebx, %edi + movl 16(%ecx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %ebx, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %ecx, %edx + movl 20(%edx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %eax, %eax, %ecx + adcl 20(%esp), %eax # 4-byte Folded Reload + movl 76(%esp), %edx + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, (%edx) + adcl $0, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 84(%esp), %edx + movl 4(%edx), %ebp + movl 52(%esp), %edx # 4-byte Reload + mulxl %ebp, %edx, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + addl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + mulxl %ebp, %ecx, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl %esi, %ecx + movl 44(%esp), %edx # 4-byte Reload + mulxl %ebp, %esi, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl %edi, %esi + movl 40(%esp), %edx # 4-byte Reload + mulxl %ebp, %edi, %edx + movl %edx, 40(%esp) # 4-byte Spill 
+ adcl %ebx, %edi + movl 36(%esp), %edx # 4-byte Reload + mulxl %ebp, %ebx, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %eax, %ebx + movl 32(%esp), %edx # 4-byte Reload + mulxl %ebp, %eax, %edx + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + sbbl %eax, %eax + andl $1, %eax + addl 28(%esp), %ecx # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + adcl 44(%esp), %edi # 4-byte Folded Reload + adcl 40(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 28(%esp) # 4-byte Spill + adcl %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl 52(%esp), %edx # 4-byte Reload + movl %edx, 4(%eax) + movl 80(%esp), %eax + movl (%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 8(%eax), %eax + mulxl %eax, %edx, %ebp + movl %ebp, 16(%esp) # 4-byte Spill + addl %ecx, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 80(%esp), %ebp + movl 4(%ebp), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 8(%ebp), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl %edi, %ecx + movl %ecx, %esi + movl 12(%ebp), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %edi, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + adcl %ebx, %edi + movl 16(%ebp), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %eax, %ebx, %ecx + movl %ecx, (%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 20(%ebp), %edx + movl %edx, 28(%esp) # 4-byte Spill + mulxl %eax, %ebp, %edx + adcl 20(%esp), %ebp # 4-byte Folded Reload + sbbl %ecx, %ecx + andl $1, %ecx + movl 16(%esp), %eax # 4-byte Reload + addl %eax, 52(%esp) # 4-byte Folded Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + adcl 4(%esp), %ebx # 4-byte Folded 
Reload + adcl (%esp), %ebp # 4-byte Folded Reload + adcl %edx, %ecx + movl 76(%esp), %eax + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + movl 84(%esp), %eax + movl 12(%eax), %eax + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 24(%esp) # 4-byte Spill + addl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 44(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %esi, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl %edi, %esi + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %ebx, %edi + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 28(%esp), %edx # 4-byte Reload + mulxl %eax, %ebp, %edx + adcl %ecx, %ebp + sbbl %eax, %eax + andl $1, %eax + movl 24(%esp), %ecx # 4-byte Reload + addl %ecx, 52(%esp) # 4-byte Folded Spill + adcl 44(%esp), %esi # 4-byte Folded Reload + adcl 40(%esp), %edi # 4-byte Folded Reload + adcl 36(%esp), %ebx # 4-byte Folded Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 80(%esp), %ecx + movl (%ecx), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 16(%eax), %eax + mulxl %eax, %edx, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + addl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl 80(%esp), %ecx + movl 4(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl %esi, %ecx + movl 80(%esp), %edx + movl 8(%edx), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %eax, %esi, %edx + movl %edx, 12(%esp) # 4-byte Spill + 
adcl %edi, %esi + movl %esi, %edi + movl 80(%esp), %esi + movl %esi, %edx + movl 12(%edx), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %eax, %esi, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl %ebx, %esi + movl 80(%esp), %edx + movl 16(%edx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %ebx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 80(%esp), %edx + movl 20(%edx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %eax, %ebp, %edx + adcl 24(%esp), %ebp # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 20(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl 8(%esp), %ebx # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl 28(%esp), %edx # 4-byte Reload + movl %edx, 16(%eax) + movl 84(%esp), %eax + movl 20(%eax), %eax + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 28(%esp) # 4-byte Spill + addl %ecx, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %edi + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl %esi, %ecx + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %esi, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl %ebx, %esi + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + sbbl %ebp, %ebp + andl $1, %ebp + addl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 52(%esp), %ecx # 4-byte Folded Reload + adcl 40(%esp), %esi # 
4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %eax + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 20(%eax) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 24(%eax) + movl %ecx, 28(%eax) + movl %esi, 32(%eax) + movl %ebx, 36(%eax) + movl %edx, 40(%eax) + adcl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%eax) + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end81: + .size mcl_fpDbl_mulPre6Lbmi2, .Lfunc_end81-mcl_fpDbl_mulPre6Lbmi2 + + .globl mcl_fpDbl_sqrPre6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre6Lbmi2,@function +mcl_fpDbl_sqrPre6Lbmi2: # @mcl_fpDbl_sqrPre6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %ebp + movl (%ebp), %ecx + movl 4(%ebp), %eax + movl %eax, %edx + mulxl %ecx, %edi, %esi + movl %edi, 36(%esp) # 4-byte Spill + movl %esi, 52(%esp) # 4-byte Spill + movl %ecx, %edx + mulxl %ecx, %ebx, %edx + movl %ebx, 28(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 8(%ebp), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %ecx, %edi, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl %esi, %edi + movl 12(%ebp), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl 16(%ebp), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %ecx, %edx, %esi + movl %esi, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl 20(%ebp), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %ecx, %ebp, %edx + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl 80(%esp), %ecx + movl 28(%esp), %esi # 4-byte Reload + movl %esi, (%ecx) + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + addl %ecx, 56(%esp) # 4-byte Folded Spill + movl %eax, %edx + mulxl %eax, %esi, %ecx + 
movl %ecx, 36(%esp) # 4-byte Spill + adcl %edi, %esi + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl %ebp, %edi + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %eax + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, %ebp + sbbl %edx, %edx + andl $1, %edx + addl 52(%esp), %esi # 4-byte Folded Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + adcl 48(%esp), %ebx # 4-byte Folded Reload + adcl 44(%esp), %edi # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 32(%esp) # 4-byte Spill + adcl %eax, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl 56(%esp), %edx # 4-byte Reload + movl %edx, 4(%eax) + movl 84(%esp), %eax + movl (%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 8(%eax), %ebp + mulxl %ebp, %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 4(%eax), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %ebp, %edx, %esi + movl %esi, 16(%esp) # 4-byte Spill + adcl %ecx, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ebp, %edx + mulxl %ebp, %ecx, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %eax, %esi + movl 12(%esi), %eax + movl %eax, %edx + mulxl %ebp, %ebx, %edx + movl %ebx, 28(%esp) # 4-byte Spill + movl %edx, 52(%esp) # 4-byte Spill + adcl %ebx, %edi + movl 16(%esi), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %ebp, %ebx, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl 20(%esi), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %ebp, %esi, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte 
Folded Reload + sbbl %ebp, %ebp + andl $1, %ebp + movl 20(%esp), %edx # 4-byte Reload + addl %edx, 56(%esp) # 4-byte Folded Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + adcl 52(%esp), %ebx # 4-byte Folded Reload + adcl 8(%esp), %esi # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + addl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %eax, %edx + mulxl %eax, %ecx, %edx + movl %edx, 28(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, %esi + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + adcl %ebp, %ebx + sbbl %ecx, %ecx + andl $1, %ecx + movl 20(%esp), %eax # 4-byte Reload + addl %eax, 48(%esp) # 4-byte Folded Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl 56(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 52(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + adcl %edx, %ecx + movl 80(%esp), %eax + movl 44(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 12(%eax) + movl 84(%esp), %esi + movl (%esi), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 16(%esi), %ebp + mulxl %ebp, %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + addl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl 4(%esi), %edx + movl %edx, 
40(%esp) # 4-byte Spill + mulxl %ebp, %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 8(%esi), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %ebp, %eax, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 12(%esi), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %ebp, %edi, %eax + movl %eax, 4(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %ebp, %edx + mulxl %ebp, %eax, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebx, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 20(%esi), %ebx + movl %ebx, %edx + mulxl %ebp, %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %edx, 28(%esp) # 4-byte Spill + adcl %edx, %ecx + sbbl %ebp, %ebp + andl $1, %ebp + movl 8(%esp), %esi # 4-byte Reload + addl 24(%esp), %esi # 4-byte Folded Reload + movl 16(%esp), %edx # 4-byte Reload + adcl %edx, 56(%esp) # 4-byte Folded Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl 4(%esp), %edx # 4-byte Reload + adcl %edx, 52(%esp) # 4-byte Folded Spill + adcl (%esp), %ecx # 4-byte Folded Reload + adcl %eax, %ebp + movl 44(%esp), %edx # 4-byte Reload + mulxl %ebx, %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %ebx, %edx, %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + movl 36(%esp), %edx # 4-byte Reload + mulxl %ebx, %esi, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %edi, %esi + movl 32(%esp), %edx # 4-byte Reload + mulxl %ebx, %edi, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ebx, %edx + mulxl %ebx, %ebx, %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl %ebp, %ebx + sbbl %ebp, %ebp + andl $1, %ebp + addl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 
52(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl 80(%esp), %eax + movl 48(%esp), %edx # 4-byte Reload + movl %edx, 16(%eax) + movl 44(%esp), %edx # 4-byte Reload + movl %edx, 20(%eax) + movl 52(%esp), %edx # 4-byte Reload + movl %edx, 24(%eax) + movl %esi, 28(%eax) + movl %edi, 32(%eax) + movl %ecx, 36(%eax) + movl %ebx, 40(%eax) + adcl 56(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%eax) + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end82: + .size mcl_fpDbl_sqrPre6Lbmi2, .Lfunc_end82-mcl_fpDbl_sqrPre6Lbmi2 + + .globl mcl_fp_mont6Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont6Lbmi2,@function +mcl_fp_mont6Lbmi2: # @mcl_fp_mont6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 124(%esp), %eax + movl 20(%eax), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 128(%esp), %ecx + movl (%ecx), %ecx + mulxl %ecx, %edx, %ebp + movl %edx, 96(%esp) # 4-byte Spill + movl 16(%eax), %edx + movl %edx, 72(%esp) # 4-byte Spill + mulxl %ecx, %edx, %edi + movl %edx, 92(%esp) # 4-byte Spill + movl 12(%eax), %edx + movl %edx, 68(%esp) # 4-byte Spill + mulxl %ecx, %edx, %esi + movl %edx, 88(%esp) # 4-byte Spill + movl (%eax), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %ecx, %eax, %edx + movl %eax, 80(%esp) # 4-byte Spill + movl %edx, 84(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl %ecx, %ebx, %edx + movl %ebx, 16(%esp) # 4-byte Spill + addl 80(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl 8(%eax), %edx + movl %edx, 56(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %eax + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + 
adcl 92(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 96(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 132(%esp), %edi + movl -4(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ebx, %edx + imull %eax, %edx + movl (%edi), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 4(%edi), %esi + movl %esi, 96(%esp) # 4-byte Spill + mulxl %esi, %esi, %ebp + mulxl %eax, %ecx, %eax + movl %ecx, 12(%esp) # 4-byte Spill + addl %esi, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 8(%edi), %eax + movl %eax, 88(%esp) # 4-byte Spill + mulxl %eax, %ecx, %esi + adcl %ebp, %ecx + movl 12(%edi), %eax + movl %eax, 84(%esp) # 4-byte Spill + mulxl %eax, %ebx, %eax + adcl %esi, %ebx + movl 16(%edi), %esi + movl %esi, 80(%esp) # 4-byte Spill + mulxl %esi, %esi, %ebp + adcl %eax, %esi + movl 20(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + mulxl %eax, %edi, %eax + adcl %ebp, %edi + adcl $0, %eax + movl 12(%esp), %edx # 4-byte Reload + addl 16(%esp), %edx # 4-byte Folded Reload + movl 44(%esp), %edx # 4-byte Reload + adcl %edx, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 128(%esp), %edx + movl 4(%edx), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload + mulxl 52(%esp), %ebx, %ebp # 
4-byte Folded Reload + addl %eax, %ebp + movl %ebp, 28(%esp) # 4-byte Spill + mulxl 56(%esp), %esi, %ebp # 4-byte Folded Reload + adcl %ecx, %esi + mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %ebp, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + movl 40(%esp), %eax # 4-byte Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + movl 44(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + movl 20(%esp), %ebp # 4-byte Reload + adcl %ebp, 28(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl %edi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl %ebx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, %edi + mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, %eax + mulxl 84(%esp), %esi, %ebx # 4-byte Folded Reload + adcl %ecx, %esi + mulxl 80(%esp), %ecx, %ebp # 4-byte Folded Reload + adcl %ebx, %ecx + movl %ecx, %ebx + mulxl 76(%esp), %ecx, %edx # 4-byte Folded Reload + adcl %ebp, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, %ecx + movl 36(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 12(%esp), %ebp # 4-byte Reload + addl 4(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 
20(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 128(%esp), %edx + movl 8(%edx), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + mulxl 48(%esp), %ebx, %edi # 4-byte Folded Reload + mulxl 52(%esp), %eax, %esi # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + addl %ebx, %esi + mulxl 56(%esp), %ecx, %ebp # 4-byte Folded Reload + adcl %edi, %ecx + mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload + adcl %ebp, %edi + adcl (%esp), %eax # 4-byte Folded Reload + movl 40(%esp), %ebp # 4-byte Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl 44(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 4(%esp), %ebx # 4-byte Reload + addl 28(%esp), %ebx # 4-byte Folded Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 92(%esp), %ebp, %esi # 4-byte Folded Reload + movl %ebp, 36(%esp) # 4-byte Spill + addl %ecx, %esi + movl 
%esi, 28(%esp) # 4-byte Spill + mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 24(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %ecx + mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl %eax, %esi + mulxl 76(%esp), %ebp, %eax # 4-byte Folded Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl $0, %eax + movl 32(%esp), %edx # 4-byte Reload + andl $1, %edx + addl %ebx, 36(%esp) # 4-byte Folded Spill + movl 20(%esp), %ebx # 4-byte Reload + adcl %ebx, 28(%esp) # 4-byte Folded Spill + movl 16(%esp), %ebx # 4-byte Reload + adcl %ebx, 24(%esp) # 4-byte Folded Spill + adcl %edi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 128(%esp), %edx + movl 12(%edx), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + mulxl 72(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + mulxl 48(%esp), %eax, %ebx # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload + addl %eax, %edi + movl %edi, 44(%esp) # 4-byte Spill + mulxl 56(%esp), %eax, %edi # 4-byte Folded Reload + adcl %ebx, %eax + movl %eax, 20(%esp) # 4-byte Spill + mulxl 68(%esp), %ebx, %eax # 4-byte Folded Reload + adcl %edi, %ebx + adcl %esi, %eax + movl %eax, %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + movl 
16(%esp), %edi # 4-byte Reload + adcl %edi, 20(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl %ebp, %esi + movl %esi, 28(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, %ebx + mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, %edi + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %ecx + mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + adcl %eax, %esi + movl %esi, %ebp + mulxl 76(%esp), %edx, %eax # 4-byte Folded Reload + adcl 12(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + adcl $0, %eax + movl 32(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 16(%esp), %esi # 4-byte Reload + addl 4(%esp), %esi # 4-byte Folded Reload + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 8(%esp), %esi # 4-byte Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 128(%esp), %edx + movl 16(%edx), %edx + mulxl 64(%esp), %eax, %ebp # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl 
%ecx, 8(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + mulxl 48(%esp), %ebx, %eax # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload + addl %ebx, %edi + movl %edi, 28(%esp) # 4-byte Spill + mulxl 56(%esp), %edi, %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + adcl %eax, %edi + movl %edi, %ebx + mulxl 68(%esp), %edx, %eax # 4-byte Folded Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + movl %edx, %edi + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ebp, %edx + adcl $0, %edx + addl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl 20(%esp), %ebp # 4-byte Reload + adcl %ebp, 28(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + movl 4(%esp), %ebx # 4-byte Reload + adcl %esi, %ebx + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 36(%esp) # 4-byte Spill + mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload + adcl %ecx, %edi + mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload + adcl %eax, %esi + mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %ebp, %ecx + adcl $0, %eax + movl %eax, %ebp + movl 40(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 12(%esp), %eax # 4-byte Reload + addl 8(%esp), %eax # 4-byte 
Folded Reload + movl 28(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl %ebx, %esi + movl %esi, 20(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 128(%esp), %edx + movl 20(%edx), %edx + mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %ebp # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + addl %eax, %ebp + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %esi, %ebx + mulxl 72(%esp), %esi, %eax # 4-byte Folded Reload + movl %esi, 72(%esp) # 4-byte Spill + mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload + adcl %ecx, %esi + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %edx, %ecx + adcl 48(%esp), %eax # 4-byte Folded Reload + movl 64(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 52(%esp), %edi # 4-byte Reload + addl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 68(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + imull %edi, %edx + mulxl 92(%esp), 
%eax, %edi # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + addl %edi, %ecx + mulxl 88(%esp), %edi, %ebx # 4-byte Folded Reload + adcl %esi, %edi + movl %edx, %esi + mulxl 84(%esp), %ebp, %eax # 4-byte Folded Reload + adcl %ebx, %ebp + movl %esi, %edx + mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl %eax, %ebx + movl %esi, %edx + mulxl 76(%esp), %esi, %edx # 4-byte Folded Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + adcl $0, %edx + andl $1, 72(%esp) # 4-byte Folded Spill + movl 60(%esp), %eax # 4-byte Reload + addl 52(%esp), %eax # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 68(%esp) # 4-byte Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + adcl 44(%esp), %esi # 4-byte Folded Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + subl 92(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 72(%esp) # 4-byte Spill + movl %edi, %ecx + sbbl 96(%esp), %ecx # 4-byte Folded Reload + movl %ebp, %edi + sbbl 88(%esp), %edi # 4-byte Folded Reload + movl %edi, 92(%esp) # 4-byte Spill + movl %esi, %edi + sbbl 84(%esp), %ebx # 4-byte Folded Reload + sbbl 80(%esp), %esi # 4-byte Folded Reload + movl %esi, 96(%esp) # 4-byte Spill + movl %edx, %esi + sbbl 76(%esp), %esi # 4-byte Folded Reload + sbbl $0, %eax + andl $1, %eax + jne .LBB83_2 +# BB#1: + movl %ecx, 68(%esp) # 4-byte Spill +.LBB83_2: + testb %al, %al + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB83_4 +# BB#3: + movl 72(%esp), %ecx # 4-byte Reload +.LBB83_4: + movl 120(%esp), %eax + movl %ecx, (%eax) + movl 68(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + jne .LBB83_6 +# BB#5: + movl 92(%esp), %ebp # 4-byte Reload +.LBB83_6: + movl %ebp, 8(%eax) + 
movl 60(%esp), %ecx # 4-byte Reload + jne .LBB83_8 +# BB#7: + movl %ebx, %ecx +.LBB83_8: + movl %ecx, 12(%eax) + jne .LBB83_10 +# BB#9: + movl 96(%esp), %edi # 4-byte Reload +.LBB83_10: + movl %edi, 16(%eax) + jne .LBB83_12 +# BB#11: + movl %esi, %edx +.LBB83_12: + movl %edx, 20(%eax) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end83: + .size mcl_fp_mont6Lbmi2, .Lfunc_end83-mcl_fp_mont6Lbmi2 + + .globl mcl_fp_montNF6Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF6Lbmi2,@function +mcl_fp_montNF6Lbmi2: # @mcl_fp_montNF6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $84, %esp + movl 108(%esp), %ebx + movl (%ebx), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 4(%ebx), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl (%eax), %eax + mulxl %eax, %ecx, %esi + movl %edi, %edx + mulxl %eax, %edx, %ebp + movl %edx, 76(%esp) # 4-byte Spill + addl %ecx, %ebp + movl 8(%ebx), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edi + adcl %esi, %ecx + movl %ecx, %esi + movl 12(%ebx), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + movl %edx, 80(%esp) # 4-byte Spill + adcl %edi, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 16(%ebx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edi + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + movl 20(%ebx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + adcl %edi, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 116(%esp), %ebx + movl -4(%ebx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + movl %edi, %edx + imull %eax, %edx + movl (%ebx), %eax + movl %eax, 80(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + movl %eax, 16(%esp) # 4-byte Spill + addl %edi, %ecx + movl 4(%ebx), %eax + movl %eax, 76(%esp) # 4-byte Spill + mulxl %eax, %edi, %ecx + movl %ecx, 12(%esp) # 
4-byte Spill + adcl %ebp, %edi + movl 8(%ebx), %eax + movl %eax, 72(%esp) # 4-byte Spill + mulxl %eax, %eax, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + adcl %esi, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 12(%ebx), %eax + movl %eax, 68(%esp) # 4-byte Spill + mulxl %eax, %esi, %eax + movl %eax, 4(%esp) # 4-byte Spill + adcl 64(%esp), %esi # 4-byte Folded Reload + movl 16(%ebx), %eax + movl %eax, 64(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + movl %eax, (%esp) # 4-byte Spill + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl 20(%ebx), %eax + movl %eax, 60(%esp) # 4-byte Spill + mulxl %eax, %ebp, %eax + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl 28(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + movl 12(%esp), %edi # 4-byte Reload + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl (%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 4(%eax), %edx + mulxl 48(%esp), %ecx, %esi # 4-byte Folded Reload + mulxl 52(%esp), %ebp, %eax # 4-byte Folded Reload + addl %ecx, %eax + movl %eax, 4(%esp) # 4-byte Spill + mulxl 44(%esp), %ecx, %edi # 4-byte Folded Reload + adcl %esi, %ecx + movl %ecx, %esi + mulxl 40(%esp), %eax, %ebx # 4-byte Folded Reload + adcl %edi, %eax + mulxl 36(%esp), %ecx, %edi # 4-byte Folded Reload + movl %edi, (%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, %edi + mulxl 32(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + adcl $0, %ecx + movl %ecx, %edx + addl 20(%esp), %ebp # 4-byte Folded Reload + movl 4(%esp), %ecx # 4-byte Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + 
adcl 12(%esp), %eax # 4-byte Folded Reload + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %ebp, %esi + movl %esi, %edx + imull 56(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ebp, %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + addl %esi, %ebp + mulxl 76(%esp), %ebp, %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl %ecx, %ebp + movl %ebp, %esi + mulxl 72(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %ecx + mulxl 68(%esp), %ebp, %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + adcl %eax, %ebp + movl %ebp, %eax + mulxl 64(%esp), %ebp, %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + adcl 20(%esp), %ebp # 4-byte Folded Reload + mulxl 60(%esp), %edi, %edx # 4-byte Folded Reload + adcl %ebx, %edi + movl %edi, %ebx + movl 28(%esp), %edi # 4-byte Reload + adcl $0, %edi + addl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl 4(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + adcl %edx, %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 8(%eax), %edx + mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload + mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + addl %eax, %edi + mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload + adcl %ecx, %eax + movl %eax, %ecx + mulxl 40(%esp), %eax, %ebx # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, 24(%esp) # 4-byte Spill + mulxl 36(%esp), %eax, %ebp # 4-byte Folded Reload + 
adcl %ebx, %eax + movl %eax, %esi + mulxl 32(%esp), %ebx, %eax # 4-byte Folded Reload + adcl %ebp, %ebx + adcl $0, %eax + movl %eax, %edx + movl (%esp), %ebp # 4-byte Reload + addl 20(%esp), %ebp # 4-byte Folded Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %ebp, %edx + movl %ebp, %eax + imull 56(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + addl %eax, %ebp + mulxl 76(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl %edi, %ebp + movl %ebp, %edi + mulxl 72(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %ecx + mulxl 68(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %eax + mulxl 64(%esp), %ebp, %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl (%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + mulxl 60(%esp), %ebp, %edx # 4-byte Folded Reload + adcl %ebx, %ebp + movl 28(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + movl 4(%esp), %eax # 4-byte Reload + adcl %eax, 20(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + adcl %edx, %esi + movl %esi, 28(%esp) # 
4-byte Spill + movl 112(%esp), %eax + movl 12(%eax), %edx + mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload + mulxl 52(%esp), %ebp, %esi # 4-byte Folded Reload + addl %eax, %esi + mulxl 44(%esp), %eax, %edi # 4-byte Folded Reload + adcl %ecx, %eax + mulxl 40(%esp), %ecx, %ebx # 4-byte Folded Reload + adcl %edi, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 36(%esp), %ecx, %edi # 4-byte Folded Reload + movl %edi, (%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, %edi + mulxl 32(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + adcl $0, %ecx + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + adcl $0, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl %ebp, %ecx + movl %ecx, %edx + imull 56(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + addl %ecx, %ebp + mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl %esi, %ecx + mulxl 72(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl (%esp), %ebp # 4-byte Folded Reload + mulxl 68(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, %eax + mulxl 64(%esp), %esi, %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + mulxl 60(%esp), %esi, %edx # 4-byte Folded Reload + adcl %ebx, %esi + movl 28(%esp), %edi # 4-byte Reload + adcl $0, %edi + addl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) 
# 4-byte Spill + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + movl 4(%esp), %eax # 4-byte Reload + adcl %eax, 20(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + adcl %edx, %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 16(%eax), %edx + mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload + mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + addl %eax, %edi + mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload + adcl %ecx, %eax + mulxl 40(%esp), %ecx, %ebx # 4-byte Folded Reload + adcl %esi, %ecx + mulxl 36(%esp), %esi, %ebp # 4-byte Folded Reload + adcl %ebx, %esi + mulxl 32(%esp), %ebx, %edx # 4-byte Folded Reload + adcl %ebp, %ebx + adcl $0, %edx + movl 24(%esp), %ebp # 4-byte Reload + addl 16(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + adcl 4(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %ebp, %edx + imull 56(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + addl 24(%esp), %ebp # 4-byte Folded Reload + mulxl 76(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + adcl %edi, %ebp + mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + mulxl 68(%esp), %eax, %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + adcl %ecx, %eax + mulxl 64(%esp), %ecx, %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + 
adcl %esi, %ecx + movl %ecx, %edi + mulxl 60(%esp), %ecx, %edx # 4-byte Folded Reload + adcl %ebx, %ecx + movl 28(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + movl 16(%esp), %ebx # 4-byte Reload + adcl %ebx, 24(%esp) # 4-byte Folded Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + adcl %edx, %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 20(%eax), %edx + mulxl 48(%esp), %ebx, %eax # 4-byte Folded Reload + mulxl 52(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + addl %ebx, %esi + mulxl 44(%esp), %ebx, %ebp # 4-byte Folded Reload + adcl %eax, %ebx + mulxl 40(%esp), %eax, %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, %ebp + mulxl 36(%esp), %eax, %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + mulxl 32(%esp), %edx, %eax # 4-byte Folded Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + movl 52(%esp), %edi # 4-byte Reload + addl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 16(%esp), %ebx # 4-byte Folded Reload + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + adcl %ecx, 48(%esp) # 4-byte Folded Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, %eax + movl 56(%esp), %edx # 4-byte Reload + movl 52(%esp), %ebp # 4-byte Reload + imull %ebp, %edx + mulxl 80(%esp), %ecx, %edi # 4-byte Folded Reload + movl %edi, 56(%esp) # 4-byte Spill + addl %ebp, %ecx + mulxl 76(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 
%esi, %ebp + mulxl 72(%esp), %ecx, %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %edx, %ebx + mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %ebx, %edx + mulxl 64(%esp), %edi, %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %ebx, %edx + mulxl 60(%esp), %ebx, %edx # 4-byte Folded Reload + adcl 44(%esp), %ebx # 4-byte Folded Reload + adcl $0, %eax + addl 56(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 56(%esp) # 4-byte Spill + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + adcl %edx, %eax + movl %ebp, %edx + subl 80(%esp), %edx # 4-byte Folded Reload + sbbl 76(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + movl %esi, %ebp + movl %ebx, %esi + sbbl 72(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 76(%esp) # 4-byte Spill + sbbl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 80(%esp) # 4-byte Spill + movl %esi, %ebx + sbbl 64(%esp), %ebx # 4-byte Folded Reload + movl %eax, %edi + sbbl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, %ebp + sarl $31, %ebp + testl %ebp, %ebp + js .LBB84_2 +# BB#1: + movl %edx, 56(%esp) # 4-byte Spill +.LBB84_2: + movl 104(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, (%ebp) + movl 44(%esp), %ecx # 4-byte Reload + js .LBB84_4 +# BB#3: + movl 40(%esp), %ecx # 4-byte Reload +.LBB84_4: + movl %ecx, 4(%ebp) + movl 52(%esp), %ecx # 4-byte Reload + movl 48(%esp), %edx # 4-byte Reload + js .LBB84_6 +# BB#5: + movl 76(%esp), %edx # 4-byte Reload +.LBB84_6: + movl %edx, 8(%ebp) + js .LBB84_8 +# BB#7: + movl 80(%esp), %ecx # 4-byte Reload +.LBB84_8: + movl 
%ecx, 12(%ebp) + js .LBB84_10 +# BB#9: + movl %ebx, %esi +.LBB84_10: + movl %esi, 16(%ebp) + js .LBB84_12 +# BB#11: + movl %edi, %eax +.LBB84_12: + movl %eax, 20(%ebp) + addl $84, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end84: + .size mcl_fp_montNF6Lbmi2, .Lfunc_end84-mcl_fp_montNF6Lbmi2 + + .globl mcl_fp_montRed6Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed6Lbmi2,@function +mcl_fp_montRed6Lbmi2: # @mcl_fp_montRed6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 116(%esp), %edi + movl -4(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl (%edi), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 112(%esp), %ecx + movl (%ecx), %edx + movl %edx, 84(%esp) # 4-byte Spill + imull %eax, %edx + movl 20(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + mulxl %eax, %ebx, %eax + movl %ebx, 56(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl 16(%edi), %eax + movl %eax, 80(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + movl %ecx, 44(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl 12(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + movl %ecx, 28(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl 4(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + mulxl %eax, %ebx, %eax + movl %eax, 24(%esp) # 4-byte Spill + mulxl %esi, %ecx, %eax + movl %ecx, 48(%esp) # 4-byte Spill + addl %ebx, %eax + movl %eax, %ebp + movl 8(%edi), %esi + movl %esi, 64(%esp) # 4-byte Spill + mulxl %esi, %eax, %edx + adcl 24(%esp), %eax # 4-byte Folded Reload + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, %ebx + movl 32(%esp), %edi # 4-byte Reload + adcl 44(%esp), %edi # 4-byte Folded Reload + movl 36(%esp), %esi # 4-byte Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 48(%esp), %ecx # 4-byte Reload + addl 84(%esp), %ecx # 4-byte Folded Reload + movl 112(%esp), %ecx + adcl 4(%ecx), %ebp 
+ adcl 8(%ecx), %eax + movl %eax, 16(%esp) # 4-byte Spill + adcl 12(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + adcl 16(%ecx), %edi + movl %edi, 32(%esp) # 4-byte Spill + adcl 20(%ecx), %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl 24(%ecx), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl 40(%ecx), %esi + movl 36(%ecx), %edi + movl 32(%ecx), %ebx + movl 28(%ecx), %eax + adcl $0, %eax + movl %eax, 12(%esp) # 4-byte Spill + adcl $0, %ebx + movl %ebx, 28(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 44(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + sbbl %ecx, %ecx + andl $1, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %ebp, %ebx + movl %ebx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 68(%esp), %esi, %ebp # 4-byte Folded Reload + movl %esi, 8(%esp) # 4-byte Spill + addl %ecx, %ebp + mulxl 64(%esp), %edi, %ecx # 4-byte Folded Reload + adcl %eax, %edi + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 80(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + movl 52(%esp), %esi # 4-byte Reload + mulxl %esi, %edx, %eax + adcl (%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + addl %ebx, 8(%esp) # 4-byte Folded Spill + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl 
%eax, 32(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ebp, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl %esi, %ecx, %eax + movl %ecx, 12(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload + mulxl 68(%esp), %ecx, %ebx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + addl %edi, %ebx + adcl %esi, %eax + movl %eax, %esi + mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload + adcl 4(%esp), %ecx # 4-byte Folded Reload + mulxl 80(%esp), %edi, %edx # 4-byte Folded Reload + adcl %eax, %edi + movl %edi, %eax + adcl 12(%esp), %edx # 4-byte Folded Reload + movl %edx, %edi + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebp, 8(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ebx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 68(%esp), 
%edi, %ebp # 4-byte Folded Reload + addl %ecx, %ebp + adcl %esi, %eax + movl %eax, 32(%esp) # 4-byte Spill + mulxl 76(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, %esi + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + adcl 4(%esp), %ecx # 4-byte Folded Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebx, %edi + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %edi # 4-byte Reload + adcl %edi, 32(%esp) # 4-byte Folded Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ebp, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + mulxl 64(%esp), %edi, %esi # 4-byte Folded Reload + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + mulxl 68(%esp), %ebx, %ecx # 4-byte Folded Reload + addl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, %edi + mulxl 76(%esp), %eax, %ecx # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, %esi + mulxl 80(%esp), %edx, %eax # 4-byte Folded Reload + adcl %ecx, %edx + movl %edx, %ecx + adcl 16(%esp), %eax # 4-byte Folded Reload + movl 44(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebp, %ebx + movl 40(%esp), %ebx # 4-byte Reload + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte 
Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl 60(%esp), %edx # 4-byte Reload + imull %ebx, %edx + mulxl 68(%esp), %eax, %ecx # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + mulxl 72(%esp), %eax, %edi # 4-byte Folded Reload + addl %ecx, %eax + mulxl 64(%esp), %ebp, %ecx # 4-byte Folded Reload + adcl %edi, %ebp + movl %edx, %edi + mulxl 76(%esp), %esi, %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl %ecx, %esi + movl %edi, %edx + mulxl 80(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %edi, %edx + mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + adcl $0, %edi + movl 60(%esp), %edx # 4-byte Reload + addl 40(%esp), %edx # 4-byte Folded Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 56(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl $0, %edx + subl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl %ebp, %eax + sbbl 72(%esp), %eax # 4-byte Folded Reload + movl %esi, %ebp + sbbl 64(%esp), %ebp # 4-byte Folded Reload + sbbl 76(%esp), %ebx # 4-byte Folded 
Reload + movl %ebx, 76(%esp) # 4-byte Spill + movl %ecx, %ebx + sbbl 80(%esp), %ebx # 4-byte Folded Reload + sbbl 52(%esp), %edi # 4-byte Folded Reload + sbbl $0, %edx + andl $1, %edx + movl %edx, 84(%esp) # 4-byte Spill + jne .LBB85_2 +# BB#1: + movl %eax, 60(%esp) # 4-byte Spill +.LBB85_2: + movl 84(%esp), %eax # 4-byte Reload + testb %al, %al + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB85_4 +# BB#3: + movl 68(%esp), %ecx # 4-byte Reload +.LBB85_4: + movl 108(%esp), %eax + movl %ecx, (%eax) + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB85_6 +# BB#5: + movl %ebp, %esi +.LBB85_6: + movl %esi, 8(%eax) + jne .LBB85_8 +# BB#7: + movl 76(%esp), %ecx # 4-byte Reload +.LBB85_8: + movl %ecx, 12(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB85_10 +# BB#9: + movl %ebx, %ecx +.LBB85_10: + movl %ecx, 16(%eax) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB85_12 +# BB#11: + movl %edi, %ecx +.LBB85_12: + movl %ecx, 20(%eax) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end85: + .size mcl_fp_montRed6Lbmi2, .Lfunc_end85-mcl_fp_montRed6Lbmi2 + + .globl mcl_fp_addPre6Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre6Lbmi2,@function +mcl_fp_addPre6Lbmi2: # @mcl_fp_addPre6Lbmi2 +# BB#0: + pushl %esi + movl 16(%esp), %eax + movl (%eax), %ecx + movl 12(%esp), %edx + addl (%edx), %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 4(%eax), %ecx + adcl 4(%edx), %ecx + movl %ecx, 4(%esi) + movl 8(%eax), %ecx + adcl 8(%edx), %ecx + movl %ecx, 8(%esi) + movl 12(%edx), %ecx + adcl 12(%eax), %ecx + movl %ecx, 12(%esi) + movl 16(%edx), %ecx + adcl 16(%eax), %ecx + movl %ecx, 16(%esi) + movl 20(%eax), %eax + movl 20(%edx), %ecx + adcl %eax, %ecx + movl %ecx, 20(%esi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end86: + .size mcl_fp_addPre6Lbmi2, .Lfunc_end86-mcl_fp_addPre6Lbmi2 + + .globl mcl_fp_subPre6Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre6Lbmi2,@function 
+mcl_fp_subPre6Lbmi2: # @mcl_fp_subPre6Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %ecx + movl (%ecx), %edx + xorl %eax, %eax + movl 20(%esp), %esi + subl (%esi), %edx + movl 12(%esp), %edi + movl %edx, (%edi) + movl 4(%ecx), %edx + sbbl 4(%esi), %edx + movl %edx, 4(%edi) + movl 8(%ecx), %edx + sbbl 8(%esi), %edx + movl %edx, 8(%edi) + movl 12(%ecx), %edx + sbbl 12(%esi), %edx + movl %edx, 12(%edi) + movl 16(%ecx), %edx + sbbl 16(%esi), %edx + movl %edx, 16(%edi) + movl 20(%esi), %edx + movl 20(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 20(%edi) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + retl +.Lfunc_end87: + .size mcl_fp_subPre6Lbmi2, .Lfunc_end87-mcl_fp_subPre6Lbmi2 + + .globl mcl_fp_shr1_6Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_6Lbmi2,@function +mcl_fp_shr1_6Lbmi2: # @mcl_fp_shr1_6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl 20(%eax), %ecx + movl 16(%eax), %edx + movl 12(%eax), %esi + movl 8(%eax), %edi + movl (%eax), %ebx + movl 4(%eax), %eax + shrdl $1, %eax, %ebx + movl 20(%esp), %ebp + movl %ebx, (%ebp) + shrdl $1, %edi, %eax + movl %eax, 4(%ebp) + shrdl $1, %esi, %edi + movl %edi, 8(%ebp) + shrdl $1, %edx, %esi + movl %esi, 12(%ebp) + shrdl $1, %ecx, %edx + movl %edx, 16(%ebp) + shrl %ecx + movl %ecx, 20(%ebp) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end88: + .size mcl_fp_shr1_6Lbmi2, .Lfunc_end88-mcl_fp_shr1_6Lbmi2 + + .globl mcl_fp_add6Lbmi2 + .align 16, 0x90 + .type mcl_fp_add6Lbmi2,@function +mcl_fp_add6Lbmi2: # @mcl_fp_add6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $12, %esp + movl 40(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ebp + movl 36(%esp), %ebx + addl (%ebx), %edx + adcl 4(%ebx), %ebp + movl 8(%eax), %ecx + adcl 8(%ebx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl %ecx, %esi + movl 12(%ebx), %ecx + movl 16(%ebx), %edi + adcl 12(%eax), %ecx + adcl 16(%eax), %edi + movl 20(%ebx), %ebx + adcl 20(%eax), 
%ebx + movl 32(%esp), %eax + movl %edx, (%eax) + movl %ebp, 4(%eax) + movl %esi, 8(%eax) + movl %ecx, 12(%eax) + movl %edi, 16(%eax) + movl %ebx, 20(%eax) + sbbl %eax, %eax + andl $1, %eax + movl 44(%esp), %esi + subl (%esi), %edx + movl %edx, (%esp) # 4-byte Spill + movl 8(%esp), %edx # 4-byte Reload + movl 44(%esp), %esi + sbbl 4(%esi), %ebp + movl %ebp, 4(%esp) # 4-byte Spill + movl %ecx, %ebp + sbbl 8(%esi), %edx + sbbl 12(%esi), %ebp + sbbl 16(%esi), %edi + sbbl 20(%esi), %ebx + sbbl $0, %eax + testb $1, %al + jne .LBB89_2 +# BB#1: # %nocarry + movl (%esp), %eax # 4-byte Reload + movl 32(%esp), %ecx + movl %eax, (%ecx) + movl 4(%esp), %eax # 4-byte Reload + movl %eax, 4(%ecx) + movl %edx, 8(%ecx) + movl %ebp, 12(%ecx) + movl %edi, 16(%ecx) + movl %ebx, 20(%ecx) +.LBB89_2: # %carry + addl $12, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end89: + .size mcl_fp_add6Lbmi2, .Lfunc_end89-mcl_fp_add6Lbmi2 + + .globl mcl_fp_addNF6Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF6Lbmi2,@function +mcl_fp_addNF6Lbmi2: # @mcl_fp_addNF6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 68(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 64(%esp), %ebp + addl (%ebp), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %edx, %ebx + adcl 4(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 20(%eax), %edx + movl 16(%eax), %esi + movl 12(%eax), %edi + movl 8(%eax), %eax + adcl 8(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 12(%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + adcl 16(%ebp), %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%ebp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ebx, %ebp + movl 72(%esp), %ebx + subl (%ebx), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl %ecx, %ebp + movl 72(%esp), %ecx + sbbl 4(%ecx), %ebp + movl %ebp, 4(%esp) # 4-byte Spill + sbbl 8(%ecx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 12(%ecx), %edi + 
movl %edi, 12(%esp) # 4-byte Spill + movl %esi, %edi + sbbl 16(%ecx), %edi + movl %edx, %esi + sbbl 20(%ecx), %esi + movl %esi, %ebx + sarl $31, %ebx + testl %ebx, %ebx + js .LBB90_2 +# BB#1: + movl (%esp), %eax # 4-byte Reload +.LBB90_2: + movl 60(%esp), %ebx + movl %eax, (%ebx) + movl 20(%esp), %ecx # 4-byte Reload + js .LBB90_4 +# BB#3: + movl 4(%esp), %ecx # 4-byte Reload +.LBB90_4: + movl %ecx, 4(%ebx) + movl 36(%esp), %eax # 4-byte Reload + movl 28(%esp), %edx # 4-byte Reload + movl 24(%esp), %ecx # 4-byte Reload + js .LBB90_6 +# BB#5: + movl 8(%esp), %ecx # 4-byte Reload +.LBB90_6: + movl %ecx, 8(%ebx) + movl 32(%esp), %ecx # 4-byte Reload + js .LBB90_8 +# BB#7: + movl 12(%esp), %edx # 4-byte Reload +.LBB90_8: + movl %edx, 12(%ebx) + js .LBB90_10 +# BB#9: + movl %edi, %ecx +.LBB90_10: + movl %ecx, 16(%ebx) + js .LBB90_12 +# BB#11: + movl %esi, %eax +.LBB90_12: + movl %eax, 20(%ebx) + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end90: + .size mcl_fp_addNF6Lbmi2, .Lfunc_end90-mcl_fp_addNF6Lbmi2 + + .globl mcl_fp_sub6Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub6Lbmi2,@function +mcl_fp_sub6Lbmi2: # @mcl_fp_sub6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 40(%esp), %ebx + movl (%ebx), %esi + movl 4(%ebx), %edi + movl 44(%esp), %ecx + subl (%ecx), %esi + sbbl 4(%ecx), %edi + movl %edi, (%esp) # 4-byte Spill + movl 8(%ebx), %eax + sbbl 8(%ecx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 12(%ebx), %eax + sbbl 12(%ecx), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 16(%ebx), %ebp + sbbl 16(%ecx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 20(%ebx), %edx + sbbl 20(%ecx), %edx + movl $0, %ecx + sbbl $0, %ecx + testb $1, %cl + movl 36(%esp), %ebx + movl %esi, (%ebx) + movl %edi, 4(%ebx) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl %eax, 12(%ebx) + movl %ebp, 16(%ebx) + movl %edx, 20(%ebx) + je .LBB91_2 +# BB#1: # %carry + movl 48(%esp), %ecx + addl (%ecx), 
%esi + movl %esi, (%ebx) + movl (%esp), %eax # 4-byte Reload + adcl 4(%ecx), %eax + adcl 8(%ecx), %edi + movl %eax, 4(%ebx) + movl 12(%ecx), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl %eax, 12(%ebx) + movl 16(%ecx), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%ebx) + movl 20(%ecx), %eax + adcl %edx, %eax + movl %eax, 20(%ebx) +.LBB91_2: # %nocarry + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end91: + .size mcl_fp_sub6Lbmi2, .Lfunc_end91-mcl_fp_sub6Lbmi2 + + .globl mcl_fp_subNF6Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF6Lbmi2,@function +mcl_fp_subNF6Lbmi2: # @mcl_fp_subNF6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 48(%esp), %ebx + movl 20(%ebx), %esi + movl (%ebx), %ecx + movl 4(%ebx), %eax + movl 52(%esp), %ebp + subl (%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 4(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 16(%ebx), %eax + movl 12(%ebx), %ecx + movl 8(%ebx), %edx + sbbl 8(%ebp), %edx + movl %edx, 4(%esp) # 4-byte Spill + sbbl 12(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + sbbl 16(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %edx + sbbl 20(%ebp), %edx + movl %edx, (%esp) # 4-byte Spill + movl %edx, %ebp + sarl $31, %ebp + movl %ebp, %ecx + addl %ecx, %ecx + movl %ebp, %eax + adcl %eax, %eax + shrl $31, %edx + orl %ecx, %edx + movl 56(%esp), %ebx + andl 4(%ebx), %eax + andl (%ebx), %edx + movl 20(%ebx), %edi + andl %ebp, %edi + movl 16(%ebx), %esi + andl %ebp, %esi + movl 12(%ebx), %ecx + andl %ebp, %ecx + andl 8(%ebx), %ebp + addl 8(%esp), %edx # 4-byte Folded Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl 44(%esp), %ebx + movl %edx, (%ebx) + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %eax, 4(%ebx) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ebp, 8(%ebx) + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %ecx, 12(%ebx) + movl %esi, 16(%ebx) + adcl 
(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%ebx) + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end92: + .size mcl_fp_subNF6Lbmi2, .Lfunc_end92-mcl_fp_subNF6Lbmi2 + + .globl mcl_fpDbl_add6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add6Lbmi2,@function +mcl_fpDbl_add6Lbmi2: # @mcl_fpDbl_add6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 64(%esp), %edx + movl 60(%esp), %ecx + movl 12(%ecx), %esi + movl 16(%ecx), %eax + movl 8(%edx), %edi + movl (%edx), %ebx + addl (%ecx), %ebx + movl 56(%esp), %ebp + movl %ebx, (%ebp) + movl 4(%edx), %ebx + adcl 4(%ecx), %ebx + adcl 8(%ecx), %edi + adcl 12(%edx), %esi + adcl 16(%edx), %eax + movl %ebx, 4(%ebp) + movl %edx, %ebx + movl 32(%ebx), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %edi, 8(%ebp) + movl 20(%ebx), %edi + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + adcl %edi, %esi + movl 24(%ebx), %edi + movl %eax, 16(%ebp) + movl 24(%ecx), %edx + adcl %edi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 28(%ebx), %edi + movl %esi, 20(%ebp) + movl 28(%ecx), %eax + adcl %edi, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 32(%ecx), %ebp + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + movl 36(%ebx), %esi + movl %ebx, %edi + movl 36(%ecx), %ebx + adcl %esi, %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 40(%edi), %esi + movl 40(%ecx), %edi + adcl %esi, %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 64(%esp), %esi + movl 44(%esi), %esi + movl 44(%ecx), %ecx + adcl %esi, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + sbbl %ecx, %ecx + andl $1, %ecx + movl 68(%esp), %esi + subl (%esi), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 68(%esp), %edx + sbbl 4(%edx), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 8(%edx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl %ebx, %ebp + sbbl 12(%edx), %ebp + movl %edi, %ebx + movl 12(%esp), %edi # 4-byte Reload + sbbl 16(%edx), %ebx + movl %edi, %eax + sbbl 20(%edx), 
%eax + sbbl $0, %ecx + andl $1, %ecx + jne .LBB93_2 +# BB#1: + movl %eax, %edi +.LBB93_2: + testb %cl, %cl + movl 20(%esp), %ecx # 4-byte Reload + movl 16(%esp), %edx # 4-byte Reload + jne .LBB93_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload + movl 4(%esp), %ecx # 4-byte Reload +.LBB93_4: + movl 56(%esp), %eax + movl %ecx, 24(%eax) + movl %edx, 28(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl 24(%esp), %edx # 4-byte Reload + jne .LBB93_6 +# BB#5: + movl 8(%esp), %edx # 4-byte Reload +.LBB93_6: + movl %edx, 32(%eax) + movl 28(%esp), %edx # 4-byte Reload + jne .LBB93_8 +# BB#7: + movl %ebp, %edx +.LBB93_8: + movl %edx, 36(%eax) + jne .LBB93_10 +# BB#9: + movl %ebx, %ecx +.LBB93_10: + movl %ecx, 40(%eax) + movl %edi, 44(%eax) + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end93: + .size mcl_fpDbl_add6Lbmi2, .Lfunc_end93-mcl_fpDbl_add6Lbmi2 + + .globl mcl_fpDbl_sub6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub6Lbmi2,@function +mcl_fpDbl_sub6Lbmi2: # @mcl_fpDbl_sub6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 48(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %edi + movl 52(%esp), %esi + subl (%esi), %eax + sbbl 4(%esi), %edi + movl 8(%edx), %ebx + sbbl 8(%esi), %ebx + movl 44(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edx), %eax + sbbl 12(%esi), %eax + movl %edi, 4(%ecx) + movl 16(%edx), %edi + sbbl 16(%esi), %edi + movl %ebx, 8(%ecx) + movl 20(%esi), %ebx + movl %eax, 12(%ecx) + movl 20(%edx), %eax + sbbl %ebx, %eax + movl 24(%esi), %ebx + movl %edi, 16(%ecx) + movl 24(%edx), %edi + sbbl %ebx, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 28(%esi), %edi + movl %eax, 20(%ecx) + movl 28(%edx), %eax + sbbl %edi, %eax + movl %eax, (%esp) # 4-byte Spill + movl 32(%esi), %edi + movl 32(%edx), %eax + sbbl %edi, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 36(%esi), %edi + movl 36(%edx), %eax + sbbl %edi, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 40(%esi), %edi + movl 40(%edx), 
%eax + sbbl %edi, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 44(%esi), %esi + movl 44(%edx), %eax + sbbl %esi, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl $0, %ebx + sbbl $0, %ebx + andl $1, %ebx + movl 56(%esp), %eax + jne .LBB94_1 +# BB#2: + xorl %edx, %edx + jmp .LBB94_3 +.LBB94_1: + movl 20(%eax), %edx +.LBB94_3: + testb %bl, %bl + jne .LBB94_4 +# BB#5: + movl $0, %esi + movl $0, %edi + jmp .LBB94_6 +.LBB94_4: + movl (%eax), %edi + movl 4(%eax), %esi +.LBB94_6: + jne .LBB94_7 +# BB#8: + movl $0, %ebx + jmp .LBB94_9 +.LBB94_7: + movl 16(%eax), %ebx +.LBB94_9: + jne .LBB94_10 +# BB#11: + movl $0, %ebp + jmp .LBB94_12 +.LBB94_10: + movl 12(%eax), %ebp +.LBB94_12: + jne .LBB94_13 +# BB#14: + xorl %eax, %eax + jmp .LBB94_15 +.LBB94_13: + movl 8(%eax), %eax +.LBB94_15: + addl 8(%esp), %edi # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + movl %edi, 24(%ecx) + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %esi, 28(%ecx) + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %eax, 32(%ecx) + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 36(%ecx) + movl %ebx, 40(%ecx) + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%ecx) + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end94: + .size mcl_fpDbl_sub6Lbmi2, .Lfunc_end94-mcl_fpDbl_sub6Lbmi2 + + .globl mcl_fp_mulUnitPre7Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre7Lbmi2,@function +mcl_fp_mulUnitPre7Lbmi2: # @mcl_fp_mulUnitPre7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 44(%esp), %edx + movl 40(%esp), %edi + mulxl 4(%edi), %ecx, %esi + mulxl (%edi), %ebx, %eax + movl %ebx, 12(%esp) # 4-byte Spill + addl %ecx, %eax + movl %eax, 8(%esp) # 4-byte Spill + mulxl 8(%edi), %ecx, %eax + adcl %esi, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + mulxl 12(%edi), %ebx, %ecx + adcl %eax, %ebx + mulxl 16(%edi), %esi, %ebp + adcl %ecx, %esi + mulxl 20(%edi), %ecx, %eax + movl %eax, (%esp) # 4-byte Spill + adcl 
%ebp, %ecx + mulxl 24(%edi), %edx, %edi + movl 36(%esp), %eax + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, (%eax) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%eax) + movl 4(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl %ebx, 12(%eax) + movl %esi, 16(%eax) + movl %ecx, 20(%eax) + adcl (%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%eax) + adcl $0, %edi + movl %edi, 28(%eax) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end95: + .size mcl_fp_mulUnitPre7Lbmi2, .Lfunc_end95-mcl_fp_mulUnitPre7Lbmi2 + + .globl mcl_fpDbl_mulPre7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre7Lbmi2,@function +mcl_fpDbl_mulPre7Lbmi2: # @mcl_fpDbl_mulPre7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 96(%esp), %eax + movl (%eax), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, %edi + movl 100(%esp), %eax + movl (%eax), %ebp + mulxl %ebp, %ecx, %eax + movl %esi, %edx + mulxl %ebp, %edx, %esi + movl %edx, 40(%esp) # 4-byte Spill + addl %ecx, %esi + movl 8(%edi), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %edi, %ebx + mulxl %ebp, %edi, %ecx + adcl %eax, %edi + movl 12(%ebx), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ebx, %eax + mulxl %ebp, %ebx, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl %ecx, %ebx + movl 16(%eax), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %ebp, %ecx, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl 20(%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %ebp, %edx, %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl 24(%eax), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %ebp, %eax, %edx + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 40(%esp), %ebp # 4-byte Reload + 
movl %ebp, (%eax) + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 4(%eax), %eax + movl 68(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ebp + movl %ebp, 40(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + mulxl %eax, %esi, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl %edi, %esi + movl 60(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl %ebx, %edi + movl 56(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl %ecx, %ebx + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 64(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + adcl 28(%esp), %ecx # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 40(%esp), %esi # 4-byte Folded Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + adcl 60(%esp), %ebx # 4-byte Folded Reload + adcl 56(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl %ebp, 64(%esp) # 4-byte Folded Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + movl %edx, 4(%eax) + movl 96(%esp), %ecx + movl (%ecx), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 8(%eax), %eax + mulxl %eax, %edx, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 4(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %eax, %edx, %esi + movl %esi, 16(%esp) # 4-byte Spill + adcl 
%edi, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 8(%ecx), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %eax, %esi, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %ebx, %esi + movl %esi, %edi + movl 12(%ecx), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %eax, %edx, %esi + movl %esi, 8(%esp) # 4-byte Spill + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl 16(%ecx), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %eax, %ebx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl 20(%ecx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %ebp, %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl 24(%ecx), %edx + movl %edx, 28(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + adcl 24(%esp), %ecx # 4-byte Folded Reload + sbbl %esi, %esi + andl $1, %esi + movl 20(%esp), %eax # 4-byte Reload + addl %eax, 68(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 60(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ebx # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 64(%esp), %ecx # 4-byte Folded Reload + adcl %edx, %esi + movl 92(%esp), %eax + movl 32(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + movl 100(%esp), %eax + movl 12(%eax), %eax + movl 56(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 32(%esp) # 4-byte Spill + addl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte 
Reload + mulxl %eax, %edx, %edi + movl %edi, 60(%esp) # 4-byte Spill + adcl %ebx, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %ebp, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl %ecx, %ebp + movl 28(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %eax + adcl %esi, %ecx + movl %ecx, %edx + sbbl %ecx, %ecx + andl $1, %ecx + movl 32(%esp), %esi # 4-byte Reload + addl %esi, 64(%esp) # 4-byte Folded Spill + movl 24(%esp), %edi # 4-byte Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl 48(%esp), %esi # 4-byte Reload + adcl %esi, 68(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl %eax, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 96(%esp), %ecx + movl (%ecx), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 16(%eax), %esi + mulxl %esi, %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + addl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl 4(%ecx), %edx + movl %edx, 64(%esp) # 4-byte Spill + mulxl %esi, %eax, %edx + movl %edx, 24(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, %edi + movl 8(%ecx), %edx + movl %edx, 56(%esp) # 4-byte Spill + mulxl %esi, %eax, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + movl 12(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %esi, %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl %ebx, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 16(%ecx), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %esi, %ebx, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 
20(%ecx), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %esi, %edx, %eax + movl %eax, 4(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + movl 24(%ecx), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %esi, %ebp, %ecx + movl %ecx, (%esp) # 4-byte Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + sbbl %ecx, %ecx + andl $1, %ecx + movl %edi, %esi + addl 28(%esp), %esi # 4-byte Folded Reload + movl 12(%esp), %edi # 4-byte Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl 20(%esp), %edx # 4-byte Reload + adcl %edx, 68(%esp) # 4-byte Folded Spill + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl (%esp), %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 36(%esp), %edx # 4-byte Reload + movl %edx, 16(%eax) + movl 100(%esp), %eax + movl 20(%eax), %eax + movl 60(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 36(%esp) # 4-byte Spill + addl %esi, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + mulxl %eax, %esi, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl %edi, %esi + movl 56(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, %edi + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl %ebp, %edx + movl %edx, %ebp + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %eax + movl %eax, 44(%esp) 
# 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + sbbl %edx, %edx + andl $1, %edx + addl 36(%esp), %esi # 4-byte Folded Reload + movl 20(%esp), %ecx # 4-byte Reload + adcl 64(%esp), %ecx # 4-byte Folded Reload + adcl 56(%esp), %ebx # 4-byte Folded Reload + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 52(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 60(%esp), %edx # 4-byte Reload + movl %edx, 20(%eax) + movl 100(%esp), %eax + movl 24(%eax), %edx + movl 96(%esp), %eax + mulxl (%eax), %ebp, %edi + movl %edi, 60(%esp) # 4-byte Spill + addl %esi, %ebp + movl %ebp, 64(%esp) # 4-byte Spill + mulxl 4(%eax), %esi, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl %ecx, %esi + movl %esi, %ebp + mulxl 8(%eax), %ecx, %esi + movl %esi, 44(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + mulxl 12(%eax), %ebx, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + mulxl 16(%eax), %edi, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + mulxl 20(%eax), %esi, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + adcl 48(%esp), %esi # 4-byte Folded Reload + mulxl 24(%eax), %edx, %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + sbbl %ecx, %ecx + andl $1, %ecx + addl 60(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 68(%esp) # 4-byte Folded Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %esi # 4-byte Folded Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl 92(%esp), %eax + movl 64(%esp), %ebp # 4-byte Reload + movl %ebp, 
24(%eax) + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl %ebx, 36(%eax) + movl %edi, 40(%eax) + movl %esi, 44(%eax) + movl %edx, 48(%eax) + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%eax) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end96: + .size mcl_fpDbl_mulPre7Lbmi2, .Lfunc_end96-mcl_fpDbl_mulPre7Lbmi2 + + .globl mcl_fpDbl_sqrPre7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre7Lbmi2,@function +mcl_fpDbl_sqrPre7Lbmi2: # @mcl_fpDbl_sqrPre7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $80, %esp + movl 104(%esp), %ecx + movl (%ecx), %ebx + movl 4(%ecx), %eax + movl %eax, %edx + mulxl %ebx, %esi, %edi + movl %esi, 56(%esp) # 4-byte Spill + movl %edi, 76(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl %ebx, %ebp, %edx + movl %ebp, 44(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 8(%ecx), %edx + movl %edx, 68(%esp) # 4-byte Spill + mulxl %ebx, %edx, %esi + adcl %edi, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 12(%ecx), %edx + movl %edx, 64(%esp) # 4-byte Spill + mulxl %ebx, %edi, %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl %esi, %edi + movl 16(%ecx), %edx + movl %edx, 60(%esp) # 4-byte Spill + mulxl %ebx, %esi, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 52(%esp), %esi # 4-byte Folded Reload + movl 20(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %ebx, %edx, %ebp + movl %ebp, 36(%esp) # 4-byte Spill + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl 24(%ecx), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %ebx, %ecx, %ebx + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + movl 100(%esp), %ecx + movl 44(%esp), %edx # 4-byte Reload + movl %edx, (%ecx) + adcl $0, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + addl %edx, 72(%esp) # 4-byte Folded 
Spill + movl %eax, %edx + mulxl %eax, %ebx, %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 68(%esp), %edx # 4-byte Reload + mulxl %eax, %ebp, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl %edi, %ebp + movl 64(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl %esi, %edi + movl 60(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, %esi + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %eax + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %edx + sbbl %ecx, %ecx + andl $1, %ecx + addl 76(%esp), %ebx # 4-byte Folded Reload + adcl 56(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %edi # 4-byte Folded Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl %esi, 68(%esp) # 4-byte Folded Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl %eax, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 72(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + movl 104(%esp), %esi + movl (%esi), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 8(%esi), %ecx + mulxl %ecx, %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 4(%esi), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %esi, %ebx + mulxl %ecx, %eax, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ecx, %edx + mulxl %ecx, %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 12(%ebx), %eax + movl %eax, %edx + mulxl %ecx, %edi, 
%edx + movl %edi, 32(%esp) # 4-byte Spill + movl %edx, 72(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl %edi, %edx + movl %edx, %esi + movl 16(%ebx), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %ecx, %edx, %edi + movl %edi, 76(%esp) # 4-byte Spill + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, %edi + movl 20(%ebx), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %ebx, %ebp + mulxl %ecx, %ebx, %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl 24(%ebp), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + sbbl %ebp, %ebp + andl $1, %ebp + movl 24(%esp), %edx # 4-byte Reload + addl %edx, 64(%esp) # 4-byte Folded Spill + movl 20(%esp), %edx # 4-byte Reload + adcl %edx, 60(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 72(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 76(%esp), %ebx # 4-byte Folded Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 52(%esp) # 4-byte Spill + addl 64(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + movl %eax, %edx + mulxl %eax, %edx, %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 44(%esp) # 4-byte Spill + adcl %ebx, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 32(%esp) # 
4-byte Spill + adcl %ecx, %edx + movl %edx, %esi + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %eax + adcl %ebp, %edi + movl %edi, %edx + sbbl %ecx, %ecx + andl $1, %ecx + movl 52(%esp), %edi # 4-byte Reload + addl %edi, 68(%esp) # 4-byte Folded Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 48(%esp), %edi # 4-byte Folded Reload + movl 28(%esp), %ebx # 4-byte Reload + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl 24(%esp), %ebp # 4-byte Reload + adcl 60(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + adcl %eax, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 64(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 104(%esp), %ecx + movl (%ecx), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 16(%ecx), %eax + mulxl %eax, %edx, %esi + movl %esi, 24(%esp) # 4-byte Spill + addl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl 4(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %eax, %edx, %esi + movl %esi, 20(%esp) # 4-byte Spill + adcl %edi, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 8(%ecx), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %eax, %edx, %esi + movl %esi, 16(%esp) # 4-byte Spill + adcl %ebx, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 12(%ecx), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %ecx, %esi + mulxl %eax, %ecx, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl %ebp, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl %eax, %edx + mulxl %eax, %ecx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + movl 20(%esi), %ecx + movl %ecx, %edx + mulxl %eax, %edx, %ebp + movl %edx, 32(%esp) # 4-byte Spill + movl %ebp, 36(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte 
Reload + adcl %edx, %edi + movl 24(%esi), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %eax, %esi, %eax + movl %eax, (%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + sbbl %ebx, %ebx + andl $1, %ebx + movl 12(%esp), %eax # 4-byte Reload + addl 24(%esp), %eax # 4-byte Folded Reload + movl 20(%esp), %edx # 4-byte Reload + adcl %edx, 72(%esp) # 4-byte Folded Spill + movl 16(%esp), %edx # 4-byte Reload + adcl %edx, 64(%esp) # 4-byte Folded Spill + movl 8(%esp), %edx # 4-byte Reload + adcl %edx, 68(%esp) # 4-byte Folded Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 76(%esp) # 4-byte Spill + adcl %ebp, %esi + adcl (%esp), %ebx # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + mulxl %ecx, %edx, %edi + movl %edi, 28(%esp) # 4-byte Spill + addl %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + mulxl %ecx, %ebp, %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 72(%esp), %ebp # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + mulxl %ecx, %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + mulxl %ecx, %edi, %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 76(%esp) # 4-byte Folded Spill + movl %ecx, %edx + mulxl %ecx, %edx, %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl %esi, %edx + movl %edx, %eax + movl 40(%esp), %edx # 4-byte Reload + mulxl %ecx, %ecx, %edx + adcl %ebx, %ecx + movl %ecx, %ebx + sbbl %ecx, %ecx + andl $1, %ecx + addl 28(%esp), %ebp # 4-byte Folded Reload + movl 52(%esp), %esi # 4-byte Reload + adcl %esi, 72(%esp) # 4-byte Folded Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl 76(%esp), %esi # 4-byte Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 
68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + adcl %edx, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%eax) + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%eax) + movl 104(%esp), %eax + movl 24(%eax), %edx + mulxl (%eax), %ecx, %ebx + movl %ebx, 64(%esp) # 4-byte Spill + addl %ebp, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + mulxl 4(%eax), %ecx, %ebx + movl %ebx, 60(%esp) # 4-byte Spill + adcl 72(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + mulxl 8(%eax), %ecx, %ebx + movl %ebx, 72(%esp) # 4-byte Spill + adcl %edi, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + mulxl 12(%eax), %ebx, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + adcl %esi, %ebx + mulxl 16(%eax), %edi, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 44(%esp), %edi # 4-byte Folded Reload + mulxl 20(%eax), %esi, %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 48(%esp), %esi # 4-byte Folded Reload + mulxl %edx, %edx, %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + sbbl %ecx, %ecx + andl $1, %ecx + addl 64(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl %eax, 76(%esp) # 4-byte Folded Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + adcl 40(%esp), %edi # 4-byte Folded Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + movl 100(%esp), %eax + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 64(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 76(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl %ebx, 36(%eax) + movl %edi, 40(%eax) + movl %esi, 44(%eax) + movl %edx, 48(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%eax) + addl $80, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end97: + .size mcl_fpDbl_sqrPre7Lbmi2, .Lfunc_end97-mcl_fpDbl_sqrPre7Lbmi2 + + .globl 
mcl_fp_mont7Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont7Lbmi2,@function +mcl_fp_mont7Lbmi2: # @mcl_fp_mont7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $116, %esp + movl 140(%esp), %eax + movl 24(%eax), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 144(%esp), %ecx + movl (%ecx), %ecx + mulxl %ecx, %edx, %esi + movl %edx, 112(%esp) # 4-byte Spill + movl 20(%eax), %edx + movl %edx, 84(%esp) # 4-byte Spill + mulxl %ecx, %edi, %edx + movl %edi, 108(%esp) # 4-byte Spill + movl %edx, 52(%esp) # 4-byte Spill + movl 16(%eax), %edx + movl %edx, 80(%esp) # 4-byte Spill + mulxl %ecx, %edx, %ebx + movl %edx, 104(%esp) # 4-byte Spill + movl 8(%eax), %edx + movl %edx, 68(%esp) # 4-byte Spill + mulxl %ecx, %edi, %edx + movl %edi, 96(%esp) # 4-byte Spill + movl %edx, 100(%esp) # 4-byte Spill + movl (%eax), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 56(%esp) # 4-byte Spill + mulxl %ecx, %edi, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %ebp, %edx + mulxl %ecx, %ebp, %edx + addl %edi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl %edx, 48(%esp) # 4-byte Folded Spill + movl 12(%eax), %edx + movl %edx, 64(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %eax + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + adcl 108(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 148(%esp), %ebx + movl -4(%ebx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ebp, %edx + imull %eax, %edx + movl (%ebx), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 4(%ebx), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + mulxl %ecx, %esi, %ecx + mulxl %edi, %edi, %eax + movl %edi, 8(%esp) # 4-byte Spill + addl %esi, %eax + movl %eax, 
40(%esp) # 4-byte Spill + movl 8(%ebx), %esi + movl %esi, 104(%esp) # 4-byte Spill + mulxl %esi, %eax, %esi + adcl %ecx, %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ecx + adcl %esi, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 16(%ebx), %esi + movl %esi, 96(%esp) # 4-byte Spill + mulxl %esi, %eax, %esi + adcl %ecx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%ebx), %eax + movl %eax, 92(%esp) # 4-byte Spill + mulxl %eax, %eax, %edi + adcl %esi, %eax + movl %eax, %ecx + movl 24(%ebx), %eax + movl %eax, 88(%esp) # 4-byte Spill + mulxl %eax, %edx, %eax + adcl %edi, %edx + adcl $0, %eax + addl %ebp, 8(%esp) # 4-byte Folded Spill + movl 44(%esp), %esi # 4-byte Reload + adcl %esi, 40(%esp) # 4-byte Folded Spill + movl 48(%esp), %esi # 4-byte Reload + adcl %esi, 32(%esp) # 4-byte Folded Spill + movl 12(%esp), %esi # 4-byte Reload + adcl %esi, 28(%esp) # 4-byte Folded Spill + movl 16(%esp), %esi # 4-byte Reload + adcl %esi, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, (%esp) # 4-byte Spill + movl 144(%esp), %edx + movl 4(%edx), %edx + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + mulxl 84(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + mulxl 56(%esp), %ebx, %esi # 4-byte Folded Reload + mulxl 60(%esp), %eax, %edi # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + addl %ebx, %edi + movl %edi, 52(%esp) # 4-byte Spill + mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload + adcl %esi, %edi + movl %edi, 48(%esp) # 4-byte Spill + mulxl 64(%esp), %ebp, %ebx # 4-byte Folded Reload + adcl %eax, %ebp + 
mulxl 80(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebx, %esi + adcl %ecx, %eax + movl %eax, %ecx + movl 44(%esp), %ebx # 4-byte Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl 20(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 36(%esp), %eax # 4-byte Reload + addl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl %edx, %ebx + sbbl %ecx, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl %eax, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, %edi + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, 28(%esp) # 4-byte Spill + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %eax, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %esi, %ecx + movl %ecx, %esi + mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload + adcl %eax, %ebp + adcl $0, %ecx + movl %ecx, %edx + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 8(%esp), %ecx # 4-byte Reload + addl 36(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl %ecx, 32(%esp) # 
4-byte Folded Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + movl 20(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + movl 12(%esp), %ecx # 4-byte Reload + adcl %ecx, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl 8(%eax), %edx + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + addl %ecx, %edi + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, %esi + movl 48(%esp), %edx # 4-byte Reload + adcl 16(%esp), %edx # 4-byte Folded Reload + movl 52(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 44(%esp), %ecx # 4-byte Reload + addl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + adcl %edi, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl %ebp, %esi + movl %esi, 12(%esp) # 4-byte 
Spill + adcl 4(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 28(%esp) # 4-byte Spill + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %edi + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %esi, %ecx + movl %ecx, %esi + mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload + adcl %eax, %ebp + adcl $0, %ecx + movl %ecx, %edx + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 8(%esp), %ecx # 4-byte Reload + addl 44(%esp), %ecx # 4-byte Folded Reload + movl 16(%esp), %ecx # 4-byte Reload + adcl %ecx, 32(%esp) # 4-byte Folded Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl %ebx, 20(%esp) # 4-byte Folded Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 48(%esp), %ebp # 4-byte Folded Reload + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl 12(%eax), %edx + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 
24(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + addl %ecx, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, %esi + movl 44(%esp), %edx # 4-byte Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + movl 48(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 36(%esp), %ecx # 4-byte Reload + addl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + movl 16(%esp), %edi # 4-byte Reload + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl (%esp), %ebx # 4-byte Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl %ebp, %esi + movl %esi, 12(%esp) # 4-byte Spill + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 28(%esp) # 4-byte Spill + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %edi + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %eax, %ecx + 
movl %ecx, 20(%esp) # 4-byte Spill + mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %esi, %ecx + movl %ecx, %esi + mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload + adcl %eax, %ebp + adcl $0, %ecx + movl %ecx, %edx + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 8(%esp), %ecx # 4-byte Reload + addl 36(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl %ecx, 32(%esp) # 4-byte Folded Spill + movl 24(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl %ebx, 20(%esp) # 4-byte Folded Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl 16(%eax), %edx + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + addl %ecx, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, %esi + movl 44(%esp), %edx # 4-byte Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + movl 48(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 36(%esp), %ecx # 4-byte Reload + addl 
32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + movl 16(%esp), %edi # 4-byte Reload + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl (%esp), %ebx # 4-byte Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl %ebp, %esi + movl %esi, 12(%esp) # 4-byte Spill + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 28(%esp) # 4-byte Spill + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %edi + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %esi, %ecx + movl %ecx, %esi + mulxl 88(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + movl %edx, %ebp + adcl $0, %ecx + movl %ecx, %edx + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 8(%esp), %ecx # 4-byte Reload + addl 36(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl %ecx, 32(%esp) # 4-byte Folded Spill + movl 24(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl %ebx, 20(%esp) # 4-byte Folded Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + 
adcl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl 20(%eax), %edx + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + mulxl 68(%esp), %eax, %ebp # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + addl %ecx, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %ebp, %ecx + movl %ecx, %ebp + mulxl 80(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %eax, %ebx + adcl %esi, %ecx + movl %ecx, %esi + movl 48(%esp), %edx # 4-byte Reload + adcl (%esp), %edx # 4-byte Folded Reload + movl 4(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 36(%esp), %ecx # 4-byte Reload + addl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + movl 16(%esp), %edi # 4-byte Reload + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + sbbl %eax, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + 
mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 40(%esp) # 4-byte Spill + mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %edi + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %eax, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + mulxl 92(%esp), %eax, %ebx # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, %esi + mulxl 88(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %ebx, %ecx + movl %ecx, %ebx + adcl $0, %eax + movl %eax, %ecx + movl 44(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 8(%esp), %eax # 4-byte Reload + addl 36(%esp), %eax # 4-byte Folded Reload + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl %ebp, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 144(%esp), %edx + movl 24(%edx), %edx + mulxl 56(%esp), %ebx, %esi # 4-byte Folded Reload + mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + addl %ebx, %eax + movl %eax, 56(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + mulxl 68(%esp), %ebp, %edi # 4-byte Folded Reload + adcl %esi, %ebp + mulxl 64(%esp), %eax, %esi # 4-byte Folded Reload + adcl %edi, %eax + movl %eax, 68(%esp) # 4-byte Spill + mulxl 84(%esp), %edi, %eax # 4-byte Folded 
Reload + movl %edi, 84(%esp) # 4-byte Spill + mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload + adcl %esi, %ebx + adcl 84(%esp), %edx # 4-byte Folded Reload + movl %edx, %esi + adcl %ecx, %eax + movl %eax, %ecx + movl 72(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 60(%esp), %edi # 4-byte Reload + addl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + adcl 52(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 52(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl %eax, 68(%esp) # 4-byte Folded Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + imull %edi, %edx + mulxl 108(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 76(%esp) # 4-byte Spill + mulxl 112(%esp), %ecx, %esi # 4-byte Folded Reload + addl %eax, %ecx + movl %ecx, 80(%esp) # 4-byte Spill + mulxl 104(%esp), %eax, %edi # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, 84(%esp) # 4-byte Spill + mulxl 100(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %edi, %ecx + movl %edx, %edi + mulxl 96(%esp), %ebx, %ebp # 4-byte Folded Reload + adcl %eax, %ebx + mulxl 92(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebp, %esi + movl %edi, %edx + mulxl 88(%esp), %edi, %ebp # 4-byte Folded Reload + adcl %eax, %edi + adcl $0, %ebp + andl $1, 64(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + addl 60(%esp), %eax # 4-byte Folded Reload + movl 80(%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + 
adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 76(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 68(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + adcl 48(%esp), %edi # 4-byte Folded Reload + adcl 72(%esp), %ebp # 4-byte Folded Reload + movl 64(%esp), %eax # 4-byte Reload + adcl $0, %eax + subl 108(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + sbbl 112(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + sbbl 104(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 72(%esp) # 4-byte Spill + sbbl 100(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 100(%esp) # 4-byte Spill + movl %esi, %ebx + sbbl 96(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 104(%esp) # 4-byte Spill + movl %edi, %ebx + sbbl 92(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 108(%esp) # 4-byte Spill + movl %ebp, %ebx + sbbl 88(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 112(%esp) # 4-byte Spill + sbbl $0, %eax + andl $1, %eax + movl %eax, %ecx + jne .LBB98_2 +# BB#1: + movl 60(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill +.LBB98_2: + movl 136(%esp), %ebx + movl 80(%esp), %edx # 4-byte Reload + movl %edx, (%ebx) + movl %ebx, %edx + testb %cl, %cl + movl 84(%esp), %ebx # 4-byte Reload + jne .LBB98_4 +# BB#3: + movl 64(%esp), %ebx # 4-byte Reload +.LBB98_4: + movl %ebx, 4(%edx) + movl 76(%esp), %ecx # 4-byte Reload + jne .LBB98_6 +# BB#5: + movl 72(%esp), %ecx # 4-byte Reload +.LBB98_6: + movl %ecx, 8(%edx) + movl 68(%esp), %eax # 4-byte Reload + jne .LBB98_8 +# BB#7: + movl 100(%esp), %eax # 4-byte Reload +.LBB98_8: + movl %eax, 12(%edx) + jne .LBB98_10 +# BB#9: + movl 104(%esp), %esi # 4-byte Reload +.LBB98_10: + movl %esi, 16(%edx) + jne .LBB98_12 +# BB#11: + movl 108(%esp), %edi # 4-byte Reload +.LBB98_12: + movl %edi, 20(%edx) + jne .LBB98_14 +# 
BB#13: + movl 112(%esp), %ebp # 4-byte Reload +.LBB98_14: + movl %ebp, 24(%edx) + addl $116, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end98: + .size mcl_fp_mont7Lbmi2, .Lfunc_end98-mcl_fp_mont7Lbmi2 + + .globl mcl_fp_montNF7Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF7Lbmi2,@function +mcl_fp_montNF7Lbmi2: # @mcl_fp_montNF7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $104, %esp + movl 128(%esp), %eax + movl (%eax), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 132(%esp), %ecx + movl (%ecx), %ebp + mulxl %ebp, %ecx, %esi + movl %edi, %edx + mulxl %ebp, %edi, %edx + movl %edi, 96(%esp) # 4-byte Spill + addl %ecx, %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 8(%eax), %edx + movl %edx, 60(%esp) # 4-byte Spill + mulxl %ebp, %ecx, %edi + adcl %esi, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 12(%eax), %edx + movl %edx, 56(%esp) # 4-byte Spill + mulxl %ebp, %ecx, %ebx + adcl %edi, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 16(%eax), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %ebp, %edx, %ecx + adcl %ebx, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 20(%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %ebp, %edx, %esi + adcl %ecx, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 24(%eax), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %ebp, %ebp, %eax + adcl %esi, %ebp + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 136(%esp), %edi + movl -4(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + movl %esi, %edx + imull %eax, %edx + movl (%edi), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %eax + movl %eax, 28(%esp) # 4-byte Spill + addl %esi, %ecx + movl 4(%edi), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl 
8(%edi), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + mulxl %ecx, %esi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + adcl 84(%esp), %esi # 4-byte Folded Reload + movl 16(%edi), %eax + movl %eax, 84(%esp) # 4-byte Spill + mulxl %eax, %eax, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + movl 20(%edi), %eax + movl %eax, 80(%esp) # 4-byte Spill + mulxl %eax, %eax, %ebx + movl %ebx, 8(%esp) # 4-byte Spill + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebx + movl 24(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + mulxl %eax, %edx, %eax + adcl %ebp, %edx + movl %edx, %edi + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 28(%esp), %ebp # 4-byte Reload + addl %ebp, 36(%esp) # 4-byte Folded Spill + movl 24(%esp), %ebp # 4-byte Reload + adcl %ebp, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 4(%eax), %edx + mulxl 64(%esp), %ecx, %esi # 4-byte Folded Reload + mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %eax + movl %eax, 4(%esp) # 4-byte Spill + mulxl 60(%esp), %eax, %edi # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, %ecx + mulxl 56(%esp), %esi, %ebx # 4-byte Folded Reload + adcl %edi, %esi + mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload + adcl %ebx, %eax + movl %eax, 28(%esp) # 4-byte Spill + mulxl 48(%esp), %eax, %edi # 4-byte Folded 
Reload + adcl %ebp, %eax + movl %eax, %ebx + mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload + adcl %edi, %ebp + adcl $0, %eax + movl %eax, %edx + movl 8(%esp), %eax # 4-byte Reload + addl 36(%esp), %eax # 4-byte Folded Reload + movl 4(%esp), %edi # 4-byte Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 16(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %edx + movl %eax, %ebx + imull 72(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + addl %ebx, %eax + mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, 32(%esp) # 4-byte Spill + mulxl 92(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl %esi, %eax + movl %eax, %esi + mulxl 84(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + mulxl 80(%esp), %eax, %ebx # 4-byte Folded Reload + movl %ebx, 8(%esp) # 4-byte Spill + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload + adcl %ebp, %eax + movl 40(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl 36(%esp), %ebp # 4-byte Reload + addl %ebp, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 16(%esp), 
%ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + movl 12(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl %edx, %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 8(%eax), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 68(%esp), %esi, %edi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + addl %ecx, %edi + mulxl 60(%esp), %ecx, %ebx # 4-byte Folded Reload + adcl %eax, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + mulxl 56(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebx, %esi + mulxl 52(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %eax, %ebx + mulxl 48(%esp), %eax, %ebp # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + adcl %ecx, %eax + movl %eax, %ebp + mulxl 44(%esp), %edx, %eax # 4-byte Folded Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + movl 36(%esp), %ecx # 4-byte Reload + addl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl 8(%esp), %ecx # 4-byte Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + movl %ebx, %edx + imull 72(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %ebp # 4-byte Folded Reload + movl %ebp, 28(%esp) # 4-byte Spill + addl %ebx, %eax + mulxl 96(%esp), %ebp, %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + adcl %edi, %ebp + mulxl 92(%esp), %ebx, %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl %ecx, %ebx + mulxl 
88(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl %esi, %edi + mulxl 84(%esp), %esi, %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + mulxl 80(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + mulxl 76(%esp), %edx, %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 36(%esp) # 4-byte Spill + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 12(%eax), %edx + mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload + mulxl 68(%esp), %edi, %ecx # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + addl %esi, %ecx + mulxl 60(%esp), %esi, %edi # 4-byte Folded Reload + adcl %eax, %esi + mulxl 56(%esp), %eax, %ebx # 4-byte Folded Reload + adcl %edi, %eax + movl %eax, %edi + mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload + adcl %ebx, %eax + movl %eax, 24(%esp) # 4-byte Spill + mulxl 48(%esp), %eax, %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, %ebx + mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl $0, %eax + movl 32(%esp), %edx # 4-byte Reload + addl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl 28(%esp), 
%ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + movl 12(%esp), %edx # 4-byte Reload + adcl %edx, 24(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + movl %edi, %edx + imull 72(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + addl %edi, %eax + mulxl 96(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + mulxl 92(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl %esi, %eax + movl %eax, %esi + mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + mulxl 84(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + mulxl 80(%esp), %eax, %ebx # 4-byte Folded Reload + movl %ebx, (%esp) # 4-byte Spill + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload + adcl %ebp, %eax + movl 36(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + addl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + movl 12(%esp), %esi # 4-byte Reload + adcl %esi, 28(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + movl 4(%esp), %ecx # 4-byte Reload + adcl %ecx, 24(%esp) # 4-byte Folded Spill + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 
8(%esp) # 4-byte Spill + adcl %edx, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 16(%eax), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 68(%esp), %esi, %edi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + addl %ecx, %edi + mulxl 60(%esp), %ecx, %ebx # 4-byte Folded Reload + adcl %eax, %ecx + mulxl 56(%esp), %eax, %esi # 4-byte Folded Reload + adcl %ebx, %eax + movl %eax, %ebx + mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, 40(%esp) # 4-byte Spill + mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, %esi + mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl $0, %eax + movl %eax, %edx + movl 32(%esp), %eax # 4-byte Reload + addl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ebx # 4-byte Reload + movl %ebx, %edx + imull 72(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + addl %ebx, %eax + mulxl 96(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + adcl %edi, %ebx + mulxl 92(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %eax # 4-byte 
Folded Reload + movl %eax, %ecx + mulxl 84(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + mulxl 80(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload + adcl %ebp, %eax + movl 36(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + movl 8(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + movl 4(%esp), %ecx # 4-byte Reload + adcl %ecx, 24(%esp) # 4-byte Folded Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl %edx, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 20(%eax), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 68(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + addl %ecx, %esi + mulxl 60(%esp), %ebp, %ecx # 4-byte Folded Reload + adcl %eax, %ebp + mulxl 56(%esp), %eax, %edi # 4-byte Folded Reload + adcl %ecx, %eax + movl %eax, 40(%esp) # 4-byte Spill + mulxl 52(%esp), %ecx, %ebx # 4-byte Folded Reload + adcl %edi, %ecx + mulxl 48(%esp), %eax, %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + adcl %ebx, %eax + movl %eax, %edi + mulxl 44(%esp), %ebx, %eax # 4-byte Folded Reload + adcl 4(%esp), %ebx # 4-byte Folded Reload + adcl $0, %eax + movl %eax, %edx + movl 32(%esp), %eax # 4-byte Reload + addl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 
40(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, (%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, %edx + imull 72(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + addl %ecx, %eax + mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + mulxl 92(%esp), %esi, %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl %ebp, %esi + mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + mulxl 84(%esp), %eax, %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl 20(%esp), %eax # 4-byte Folded Reload + mulxl 80(%esp), %ebp, %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + adcl (%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + mulxl 76(%esp), %edi, %edx # 4-byte Folded Reload + adcl %ebx, %edi + movl %edi, %ebx + movl 36(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl 28(%esp), %ebp # 4-byte Reload + addl %ebp, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 4(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + adcl %edx, %edi + movl %edi, 36(%esp) # 4-byte Spill 
+ movl 132(%esp), %eax + movl 24(%eax), %edx + mulxl 64(%esp), %edi, %ebx # 4-byte Folded Reload + mulxl 68(%esp), %eax, %ebp # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + addl %edi, %ebp + mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %ebx, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %eax, %ebx + mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload + adcl %ecx, %esi + mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, %ecx + mulxl 44(%esp), %edx, %eax # 4-byte Folded Reload + adcl 60(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + movl 64(%esp), %edi # 4-byte Reload + addl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 64(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl 68(%esp), %edi # 4-byte Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + movl 64(%esp), %eax # 4-byte Reload + imull %eax, %edx + mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + addl %eax, %esi + mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 64(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl %edx, %ecx + mulxl 92(%esp), %eax, %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %ecx, %edx + mulxl 88(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl %ebx, %ebp + movl %ecx, %edx + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 
32(%esp) # 4-byte Spill + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %ecx, %edx + mulxl 80(%esp), %edi, %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %ecx, %edx + mulxl 76(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl 68(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl 36(%esp), %edx # 4-byte Reload + addl 56(%esp), %edx # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + movl %edx, %ecx + subl 100(%esp), %ecx # 4-byte Folded Reload + sbbl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl %ebp, %eax + sbbl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + sbbl 88(%esp), %esi # 4-byte Folded Reload + movl %esi, 96(%esp) # 4-byte Spill + sbbl 84(%esp), %edi # 4-byte Folded Reload + movl %edi, 100(%esp) # 4-byte Spill + sbbl 80(%esp), %ebx # 4-byte Folded Reload + movl 68(%esp), %edi # 4-byte Reload + sbbl 76(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + sarl $31, %eax + testl %eax, %eax + js .LBB99_2 +# BB#1: + movl %ecx, %edx +.LBB99_2: + movl 124(%esp), %esi + movl %edx, (%esi) + movl 72(%esp), %eax # 4-byte Reload + js .LBB99_4 +# BB#3: + movl 52(%esp), %eax # 4-byte Reload +.LBB99_4: + movl %eax, 4(%esi) + movl 68(%esp), %eax # 4-byte Reload + movl 64(%esp), %ecx # 4-byte Reload + movl 60(%esp), %edx # 4-byte Reload + js .LBB99_6 +# BB#5: + movl 92(%esp), %ebp # 4-byte Reload 
+.LBB99_6: + movl %ebp, 8(%esi) + movl %esi, %ebp + movl 56(%esp), %esi # 4-byte Reload + js .LBB99_8 +# BB#7: + movl 96(%esp), %esi # 4-byte Reload +.LBB99_8: + movl %esi, 12(%ebp) + js .LBB99_10 +# BB#9: + movl 100(%esp), %edx # 4-byte Reload +.LBB99_10: + movl %edx, 16(%ebp) + js .LBB99_12 +# BB#11: + movl %ebx, %ecx +.LBB99_12: + movl %ecx, 20(%ebp) + js .LBB99_14 +# BB#13: + movl %edi, %eax +.LBB99_14: + movl %eax, 24(%ebp) + addl $104, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end99: + .size mcl_fp_montNF7Lbmi2, .Lfunc_end99-mcl_fp_montNF7Lbmi2 + + .globl mcl_fp_montRed7Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed7Lbmi2,@function +mcl_fp_montRed7Lbmi2: # @mcl_fp_montRed7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + movl 136(%esp), %edi + movl -4(%edi), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl (%edi), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl (%eax), %edx + movl %edx, 72(%esp) # 4-byte Spill + imull %ecx, %edx + movl 24(%edi), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %ecx + movl %ebx, 68(%esp) # 4-byte Spill + movl %ecx, 44(%esp) # 4-byte Spill + movl 20(%edi), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %ecx + movl %ebx, 60(%esp) # 4-byte Spill + movl %ecx, 40(%esp) # 4-byte Spill + movl 16(%edi), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %ecx + movl %ebx, 56(%esp) # 4-byte Spill + movl %ecx, 36(%esp) # 4-byte Spill + movl 4(%edi), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %ebp + mulxl %esi, %ebx, %esi + movl %ebx, 64(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 8(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + mulxl %ecx, %esi, %ecx + adcl %ebp, %esi + movl %esi, %ebp + movl 12(%edi), %esi + movl %esi, 84(%esp) # 4-byte Spill + mulxl %esi, %esi, %edx + adcl %ecx, %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 
4-byte Folded Reload + movl %edx, %edi + movl 36(%esp), %esi # 4-byte Reload + adcl 60(%esp), %esi # 4-byte Folded Reload + movl 40(%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx # 4-byte Folded Reload + movl 44(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl 64(%esp), %ebx # 4-byte Reload + addl 72(%esp), %ebx # 4-byte Folded Reload + movl 28(%esp), %ebx # 4-byte Reload + adcl 4(%eax), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + adcl 8(%eax), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 12(%eax), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 16(%eax), %edi + movl %edi, 16(%esp) # 4-byte Spill + adcl 20(%eax), %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl 24(%eax), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 28(%eax), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%eax), %ecx + movl 48(%eax), %edx + movl 44(%eax), %esi + movl 40(%eax), %edi + movl 36(%eax), %ebp + movl 32(%eax), %eax + adcl $0, %eax + movl %eax, 12(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 56(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 72(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ebx, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %ebx # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + mulxl 88(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, 48(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %ebp # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + mulxl 104(%esp), 
%ecx, %edi # 4-byte Folded Reload + adcl %ebx, %ecx + mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload + adcl %edi, %ebx + adcl $0, %edx + movl 8(%esp), %eax # 4-byte Reload + addl 28(%esp), %eax # 4-byte Folded Reload + movl 32(%esp), %edi # 4-byte Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 36(%esp) # 4-byte Spill + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 60(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %edi, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + mulxl 84(%esp), %ebx, %ebp # 4-byte Folded Reload + mulxl 96(%esp), %eax, %esi # 4-byte Folded Reload + mulxl 92(%esp), %ecx, %edi # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + addl %eax, %edi + movl %edi, 8(%esp) # 4-byte Spill + mulxl 88(%esp), %edi, %eax # 4-byte Folded Reload + adcl %esi, %edi + adcl %ebx, %eax + movl %eax, %ebx + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebp, %esi + mulxl 104(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 12(%esp), %ebp # 4-byte Reload + addl 32(%esp), %ebp # 4-byte Folded Reload + movl 
8(%esp), %ebp # 4-byte Reload + adcl 48(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 60(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ebp, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + mulxl 96(%esp), %eax, %ebx # 4-byte Folded Reload + mulxl 92(%esp), %esi, %ecx # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + addl %eax, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + mulxl 88(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebx, %esi + movl %esi, %ebx + adcl %edi, %eax + movl %eax, %edi + mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload + adcl 12(%esp), %esi # 4-byte Folded Reload + mulxl 104(%esp), %edx, %eax # 4-byte Folded Reload + adcl %ecx, %edx + movl %edx, %ecx + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, %edx + movl 48(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %ebp, 20(%esp) # 4-byte Folded Spill + movl 16(%esp), %ebp # 4-byte Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 
4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, 60(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ebp, %edx + movl %ebp, %edi + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + mulxl 84(%esp), %eax, %ebx # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload + mulxl 92(%esp), %esi, %ebp # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + addl %eax, %ebp + mulxl 88(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, 44(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebx, %esi + mulxl 104(%esp), %ebx, %edx # 4-byte Folded Reload + adcl %eax, %ebx + adcl 24(%esp), %edx # 4-byte Folded Reload + movl 56(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %edi, 20(%esp) # 4-byte Folded Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 40(%esp), %edi # 4-byte Reload + adcl %edi, 44(%esp) # 4-byte Folded Spill + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + adcl $0, 72(%esp) # 4-byte Folded 
Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ebp, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl %eax, 60(%esp) # 4-byte Spill + mulxl 84(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + mulxl 96(%esp), %ecx, %edi # 4-byte Folded Reload + mulxl 92(%esp), %esi, %eax # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + addl %ecx, %eax + movl %eax, 52(%esp) # 4-byte Spill + mulxl 88(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %edi, %ecx + movl %ecx, %edi + adcl %ebx, %eax + movl %eax, %ebx + mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + mulxl 104(%esp), %edx, %eax # 4-byte Folded Reload + adcl %ecx, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, %edx + movl 60(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl %ebp, 20(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl %esi, 48(%esp) # 4-byte Folded Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl 76(%esp), %edx # 4-byte Reload + imull %eax, %edx + mulxl 92(%esp), %eax, %ecx # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + mulxl 96(%esp), %eax, %esi # 4-byte Folded Reload + addl %ecx, %eax + movl %eax, 56(%esp) # 4-byte Spill + mulxl 88(%esp), %eax, %edi # 
4-byte Folded Reload + adcl %esi, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, %esi + mulxl 84(%esp), %ebp, %eax # 4-byte Folded Reload + adcl %edi, %ebp + mulxl 100(%esp), %ecx, %edi # 4-byte Folded Reload + adcl %eax, %ecx + mulxl 104(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + adcl %edi, %ebx + mulxl 80(%esp), %edi, %eax # 4-byte Folded Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + adcl $0, %eax + movl 64(%esp), %edx # 4-byte Reload + addl 52(%esp), %edx # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %esi # 4-byte Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 64(%esp) # 4-byte Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + adcl 60(%esp), %edi # 4-byte Folded Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl 68(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %edx, %ebp + subl 92(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 68(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + sbbl 96(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + sbbl 88(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 88(%esp) # 4-byte Spill + movl %edx, %ebp + sbbl 84(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 92(%esp) # 4-byte Spill + movl %ebx, %ecx + sbbl 100(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%esp) # 4-byte Spill + movl %edi, %ecx + sbbl 104(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 104(%esp) # 4-byte Spill + movl %eax, %edx + movl %eax, %ecx + sbbl 80(%esp), %edx # 4-byte Folded Reload + movl %edx, 100(%esp) # 4-byte Spill + sbbl $0, %esi + andl $1, %esi + jne .LBB100_2 +# BB#1: + movl 68(%esp), %ebp # 4-byte Reload +.LBB100_2: + movl 128(%esp), %edx + movl %ebp, 
(%edx) + movl %esi, %eax + testb %al, %al + movl 76(%esp), %ebp # 4-byte Reload + jne .LBB100_4 +# BB#3: + movl 72(%esp), %ebp # 4-byte Reload +.LBB100_4: + movl %ebp, 4(%edx) + movl %ecx, %eax + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB100_6 +# BB#5: + movl 88(%esp), %ecx # 4-byte Reload +.LBB100_6: + movl %ecx, 8(%edx) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB100_8 +# BB#7: + movl 92(%esp), %ecx # 4-byte Reload +.LBB100_8: + movl %ecx, 12(%edx) + jne .LBB100_10 +# BB#9: + movl 96(%esp), %ebx # 4-byte Reload +.LBB100_10: + movl %ebx, 16(%edx) + jne .LBB100_12 +# BB#11: + movl 104(%esp), %edi # 4-byte Reload +.LBB100_12: + movl %edi, 20(%edx) + jne .LBB100_14 +# BB#13: + movl 100(%esp), %eax # 4-byte Reload +.LBB100_14: + movl %eax, 24(%edx) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end100: + .size mcl_fp_montRed7Lbmi2, .Lfunc_end100-mcl_fp_montRed7Lbmi2 + + .globl mcl_fp_addPre7Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre7Lbmi2,@function +mcl_fp_addPre7Lbmi2: # @mcl_fp_addPre7Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 20(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 8(%eax), %edi + adcl 8(%esi), %edi + movl 16(%esp), %ebx + movl %ecx, (%ebx) + movl 12(%esi), %ecx + movl %edx, 4(%ebx) + movl 16(%esi), %edx + adcl 12(%eax), %ecx + adcl 16(%eax), %edx + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %ecx, 12(%ebx) + movl 20(%esi), %ecx + adcl %edi, %ecx + movl %edx, 16(%ebx) + movl %ecx, 20(%ebx) + movl 24(%eax), %eax + movl 24(%esi), %ecx + adcl %eax, %ecx + movl %ecx, 24(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end101: + .size mcl_fp_addPre7Lbmi2, .Lfunc_end101-mcl_fp_addPre7Lbmi2 + + .globl mcl_fp_subPre7Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre7Lbmi2,@function +mcl_fp_subPre7Lbmi2: # @mcl_fp_subPre7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi 
+ movl 24(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + xorl %eax, %eax + movl 28(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %esi + movl 8(%ecx), %ebx + sbbl 8(%edi), %ebx + movl 20(%esp), %ebp + movl %edx, (%ebp) + movl 12(%ecx), %edx + sbbl 12(%edi), %edx + movl %esi, 4(%ebp) + movl 16(%ecx), %esi + sbbl 16(%edi), %esi + movl %ebx, 8(%ebp) + movl 20(%edi), %ebx + movl %edx, 12(%ebp) + movl 20(%ecx), %edx + sbbl %ebx, %edx + movl %esi, 16(%ebp) + movl %edx, 20(%ebp) + movl 24(%edi), %edx + movl 24(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 24(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end102: + .size mcl_fp_subPre7Lbmi2, .Lfunc_end102-mcl_fp_subPre7Lbmi2 + + .globl mcl_fp_shr1_7Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_7Lbmi2,@function +mcl_fp_shr1_7Lbmi2: # @mcl_fp_shr1_7Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 20(%esi) + shrl %eax + movl %eax, 24(%esi) + popl %esi + retl +.Lfunc_end103: + .size mcl_fp_shr1_7Lbmi2, .Lfunc_end103-mcl_fp_shr1_7Lbmi2 + + .globl mcl_fp_add7Lbmi2 + .align 16, 0x90 + .type mcl_fp_add7Lbmi2,@function +mcl_fp_add7Lbmi2: # @mcl_fp_add7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %ebp + movl (%ebp), %eax + movl 4(%ebp), %edi + movl 44(%esp), %ecx + addl (%ecx), %eax + adcl 4(%ecx), %edi + movl 8(%ebp), %esi + adcl 8(%ecx), %esi + movl 12(%ecx), %edx + movl 16(%ecx), %ebx + adcl 12(%ebp), %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl 16(%ebp), %ebx + movl %ebx, 12(%esp) # 4-byte 
Spill + movl %ebp, %ebx + movl 20(%ecx), %ebp + adcl 20(%ebx), %ebp + movl 24(%ecx), %edx + adcl 24(%ebx), %edx + movl 40(%esp), %ecx + movl %eax, (%ecx) + movl %edi, 4(%ecx) + movl %esi, 8(%ecx) + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%ecx) + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%ecx) + movl %ebp, 20(%ecx) + movl %edx, 24(%ecx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 52(%esp), %ecx + subl (%ecx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 12(%esp), %ecx # 4-byte Reload + movl 52(%esp), %eax + sbbl 4(%eax), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %eax, %edi + sbbl 8(%edi), %esi + movl %esi, (%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 16(%edi), %ecx + movl %ecx, %esi + sbbl 20(%edi), %ebp + sbbl 24(%edi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB104_2 +# BB#1: # %nocarry + movl 8(%esp), %ecx # 4-byte Reload + movl 40(%esp), %eax + movl %eax, %ebx + movl %ecx, (%ebx) + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%ebx) + movl (%esp), %eax # 4-byte Reload + movl %eax, 8(%ebx) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebx) + movl %esi, 16(%ebx) + movl %ebp, 20(%ebx) + movl %edx, 24(%ebx) +.LBB104_2: # %carry + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end104: + .size mcl_fp_add7Lbmi2, .Lfunc_end104-mcl_fp_add7Lbmi2 + + .globl mcl_fp_addNF7Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF7Lbmi2,@function +mcl_fp_addNF7Lbmi2: # @mcl_fp_addNF7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 80(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 76(%esp), %esi + addl (%esi), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 4(%esi), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 20(%eax), %ebx + movl 16(%eax), %edi + movl 12(%eax), %ebp + movl 8(%eax), %ecx + adcl 8(%esi), %ecx + movl 
%ecx, 24(%esp) # 4-byte Spill + adcl 12(%esi), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 36(%esp) # 4-byte Spill + adcl 20(%esi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 24(%esi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 44(%esp), %esi # 4-byte Reload + subl (%eax), %esi + movl %esi, (%esp) # 4-byte Spill + sbbl 4(%eax), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 8(%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%eax), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + sbbl 16(%eax), %edi + movl %edi, 20(%esp) # 4-byte Spill + sbbl 20(%eax), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + sbbl 24(%eax), %edi + movl %edi, %ecx + sarl $31, %ecx + testl %ecx, %ecx + js .LBB105_2 +# BB#1: + movl (%esp), %esi # 4-byte Reload +.LBB105_2: + movl 72(%esp), %ecx + movl %esi, (%ecx) + movl 28(%esp), %eax # 4-byte Reload + js .LBB105_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB105_4: + movl %eax, 4(%ecx) + movl 48(%esp), %ebp # 4-byte Reload + movl 40(%esp), %ecx # 4-byte Reload + movl 36(%esp), %edx # 4-byte Reload + movl 32(%esp), %esi # 4-byte Reload + movl 24(%esp), %ebx # 4-byte Reload + js .LBB105_6 +# BB#5: + movl 8(%esp), %ebx # 4-byte Reload +.LBB105_6: + movl 72(%esp), %eax + movl %ebx, 8(%eax) + movl %eax, %ebx + js .LBB105_8 +# BB#7: + movl 16(%esp), %esi # 4-byte Reload +.LBB105_8: + movl %esi, 12(%ebx) + js .LBB105_10 +# BB#9: + movl 20(%esp), %edx # 4-byte Reload +.LBB105_10: + movl %edx, 16(%ebx) + js .LBB105_12 +# BB#11: + movl 12(%esp), %ecx # 4-byte Reload +.LBB105_12: + movl %ecx, 20(%ebx) + js .LBB105_14 +# BB#13: + movl %edi, %ebp +.LBB105_14: + movl %ebp, 24(%ebx) + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end105: + .size mcl_fp_addNF7Lbmi2, .Lfunc_end105-mcl_fp_addNF7Lbmi2 + + .globl mcl_fp_sub7Lbmi2 + .align 16, 0x90 
+ .type mcl_fp_sub7Lbmi2,@function +mcl_fp_sub7Lbmi2: # @mcl_fp_sub7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 48(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + xorl %ebx, %ebx + movl 52(%esp), %esi + subl (%esi), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 4(%esi), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 8(%edi), %edx + sbbl 8(%esi), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 12(%edi), %ecx + sbbl 12(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 16(%edi), %eax + sbbl 16(%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 20(%edi), %ebp + sbbl 20(%esi), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 24(%edi), %edi + sbbl 24(%esi), %edi + sbbl $0, %ebx + testb $1, %bl + movl 44(%esp), %ebx + movl 16(%esp), %esi # 4-byte Reload + movl %esi, (%ebx) + movl 20(%esp), %esi # 4-byte Reload + movl %esi, 4(%ebx) + movl %edx, 8(%ebx) + movl %ecx, 12(%ebx) + movl %eax, 16(%ebx) + movl %ebp, 20(%ebx) + movl %edi, 24(%ebx) + je .LBB106_2 +# BB#1: # %carry + movl 56(%esp), %ebp + movl 16(%esp), %ecx # 4-byte Reload + addl (%ebp), %ecx + movl %ecx, (%ebx) + movl 20(%esp), %edx # 4-byte Reload + adcl 4(%ebp), %edx + movl %edx, 4(%ebx) + movl 4(%esp), %ecx # 4-byte Reload + adcl 8(%ebp), %ecx + movl 12(%ebp), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%ebp), %ecx + adcl (%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl %ecx, 16(%ebx) + movl 20(%ebp), %eax + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 24(%ebp), %eax + adcl %edi, %eax + movl %eax, 24(%ebx) +.LBB106_2: # %nocarry + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end106: + .size mcl_fp_sub7Lbmi2, .Lfunc_end106-mcl_fp_sub7Lbmi2 + + .globl mcl_fp_subNF7Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF7Lbmi2,@function +mcl_fp_subNF7Lbmi2: # @mcl_fp_subNF7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, 
%esp + movl 56(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edx + movl 60(%esp), %ecx + subl (%ecx), %esi + movl %esi, 20(%esp) # 4-byte Spill + sbbl 4(%ecx), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 24(%eax), %edx + movl 20(%eax), %esi + movl 16(%eax), %edi + movl 12(%eax), %ebx + movl 8(%eax), %eax + sbbl 8(%ecx), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 12(%ecx), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + sbbl 16(%ecx), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 20(%ecx), %esi + movl %esi, 28(%esp) # 4-byte Spill + sbbl 24(%ecx), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %edx, %ecx + sarl $31, %ecx + movl %ecx, %eax + shldl $1, %edx, %eax + movl 64(%esp), %edx + andl (%edx), %eax + movl 24(%edx), %esi + andl %ecx, %esi + movl %esi, (%esp) # 4-byte Spill + movl 20(%edx), %ebx + andl %ecx, %ebx + movl 16(%edx), %edi + andl %ecx, %edi + movl 12(%edx), %esi + andl %ecx, %esi + movl 64(%esp), %edx + movl 8(%edx), %edx + andl %ecx, %edx + movl 64(%esp), %ebp + andl 4(%ebp), %ecx + addl 20(%esp), %eax # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ebp + movl %eax, (%ebp) + adcl 4(%esp), %edx # 4-byte Folded Reload + movl %ebp, %eax + movl %ecx, 4(%eax) + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %edx, 8(%eax) + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %esi, 12(%eax) + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %edi, 16(%eax) + movl %ebx, 20(%eax) + movl (%esp), %ecx # 4-byte Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%eax) + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end107: + .size mcl_fp_subNF7Lbmi2, .Lfunc_end107-mcl_fp_subNF7Lbmi2 + + .globl mcl_fpDbl_add7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add7Lbmi2,@function +mcl_fpDbl_add7Lbmi2: # @mcl_fpDbl_add7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 72(%esp), %esi + movl 68(%esp), %edx + movl 12(%edx), %edi + movl 
16(%edx), %ecx + movl 8(%esi), %eax + movl (%esi), %ebx + addl (%edx), %ebx + movl 64(%esp), %ebp + movl %ebx, (%ebp) + movl 4(%esi), %ebx + adcl 4(%edx), %ebx + adcl 8(%edx), %eax + adcl 12(%esi), %edi + adcl 16(%esi), %ecx + movl %ebx, 4(%ebp) + movl %esi, %ebx + movl 36(%ebx), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl %eax, 8(%ebp) + movl 20(%ebx), %eax + movl %edi, 12(%ebp) + movl 20(%edx), %edi + adcl %eax, %edi + movl 24(%ebx), %eax + movl %ecx, 16(%ebp) + movl 24(%edx), %ecx + adcl %eax, %ecx + movl 28(%ebx), %eax + movl %edi, 20(%ebp) + movl 28(%edx), %edi + adcl %eax, %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 32(%ebx), %eax + movl %ecx, 24(%ebp) + movl 32(%edx), %ecx + adcl %eax, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%edx), %esi + adcl 36(%esp), %esi # 4-byte Folded Reload + movl 40(%ebx), %ecx + movl 40(%edx), %eax + adcl %ecx, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%ebx), %ebp + movl 44(%edx), %ecx + adcl %ebp, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 48(%ebx), %ebp + movl %ebx, %eax + movl 48(%edx), %ebx + adcl %ebp, %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 52(%eax), %eax + movl 52(%edx), %ebp + adcl %eax, %ebp + movl %ebp, 32(%esp) # 4-byte Spill + sbbl %edx, %edx + andl $1, %edx + movl 76(%esp), %eax + subl (%eax), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + sbbl 4(%eax), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %esi, %eax + movl 76(%esp), %edi + sbbl 8(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 16(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + sbbl 20(%edi), %ebx + sbbl 24(%edi), %ebp + sbbl $0, %edx + andl $1, %edx + jne .LBB108_2 +# BB#1: + movl %ebp, 32(%esp) # 4-byte Spill +.LBB108_2: + testb %dl, %dl + movl 20(%esp), %ecx # 4-byte Reload + jne .LBB108_4 +# BB#3: + movl (%esp), %esi # 4-byte Reload + movl 4(%esp), %eax # 4-byte Reload + 
movl %eax, 40(%esp) # 4-byte Spill + movl 8(%esp), %ecx # 4-byte Reload +.LBB108_4: + movl 64(%esp), %eax + movl %ecx, 28(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl %esi, 36(%eax) + movl 24(%esp), %edx # 4-byte Reload + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB108_6 +# BB#5: + movl 12(%esp), %ecx # 4-byte Reload +.LBB108_6: + movl %ecx, 40(%eax) + movl 28(%esp), %ecx # 4-byte Reload + jne .LBB108_8 +# BB#7: + movl 16(%esp), %edx # 4-byte Reload +.LBB108_8: + movl %edx, 44(%eax) + jne .LBB108_10 +# BB#9: + movl %ebx, %ecx +.LBB108_10: + movl %ecx, 48(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end108: + .size mcl_fpDbl_add7Lbmi2, .Lfunc_end108-mcl_fpDbl_add7Lbmi2 + + .globl mcl_fpDbl_sub7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub7Lbmi2,@function +mcl_fpDbl_sub7Lbmi2: # @mcl_fpDbl_sub7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %edx + movl 60(%esp), %edi + subl (%edi), %eax + sbbl 4(%edi), %edx + movl 8(%esi), %ebx + sbbl 8(%edi), %ebx + movl 52(%esp), %ecx + movl %eax, (%ecx) + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %edx, 4(%ecx) + movl 16(%esi), %edx + sbbl 16(%edi), %edx + movl %ebx, 8(%ecx) + movl 20(%edi), %ebx + movl %eax, 12(%ecx) + movl 20(%esi), %eax + sbbl %ebx, %eax + movl 24(%edi), %ebx + movl %edx, 16(%ecx) + movl 24(%esi), %edx + sbbl %ebx, %edx + movl 28(%edi), %ebx + movl %eax, 20(%ecx) + movl 28(%esi), %eax + sbbl %ebx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 32(%edi), %eax + movl %edx, 24(%ecx) + movl 32(%esi), %edx + sbbl %eax, %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 36(%edi), %eax + movl 36(%esi), %edx + sbbl %eax, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 40(%edi), %eax + movl 40(%esi), %edx + sbbl %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 44(%edi), %eax + movl 
44(%esi), %edx + sbbl %eax, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 48(%edi), %eax + movl 48(%esi), %edx + sbbl %eax, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 52(%edi), %eax + movl 52(%esi), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 64(%esp), %esi + jne .LBB109_1 +# BB#2: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB109_3 +.LBB109_1: + movl 24(%esi), %edx + movl %edx, (%esp) # 4-byte Spill +.LBB109_3: + testb %al, %al + jne .LBB109_4 +# BB#5: + movl $0, %edi + movl $0, %eax + jmp .LBB109_6 +.LBB109_4: + movl (%esi), %eax + movl 4(%esi), %edi +.LBB109_6: + jne .LBB109_7 +# BB#8: + movl $0, %ebx + jmp .LBB109_9 +.LBB109_7: + movl 20(%esi), %ebx +.LBB109_9: + jne .LBB109_10 +# BB#11: + movl $0, %ebp + jmp .LBB109_12 +.LBB109_10: + movl 16(%esi), %ebp +.LBB109_12: + jne .LBB109_13 +# BB#14: + movl $0, %edx + jmp .LBB109_15 +.LBB109_13: + movl 12(%esi), %edx +.LBB109_15: + jne .LBB109_16 +# BB#17: + xorl %esi, %esi + jmp .LBB109_18 +.LBB109_16: + movl 8(%esi), %esi +.LBB109_18: + addl 12(%esp), %eax # 4-byte Folded Reload + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %eax, 28(%ecx) + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %edi, 32(%ecx) + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %esi, 36(%ecx) + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl %edx, 40(%ecx) + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 44(%ecx) + movl %ebx, 48(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ecx) + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end109: + .size mcl_fpDbl_sub7Lbmi2, .Lfunc_end109-mcl_fpDbl_sub7Lbmi2 + + .align 16, 0x90 + .type .LmulPv256x32,@function +.LmulPv256x32: # @mulPv256x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl %edx, %eax + movl 40(%esp), %edx + mulxl 4(%eax), %edi, %esi + mulxl (%eax), %ebp, %ebx + movl 
%ebp, 16(%esp) # 4-byte Spill + addl %edi, %ebx + movl %ebx, 12(%esp) # 4-byte Spill + mulxl 8(%eax), %edi, %ebx + movl %ebx, 4(%esp) # 4-byte Spill + adcl %esi, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl %edx, %ebp + mulxl 12(%eax), %ebx, %esi + adcl 4(%esp), %ebx # 4-byte Folded Reload + mulxl 16(%eax), %edi, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %esi, %edi + movl %ebp, %edx + mulxl 20(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %ebp, %edx + mulxl 24(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl %ebx, 12(%ecx) + movl %edi, 16(%ecx) + movl %esi, 20(%ecx) + movl %edx, 24(%ecx) + movl 40(%esp), %edx + mulxl 28(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ecx) + adcl $0, %edx + movl %edx, 32(%ecx) + movl %ecx, %eax + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end110: + .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32 + + .globl mcl_fp_mulUnitPre8Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre8Lbmi2,@function +mcl_fp_mulUnitPre8Lbmi2: # @mcl_fp_mulUnitPre8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + calll .L111$pb +.L111$pb: + popl %ebx +.Ltmp2: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L111$pb), %ebx + movl 88(%esp), %eax + movl %eax, (%esp) + leal 24(%esp), %ecx + movl 84(%esp), %edx + calll .LmulPv256x32 + movl 56(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 48(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 44(%esp), %esi + movl 40(%esp), %edi + movl 36(%esp), %ebx + movl 32(%esp), %ebp + movl 24(%esp), %edx + movl 28(%esp), %ecx + movl 80(%esp), %eax + movl %edx, (%eax) + movl 
%ecx, 4(%eax) + movl %ebp, 8(%eax) + movl %ebx, 12(%eax) + movl %edi, 16(%eax) + movl %esi, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end111: + .size mcl_fp_mulUnitPre8Lbmi2, .Lfunc_end111-mcl_fp_mulUnitPre8Lbmi2 + + .globl mcl_fpDbl_mulPre8Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre8Lbmi2,@function +mcl_fpDbl_mulPre8Lbmi2: # @mcl_fpDbl_mulPre8Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $156, %esp + calll .L112$pb +.L112$pb: + popl %ebx +.Ltmp3: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L112$pb), %ebx + movl %ebx, -96(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl %esi, 8(%esp) + movl 12(%ebp), %edi + movl %edi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre4Lbmi2@PLT + leal 16(%esi), %eax + movl %eax, 8(%esp) + leal 16(%edi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 32(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre4Lbmi2@PLT + movl 24(%edi), %esi + movl (%edi), %ebx + movl 4(%edi), %eax + addl 16(%edi), %ebx + movl %ebx, -120(%ebp) # 4-byte Spill + adcl 20(%edi), %eax + movl %eax, -100(%ebp) # 4-byte Spill + adcl 8(%edi), %esi + movl %esi, -108(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -80(%ebp) # 4-byte Spill + movl 16(%ebp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + addl 16(%edi), %eax + adcl 20(%edi), %ecx + movl %ecx, -124(%ebp) # 4-byte Spill + movl 24(%edi), %edx + adcl 8(%edi), %edx + movl 28(%edi), %ecx + adcl 12(%edi), %ecx + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -128(%ebp) # 4-byte Spill + jb .LBB112_2 +# BB#1: + xorl %esi, %esi + xorl %ebx, %ebx +.LBB112_2: + movl %ebx, -112(%ebp) # 4-byte Spill + movl %esi, -104(%ebp) # 4-byte Spill + movl 12(%ebp), %esi + movl 
28(%esi), %edi + movl -80(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + adcl 12(%esi), %edi + movl %edi, -116(%ebp) # 4-byte Spill + movl %ecx, -84(%ebp) # 4-byte Spill + movl %edx, %edi + movl -124(%ebp), %ebx # 4-byte Reload + movl %ebx, -80(%ebp) # 4-byte Spill + movl %eax, -92(%ebp) # 4-byte Spill + jb .LBB112_4 +# BB#3: + movl $0, -84(%ebp) # 4-byte Folded Spill + movl $0, %edi + movl $0, -80(%ebp) # 4-byte Folded Spill + movl $0, -92(%ebp) # 4-byte Folded Spill +.LBB112_4: + movl %edi, -88(%ebp) # 4-byte Spill + movl -120(%ebp), %esi # 4-byte Reload + movl %esi, -60(%ebp) + movl -100(%ebp), %edi # 4-byte Reload + movl %edi, -56(%ebp) + movl -108(%ebp), %esi # 4-byte Reload + movl %esi, -52(%ebp) + movl %eax, -76(%ebp) + movl %ebx, -72(%ebp) + movl %edx, -68(%ebp) + movl %ecx, -64(%ebp) + sbbl %edx, %edx + movl -116(%ebp), %esi # 4-byte Reload + movl %esi, -48(%ebp) + movl -128(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB112_6 +# BB#5: + movl $0, %esi + movl $0, %edi +.LBB112_6: + sbbl %eax, %eax + leal -76(%ebp), %ecx + movl %ecx, 8(%esp) + leal -60(%ebp), %ecx + movl %ecx, 4(%esp) + leal -44(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl %edi, %eax + movl -92(%ebp), %edi # 4-byte Reload + addl -112(%ebp), %edi # 4-byte Folded Reload + adcl %eax, -80(%ebp) # 4-byte Folded Spill + movl -104(%ebp), %eax # 4-byte Reload + adcl %eax, -88(%ebp) # 4-byte Folded Spill + adcl %esi, -84(%ebp) # 4-byte Folded Spill + sbbl %esi, %esi + andl $1, %esi + andl $1, %edx + movl %edx, -92(%ebp) # 4-byte Spill + movl -96(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre4Lbmi2@PLT + addl -28(%ebp), %edi + movl -80(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -80(%ebp) # 4-byte Spill + movl -88(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -88(%ebp) # 4-byte Spill + movl -84(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, 
-84(%ebp) # 4-byte Spill + adcl %esi, -92(%ebp) # 4-byte Folded Spill + movl -44(%ebp), %eax + movl 8(%ebp), %esi + subl (%esi), %eax + movl %eax, -116(%ebp) # 4-byte Spill + movl -40(%ebp), %ebx + sbbl 4(%esi), %ebx + movl -36(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -96(%ebp) # 4-byte Spill + movl -32(%ebp), %edx + sbbl 12(%esi), %edx + movl 16(%esi), %eax + movl %eax, -100(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl 20(%esi), %eax + movl %eax, -112(%ebp) # 4-byte Spill + sbbl %eax, -80(%ebp) # 4-byte Folded Spill + movl 24(%esi), %eax + movl %eax, -104(%ebp) # 4-byte Spill + sbbl %eax, -88(%ebp) # 4-byte Folded Spill + movl 28(%esi), %eax + movl %eax, -108(%ebp) # 4-byte Spill + sbbl %eax, -84(%ebp) # 4-byte Folded Spill + sbbl $0, -92(%ebp) # 4-byte Folded Spill + movl 32(%esi), %ecx + movl %ecx, -132(%ebp) # 4-byte Spill + movl -116(%ebp), %eax # 4-byte Reload + subl %ecx, %eax + movl 36(%esi), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 40(%esi), %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + sbbl %ecx, -96(%ebp) # 4-byte Folded Spill + movl 44(%esi), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + sbbl %ecx, %edx + movl 48(%esi), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl 52(%esi), %ecx + movl %ecx, -116(%ebp) # 4-byte Spill + sbbl %ecx, -80(%ebp) # 4-byte Folded Spill + movl 56(%esi), %ecx + movl %ecx, -120(%ebp) # 4-byte Spill + sbbl %ecx, -88(%ebp) # 4-byte Folded Spill + movl 60(%esi), %ecx + movl %ecx, -124(%ebp) # 4-byte Spill + sbbl %ecx, -84(%ebp) # 4-byte Folded Spill + sbbl $0, -92(%ebp) # 4-byte Folded Spill + addl -100(%ebp), %eax # 4-byte Folded Reload + adcl -112(%ebp), %ebx # 4-byte Folded Reload + movl %eax, 16(%esi) + movl -96(%ebp), %eax # 4-byte Reload + adcl -104(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 20(%esi) + adcl -108(%ebp), %edx # 4-byte Folded Reload + movl %eax, 24(%esi) + adcl -132(%ebp), %edi # 4-byte Folded Reload + movl %edx, 28(%esi) + movl -80(%ebp), %eax # 
4-byte Reload + adcl -136(%ebp), %eax # 4-byte Folded Reload + movl %edi, 32(%esi) + movl -88(%ebp), %ecx # 4-byte Reload + adcl -128(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 36(%esi) + movl -84(%ebp), %eax # 4-byte Reload + adcl -140(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 40(%esi) + movl -92(%ebp), %ecx # 4-byte Reload + adcl -144(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 44(%esi) + movl %ecx, 48(%esi) + movl -116(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 52(%esi) + movl -120(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 56(%esi) + movl -124(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 60(%esi) + addl $156, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end112: + .size mcl_fpDbl_mulPre8Lbmi2, .Lfunc_end112-mcl_fpDbl_mulPre8Lbmi2 + + .globl mcl_fpDbl_sqrPre8Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre8Lbmi2,@function +mcl_fpDbl_sqrPre8Lbmi2: # @mcl_fpDbl_sqrPre8Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $156, %esp + calll .L113$pb +.L113$pb: + popl %ebx +.Ltmp4: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.L113$pb), %ebx + movl %ebx, -96(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre4Lbmi2@PLT + leal 16(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 32(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre4Lbmi2@PLT + movl (%edi), %esi + movl 4(%edi), %ecx + addl 16(%edi), %esi + movl %esi, -108(%ebp) # 4-byte Spill + adcl 20(%edi), %ecx + seto %al + lahf + movl %eax, %edx + addl %esi, %esi + movl %esi, -84(%ebp) # 4-byte Spill + movl %ecx, %esi + adcl %esi, %esi + movl %esi, -80(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %esi + popl %eax + movl %esi, -88(%ebp) # 4-byte Spill + movl 24(%edi), %esi + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + adcl 8(%edi), %esi + movl 28(%edi), 
%edx + adcl 12(%edi), %edx + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -100(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -104(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %ebx + sbbl %edi, %edi + movl %edi, -92(%ebp) # 4-byte Spill + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB113_2 +# BB#1: + movl $0, -80(%ebp) # 4-byte Folded Spill + movl $0, -84(%ebp) # 4-byte Folded Spill +.LBB113_2: + movl %esi, %ebx + movl -88(%ebp), %edi # 4-byte Reload + movl %edi, %eax + addb $127, %al + sahf + adcl %ebx, %ebx + movl %edx, %edi + adcl %edi, %edi + movl -104(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB113_4 +# BB#3: + xorl %edi, %edi + xorl %ebx, %ebx +.LBB113_4: + movl %ebx, -88(%ebp) # 4-byte Spill + movl -108(%ebp), %eax # 4-byte Reload + movl %eax, -60(%ebp) + movl %ecx, -56(%ebp) + movl %esi, -52(%ebp) + movl %edx, -48(%ebp) + movl %eax, -76(%ebp) + movl %ecx, -72(%ebp) + movl %esi, -68(%ebp) + movl %edx, -64(%ebp) + movl -100(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB113_5 +# BB#6: + movl $0, -100(%ebp) # 4-byte Folded Spill + jmp .LBB113_7 +.LBB113_5: + shrl $31, %edx + movl %edx, -100(%ebp) # 4-byte Spill +.LBB113_7: + leal -76(%ebp), %eax + movl %eax, 8(%esp) + leal -60(%ebp), %eax + movl %eax, 4(%esp) + leal -44(%ebp), %eax + movl %eax, (%esp) + movl -92(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -96(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre4Lbmi2@PLT + movl -84(%ebp), %eax # 4-byte Reload + addl -28(%ebp), %eax + movl %eax, -84(%ebp) # 4-byte Spill + movl -80(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -80(%ebp) # 4-byte Spill + movl -88(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -88(%ebp) # 4-byte Spill + adcl -16(%ebp), %edi + movl %edi, -92(%ebp) # 4-byte Spill + adcl -100(%ebp), %esi # 4-byte Folded Reload + movl -44(%ebp), %eax + movl 
8(%ebp), %edi + subl (%edi), %eax + movl %eax, -116(%ebp) # 4-byte Spill + movl -40(%ebp), %ebx + sbbl 4(%edi), %ebx + movl -36(%ebp), %eax + sbbl 8(%edi), %eax + movl %eax, -96(%ebp) # 4-byte Spill + movl -32(%ebp), %edx + sbbl 12(%edi), %edx + movl 16(%edi), %eax + movl %eax, -100(%ebp) # 4-byte Spill + sbbl %eax, -84(%ebp) # 4-byte Folded Spill + movl 20(%edi), %eax + movl %eax, -112(%ebp) # 4-byte Spill + sbbl %eax, -80(%ebp) # 4-byte Folded Spill + movl 24(%edi), %eax + movl %eax, -104(%ebp) # 4-byte Spill + sbbl %eax, -88(%ebp) # 4-byte Folded Spill + movl 28(%edi), %eax + movl %eax, -108(%ebp) # 4-byte Spill + sbbl %eax, -92(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + movl 32(%edi), %ecx + movl %ecx, -132(%ebp) # 4-byte Spill + movl -116(%ebp), %eax # 4-byte Reload + subl %ecx, %eax + movl 36(%edi), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 40(%edi), %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + sbbl %ecx, -96(%ebp) # 4-byte Folded Spill + movl 44(%edi), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + sbbl %ecx, %edx + movl 48(%edi), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + sbbl %ecx, -84(%ebp) # 4-byte Folded Spill + movl 52(%edi), %ecx + movl %ecx, -116(%ebp) # 4-byte Spill + sbbl %ecx, -80(%ebp) # 4-byte Folded Spill + movl 56(%edi), %ecx + movl %ecx, -120(%ebp) # 4-byte Spill + sbbl %ecx, -88(%ebp) # 4-byte Folded Spill + movl 60(%edi), %ecx + movl %ecx, -124(%ebp) # 4-byte Spill + sbbl %ecx, -92(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + addl -100(%ebp), %eax # 4-byte Folded Reload + adcl -112(%ebp), %ebx # 4-byte Folded Reload + movl %eax, 16(%edi) + movl -96(%ebp), %eax # 4-byte Reload + adcl -104(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 20(%edi) + adcl -108(%ebp), %edx # 4-byte Folded Reload + movl %eax, 24(%edi) + movl -84(%ebp), %eax # 4-byte Reload + adcl -132(%ebp), %eax # 4-byte Folded Reload + movl %edx, 28(%edi) + movl -80(%ebp), %ecx # 4-byte Reload + adcl -136(%ebp), %ecx # 4-byte Folded Reload + 
movl %eax, 32(%edi) + movl -88(%ebp), %eax # 4-byte Reload + adcl -128(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 36(%edi) + movl -92(%ebp), %ecx # 4-byte Reload + adcl -140(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 40(%edi) + adcl -144(%ebp), %esi # 4-byte Folded Reload + movl %ecx, 44(%edi) + movl %esi, 48(%edi) + movl -116(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 52(%edi) + movl -120(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 56(%edi) + movl -124(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 60(%edi) + addl $156, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end113: + .size mcl_fpDbl_sqrPre8Lbmi2, .Lfunc_end113-mcl_fpDbl_sqrPre8Lbmi2 + + .globl mcl_fp_mont8Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont8Lbmi2,@function +mcl_fp_mont8Lbmi2: # @mcl_fp_mont8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $700, %esp # imm = 0x2BC + calll .L114$pb +.L114$pb: + popl %ebx +.Ltmp5: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.L114$pb), %ebx + movl 732(%esp), %eax + movl -4(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 664(%esp), %ebp + movl 668(%esp), %edi + movl %ebp, %eax + imull %esi, %eax + movl 696(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 692(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 688(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 684(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 676(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 672(%esp), %esi + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 624(%esp), %ebp + adcl 628(%esp), %edi + adcl 632(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 
4-byte Reload + adcl 640(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + sbbl %eax, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 584(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 60(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 584(%esp), %edi + adcl 588(%esp), %esi + movl 40(%esp), %ecx # 4-byte Reload + adcl 592(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 596(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 600(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 604(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 608(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 612(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl 616(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %edi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 732(%esp), %eax + movl %eax, %edx + calll .LmulPv256x32 + andl $1, %ebp + addl 544(%esp), %edi + adcl 548(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte 
Reload + adcl 568(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ebp + movl 728(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + addl 504(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 524(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 536(%esp), %ebp + sbbl %edi, %edi + movl %esi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %edi, %eax + andl $1, %eax + addl 464(%esp), %esi + movl 40(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 472(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 480(%esp), %edi + movl 52(%esp), %ecx # 4-byte Reload + adcl 484(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 488(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 492(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 496(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 
44(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 724(%esp), %eax + movl %eax, %edx + calll .LmulPv256x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 424(%esp), %ecx + movl 28(%esp), %ebp # 4-byte Reload + adcl 428(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 436(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 444(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 448(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 384(%esp), %esi + adcl 388(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 392(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 396(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 400(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 404(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 408(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 412(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 416(%esp), %ebp + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 28(%esp), %ecx # 4-byte Reload + addl 
344(%esp), %ecx + adcl 348(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 352(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 364(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 372(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %ebp, %eax + andl $1, %eax + addl 304(%esp), %edi + movl 36(%esp), %ecx # 4-byte Reload + adcl 308(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 312(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 316(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 324(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 328(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 336(%esp), %edi + adcl $0, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 36(%esp), %ecx # 4-byte Reload + addl 264(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 272(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 276(%esp), %ebp + 
movl 60(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 284(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %edi, %eax + andl $1, %eax + addl 224(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 228(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 232(%esp), %esi + adcl 236(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 240(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 244(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 248(%esp), %ebp + movl 40(%esp), %ecx # 4-byte Reload + adcl 252(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 256(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 184(%esp), %ecx + adcl 188(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 196(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 204(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 40(%esp) # 
4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + adcl 212(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %edi, %ecx + andl $1, %ecx + addl 144(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 152(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 160(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 172(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 176(%esp), %ebp + adcl $0, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 52(%esp), %ecx # 4-byte Reload + addl 104(%esp), %ecx + adcl 108(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 116(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 120(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + adcl 128(%esp), %edi + adcl 132(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 24(%esp), %eax # 4-byte Reload + imull 
%ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + andl $1, %esi + addl 64(%esp), %ebp + movl 32(%esp), %ebx # 4-byte Reload + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 76(%esp), %ebx + movl 44(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 88(%esp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl $0, %esi + movl %eax, %edx + movl 732(%esp), %ebp + subl (%ebp), %edx + movl %ecx, %eax + sbbl 4(%ebp), %eax + movl %ebx, %ecx + sbbl 8(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + sbbl 20(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + sbbl $0, %esi + andl $1, %esi + movl %esi, %ecx + jne .LBB114_2 +# BB#1: + movl %edx, %ebp +.LBB114_2: + movl 720(%esp), %edx + movl %ebp, (%edx) + testb %cl, %cl + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB114_4 +# BB#3: + movl %eax, %ebp +.LBB114_4: + movl %ebp, 4(%edx) + jne .LBB114_6 +# BB#5: + movl 12(%esp), %ebx # 4-byte Reload +.LBB114_6: + movl %ebx, 8(%edx) + movl 28(%esp), %eax # 4-byte Reload + jne .LBB114_8 +# BB#7: + movl 16(%esp), %ecx # 
4-byte Reload + movl %ecx, 44(%esp) # 4-byte Spill +.LBB114_8: + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%edx) + movl 40(%esp), %edi # 4-byte Reload + jne .LBB114_10 +# BB#9: + movl 20(%esp), %edi # 4-byte Reload +.LBB114_10: + movl %edi, 16(%edx) + jne .LBB114_12 +# BB#11: + movl 24(%esp), %eax # 4-byte Reload +.LBB114_12: + movl %eax, 20(%edx) + movl 36(%esp), %eax # 4-byte Reload + jne .LBB114_14 +# BB#13: + movl 32(%esp), %eax # 4-byte Reload +.LBB114_14: + movl %eax, 24(%edx) + movl 48(%esp), %eax # 4-byte Reload + jne .LBB114_16 +# BB#15: + movl 52(%esp), %eax # 4-byte Reload +.LBB114_16: + movl %eax, 28(%edx) + addl $700, %esp # imm = 0x2BC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end114: + .size mcl_fp_mont8Lbmi2, .Lfunc_end114-mcl_fp_mont8Lbmi2 + + .globl mcl_fp_montNF8Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF8Lbmi2,@function +mcl_fp_montNF8Lbmi2: # @mcl_fp_montNF8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $700, %esp # imm = 0x2BC + calll .L115$pb +.L115$pb: + popl %ebx +.Ltmp6: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.L115$pb), %ebx + movl 732(%esp), %eax + movl -4(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 664(%esp), %ebp + movl 668(%esp), %edi + movl %ebp, %eax + imull %esi, %eax + movl 696(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 692(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 688(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 684(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 676(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 672(%esp), %esi + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 624(%esp), %ebp + adcl 628(%esp), %edi + adcl 632(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload 
+ adcl 636(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 640(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 584(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 616(%esp), %ecx + addl 584(%esp), %edi + adcl 588(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 596(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 604(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 732(%esp), %eax + movl %eax, %edx + calll .LmulPv256x32 + addl 544(%esp), %edi + adcl 548(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 564(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 
572(%esp), %edi + movl 52(%esp), %ebp # 4-byte Reload + adcl 576(%esp), %ebp + movl 728(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 536(%esp), %ecx + addl 504(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 528(%esp), %edi + adcl 532(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 464(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 472(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 484(%esp), %esi + adcl 488(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 496(%esp), %edi + movl 728(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 456(%esp), %eax + movl 44(%esp), %edx # 4-byte Reload + addl 424(%esp), %edx + adcl 428(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte 
Reload + adcl 432(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 436(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 440(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 448(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 452(%esp), %edi + movl %edi, %ebp + movl %eax, %edi + adcl $0, %edi + movl %edx, %eax + movl %edx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 384(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 396(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 412(%esp), %ebp + adcl 416(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 376(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 344(%esp), %ecx + movl 40(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 352(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 360(%esp), %esi + movl 52(%esp), %edi # 4-byte Reload + adcl 364(%esp), %edi + adcl 368(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax 
# 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 304(%esp), %ebp + movl 40(%esp), %ebp # 4-byte Reload + adcl 308(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 320(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + adcl 324(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 328(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 336(%esp), %edi + movl 728(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 724(%esp), %eax + movl %eax, %edx + calll .LmulPv256x32 + movl 296(%esp), %edx + movl %ebp, %ecx + addl 264(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 280(%esp), %ebp + adcl 284(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl %edx, %edi + adcl $0, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 224(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + 
movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 236(%esp), %esi + adcl 240(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 256(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 216(%esp), %ebp + movl 48(%esp), %ecx # 4-byte Reload + addl 184(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 192(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 196(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 144(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 156(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 160(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload 
+ adcl 164(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 176(%esp), %ebp + movl 728(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 136(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + addl 104(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 116(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 120(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 128(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + adcl $0, %edi + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 64(%esp), %esi + movl 32(%esp), %esi # 4-byte Reload + movl 56(%esp), %eax # 4-byte Reload + movl 44(%esp), %ebx # 4-byte Reload + adcl 68(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 76(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl 80(%esp), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 92(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + adcl 96(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl %eax, %edx + movl 
732(%esp), %eax + subl (%eax), %edx + sbbl 4(%eax), %ecx + sbbl 8(%eax), %esi + sbbl 12(%eax), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + sbbl 16(%eax), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 40(%esp), %ebx # 4-byte Reload + sbbl 20(%eax), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + sbbl 24(%eax), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + sbbl 28(%eax), %edi + movl %edi, 28(%esp) # 4-byte Spill + testl %edi, %edi + js .LBB115_2 +# BB#1: + movl %edx, 56(%esp) # 4-byte Spill +.LBB115_2: + movl 720(%esp), %edx + movl 56(%esp), %eax # 4-byte Reload + movl %eax, (%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB115_4 +# BB#3: + movl %ecx, %eax +.LBB115_4: + movl %eax, 4(%edx) + js .LBB115_6 +# BB#5: + movl %esi, 32(%esp) # 4-byte Spill +.LBB115_6: + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 8(%edx) + movl 36(%esp), %edi # 4-byte Reload + movl 40(%esp), %ebp # 4-byte Reload + movl 60(%esp), %eax # 4-byte Reload + movl 48(%esp), %ecx # 4-byte Reload + js .LBB115_8 +# BB#7: + movl 12(%esp), %esi # 4-byte Reload + movl %esi, 44(%esp) # 4-byte Spill +.LBB115_8: + movl 44(%esp), %esi # 4-byte Reload + movl %esi, 12(%edx) + js .LBB115_10 +# BB#9: + movl 16(%esp), %edi # 4-byte Reload +.LBB115_10: + movl %edi, 16(%edx) + js .LBB115_12 +# BB#11: + movl 20(%esp), %ebp # 4-byte Reload +.LBB115_12: + movl %ebp, 20(%edx) + js .LBB115_14 +# BB#13: + movl 24(%esp), %eax # 4-byte Reload +.LBB115_14: + movl %eax, 24(%edx) + js .LBB115_16 +# BB#15: + movl 28(%esp), %ecx # 4-byte Reload +.LBB115_16: + movl %ecx, 28(%edx) + addl $700, %esp # imm = 0x2BC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end115: + .size mcl_fp_montNF8Lbmi2, .Lfunc_end115-mcl_fp_montNF8Lbmi2 + + .globl mcl_fp_montRed8Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed8Lbmi2,@function +mcl_fp_montRed8Lbmi2: # @mcl_fp_montRed8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $428, %esp # imm = 0x1AC + calll .L116$pb 
+.L116$pb: + popl %ebx +.Ltmp7: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp7-.L116$pb), %ebx + movl 456(%esp), %edx + movl -4(%edx), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl (%eax), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 4(%eax), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl %esi, %ecx + imull %edi, %ecx + movl 60(%eax), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 56(%eax), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 52(%eax), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 48(%eax), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 44(%eax), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 40(%eax), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 36(%eax), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 32(%eax), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 28(%eax), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 24(%eax), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 20(%eax), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 16(%eax), %ebp + movl 12(%eax), %edi + movl 8(%eax), %esi + movl (%edx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%edx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 24(%edx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 20(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 16(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 12(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 8(%edx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 4(%edx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %ecx, (%esp) + leal 392(%esp), %ecx + calll .LmulPv256x32 + movl 56(%esp), %eax # 4-byte Reload + addl 392(%esp), %eax + movl 64(%esp), %ecx # 4-byte Reload + adcl 396(%esp), %ecx + adcl 400(%esp), %esi + movl %esi, 16(%esp) # 4-byte Spill + adcl 404(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 408(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 
72(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 76(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 100(%esp) # 4-byte Folded Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + sbbl %eax, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + movl 64(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 352(%esp), %edi + movl 16(%esp), %edx # 4-byte Reload + adcl 356(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + adcl 360(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 364(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 368(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 376(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 380(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 384(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %esi + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 
60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 312(%esp), %edi + movl 52(%esp), %edi # 4-byte Reload + adcl 316(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 272(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 276(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 
104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 100(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl 84(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 64(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 232(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 236(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 252(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 88(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 192(%esp), %edi + movl 72(%esp), %ecx # 4-byte Reload + adcl 196(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 204(%esp), %edi + adcl 208(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte 
Reload + adcl 212(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 64(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 152(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 152(%esp), %esi + movl 96(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + adcl 160(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 168(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 172(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 180(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + movl 60(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 112(%esp), %esi + movl 92(%esp), %ecx # 4-byte Reload + adcl 116(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 120(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 128(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl %edi, %ebx + movl 100(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %eax, %esi + adcl 
136(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ecx, %edx + subl 24(%esp), %edx # 4-byte Folded Reload + movl 108(%esp), %eax # 4-byte Reload + sbbl 20(%esp), %eax # 4-byte Folded Reload + movl 104(%esp), %ebp # 4-byte Reload + sbbl 28(%esp), %ebp # 4-byte Folded Reload + sbbl 32(%esp), %ebx # 4-byte Folded Reload + sbbl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 68(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + sbbl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 72(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + sbbl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 92(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 96(%esp) # 4-byte Spill + sbbl $0, %edi + andl $1, %edi + jne .LBB116_2 +# BB#1: + movl %edx, %ecx +.LBB116_2: + movl 448(%esp), %edx + movl %ecx, (%edx) + movl %edi, %ecx + testb %cl, %cl + jne .LBB116_4 +# BB#3: + movl %eax, 108(%esp) # 4-byte Spill +.LBB116_4: + movl 108(%esp), %eax # 4-byte Reload + movl %eax, 4(%edx) + movl 104(%esp), %eax # 4-byte Reload + jne .LBB116_6 +# BB#5: + movl %ebp, %eax +.LBB116_6: + movl %eax, 8(%edx) + movl 84(%esp), %eax # 4-byte Reload + movl 76(%esp), %ebp # 4-byte Reload + jne .LBB116_8 +# BB#7: + movl %ebx, %ebp +.LBB116_8: + movl %ebp, 12(%edx) + movl 100(%esp), %ebx # 4-byte Reload + jne .LBB116_10 +# BB#9: + movl 68(%esp), %ebx # 4-byte Reload +.LBB116_10: + movl %ebx, 16(%edx) + movl 80(%esp), %edi # 4-byte Reload + jne .LBB116_12 +# BB#11: + movl 72(%esp), %edi # 4-byte Reload +.LBB116_12: + movl %edi, 20(%edx) + movl 88(%esp), %esi # 4-byte Reload + jne .LBB116_14 +# BB#13: + movl 92(%esp), %esi # 4-byte Reload +.LBB116_14: + movl %esi, 
24(%edx) + jne .LBB116_16 +# BB#15: + movl 96(%esp), %eax # 4-byte Reload +.LBB116_16: + movl %eax, 28(%edx) + addl $428, %esp # imm = 0x1AC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end116: + .size mcl_fp_montRed8Lbmi2, .Lfunc_end116-mcl_fp_montRed8Lbmi2 + + .globl mcl_fp_addPre8Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre8Lbmi2,@function +mcl_fp_addPre8Lbmi2: # @mcl_fp_addPre8Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 20(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 8(%eax), %edi + adcl 8(%esi), %edi + movl 16(%esp), %ebx + movl %ecx, (%ebx) + movl 12(%esi), %ecx + movl %edx, 4(%ebx) + movl 16(%esi), %edx + adcl 12(%eax), %ecx + adcl 16(%eax), %edx + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %ecx, 12(%ebx) + movl 20(%esi), %ecx + adcl %edi, %ecx + movl 24(%eax), %edi + movl %edx, 16(%ebx) + movl 24(%esi), %edx + adcl %edi, %edx + movl %ecx, 20(%ebx) + movl %edx, 24(%ebx) + movl 28(%eax), %eax + movl 28(%esi), %ecx + adcl %eax, %ecx + movl %ecx, 28(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end117: + .size mcl_fp_addPre8Lbmi2, .Lfunc_end117-mcl_fp_addPre8Lbmi2 + + .globl mcl_fp_subPre8Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre8Lbmi2,@function +mcl_fp_subPre8Lbmi2: # @mcl_fp_subPre8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + xorl %eax, %eax + movl 28(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %esi + movl 8(%ecx), %ebx + sbbl 8(%edi), %ebx + movl 20(%esp), %ebp + movl %edx, (%ebp) + movl 12(%ecx), %edx + sbbl 12(%edi), %edx + movl %esi, 4(%ebp) + movl 16(%ecx), %esi + sbbl 16(%edi), %esi + movl %ebx, 8(%ebp) + movl 20(%edi), %ebx + movl %edx, 12(%ebp) + movl 20(%ecx), %edx + sbbl %ebx, %edx + movl 24(%edi), %ebx + movl %esi, 16(%ebp) + movl 24(%ecx), %esi + sbbl %ebx, %esi + movl %edx, 20(%ebp) + movl 
%esi, 24(%ebp) + movl 28(%edi), %edx + movl 28(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 28(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end118: + .size mcl_fp_subPre8Lbmi2, .Lfunc_end118-mcl_fp_subPre8Lbmi2 + + .globl mcl_fp_shr1_8Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_8Lbmi2,@function +mcl_fp_shr1_8Lbmi2: # @mcl_fp_shr1_8Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %eax + shrdl $1, %eax, %ecx + movl %ecx, 24(%esi) + shrl %eax + movl %eax, 28(%esi) + popl %esi + retl +.Lfunc_end119: + .size mcl_fp_shr1_8Lbmi2, .Lfunc_end119-mcl_fp_shr1_8Lbmi2 + + .globl mcl_fp_add8Lbmi2 + .align 16, 0x90 + .type mcl_fp_add8Lbmi2,@function +mcl_fp_add8Lbmi2: # @mcl_fp_add8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 44(%esp), %edx + addl (%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl 4(%edx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%edx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 12(%edx), %esi + movl 16(%edx), %eax + adcl 12(%edi), %esi + adcl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 20(%edx), %ecx + adcl 20(%edi), %ecx + movl 24(%edx), %ebx + adcl 24(%edi), %ebx + movl 28(%edx), %edi + movl 48(%esp), %edx + adcl 28(%edx), %edi + movl 40(%esp), %edx + movl %ebp, (%edx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%edx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 
8(%edx) + movl %esi, 12(%edx) + movl %eax, 16(%edx) + movl %ecx, 20(%edx) + movl %ebx, 24(%edx) + movl %edi, 28(%edx) + sbbl %eax, %eax + andl $1, %eax + movl 52(%esp), %edx + movl 8(%esp), %ebp # 4-byte Reload + subl (%edx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 16(%esp), %ebp # 4-byte Reload + movl 52(%esp), %edx + sbbl 4(%edx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + movl 52(%esp), %edx + sbbl 8(%edx), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 52(%esp), %ebp + sbbl 12(%ebp), %esi + movl %esi, (%esp) # 4-byte Spill + movl 4(%esp), %edx # 4-byte Reload + sbbl 16(%ebp), %edx + movl %edx, %esi + sbbl 20(%ebp), %ecx + sbbl 24(%ebp), %ebx + sbbl 28(%ebp), %edi + sbbl $0, %eax + testb $1, %al + jne .LBB120_2 +# BB#1: # %nocarry + movl 8(%esp), %edx # 4-byte Reload + movl 40(%esp), %ebp + movl %edx, (%ebp) + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 4(%ebp) + movl 12(%esp), %edx # 4-byte Reload + movl %edx, 8(%ebp) + movl (%esp), %eax # 4-byte Reload + movl %eax, 12(%ebp) + movl %esi, 16(%ebp) + movl %ecx, 20(%ebp) + movl %ebx, 24(%ebp) + movl %edi, 28(%ebp) +.LBB120_2: # %carry + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end120: + .size mcl_fp_add8Lbmi2, .Lfunc_end120-mcl_fp_add8Lbmi2 + + .globl mcl_fp_addNF8Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF8Lbmi2,@function +mcl_fp_addNF8Lbmi2: # @mcl_fp_addNF8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 84(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edi + movl 80(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl 4(%ebx), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 28(%eax), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 20(%eax), %ebp + movl 16(%eax), %esi + movl 12(%eax), %edx + movl 8(%eax), %ecx + adcl 8(%ebx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl 12(%ebx), %edx + movl 
%edx, 36(%esp) # 4-byte Spill + adcl 16(%ebx), %esi + movl %esi, 40(%esp) # 4-byte Spill + adcl 20(%ebx), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 24(%ebx), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 28(%ebx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 88(%esp), %ebx + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, %eax + subl (%ebx), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 4(%ebx), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %ebp, %eax + sbbl 8(%ebx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%ebx), %edx + movl %edx, 12(%esp) # 4-byte Spill + sbbl 16(%ebx), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 20(%ebx), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + sbbl 24(%ebx), %ebp + movl 48(%esp), %esi # 4-byte Reload + sbbl 28(%ebx), %esi + testl %esi, %esi + js .LBB121_2 +# BB#1: + movl (%esp), %eax # 4-byte Reload +.LBB121_2: + movl 76(%esp), %ebx + movl %eax, (%ebx) + movl 32(%esp), %eax # 4-byte Reload + js .LBB121_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB121_4: + movl %eax, 4(%ebx) + movl 40(%esp), %edx # 4-byte Reload + movl 28(%esp), %edi # 4-byte Reload + js .LBB121_6 +# BB#5: + movl 8(%esp), %edi # 4-byte Reload +.LBB121_6: + movl %edi, 8(%ebx) + movl 44(%esp), %ecx # 4-byte Reload + movl 36(%esp), %eax # 4-byte Reload + js .LBB121_8 +# BB#7: + movl 12(%esp), %eax # 4-byte Reload +.LBB121_8: + movl %eax, 12(%ebx) + movl 48(%esp), %edi # 4-byte Reload + movl 52(%esp), %eax # 4-byte Reload + js .LBB121_10 +# BB#9: + movl 16(%esp), %edx # 4-byte Reload +.LBB121_10: + movl %edx, 16(%ebx) + js .LBB121_12 +# BB#11: + movl 20(%esp), %ecx # 4-byte Reload +.LBB121_12: + movl %ecx, 20(%ebx) + js .LBB121_14 +# BB#13: + movl %ebp, %eax +.LBB121_14: + movl %eax, 24(%ebx) + js .LBB121_16 +# BB#15: + movl %esi, %edi +.LBB121_16: + movl %edi, 28(%ebx) + addl $56, %esp + 
popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end121: + .size mcl_fp_addNF8Lbmi2, .Lfunc_end121-mcl_fp_addNF8Lbmi2 + + .globl mcl_fp_sub8Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub8Lbmi2,@function +mcl_fp_sub8Lbmi2: # @mcl_fp_sub8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl 52(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + xorl %ebx, %ebx + movl 56(%esp), %ebp + subl (%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + sbbl 4(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 8(%esi), %edx + sbbl 8(%ebp), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 16(%esi), %ecx + sbbl 16(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 24(%esi), %edi + sbbl 24(%ebp), %edi + movl 28(%esi), %esi + sbbl 28(%ebp), %esi + sbbl $0, %ebx + testb $1, %bl + movl 48(%esp), %ebx + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, (%ebx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ebx) + movl %edx, 8(%ebx) + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 12(%ebx) + movl %ecx, 16(%ebx) + movl %eax, 20(%ebx) + movl %edi, 24(%ebx) + movl %esi, 28(%ebx) + je .LBB122_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 60(%esp), %esi + movl 16(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 20(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 8(%esp), %ebp # 4-byte Reload + adcl 8(%esi), %ebp + movl 12(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ebp, 8(%ebx) + movl 16(%esi), %ecx + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl %eax, 20(%ebx) + movl 24(%esi), %eax + adcl %edi, %eax + movl %eax, 24(%ebx) + movl 28(%esi), %eax + adcl (%esp), %eax # 4-byte 
Folded Reload + movl %eax, 28(%ebx) +.LBB122_2: # %nocarry + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end122: + .size mcl_fp_sub8Lbmi2, .Lfunc_end122-mcl_fp_sub8Lbmi2 + + .globl mcl_fp_subNF8Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF8Lbmi2,@function +mcl_fp_subNF8Lbmi2: # @mcl_fp_subNF8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 64(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edx + movl 68(%esp), %ecx + subl (%ecx), %esi + movl %esi, 24(%esp) # 4-byte Spill + sbbl 4(%ecx), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 28(%eax), %edx + movl 24(%eax), %esi + movl 20(%eax), %edi + movl 16(%eax), %ebx + movl 12(%eax), %ebp + movl 8(%eax), %eax + sbbl 8(%ecx), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 12(%ecx), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + sbbl 16(%ecx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + sbbl 20(%ecx), %edi + movl %edi, 20(%esp) # 4-byte Spill + sbbl 24(%ecx), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl %edx, %edi + sbbl 28(%ecx), %edi + movl %edi, 36(%esp) # 4-byte Spill + sarl $31, %edi + movl 72(%esp), %ebp + movl 28(%ebp), %eax + andl %edi, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 24(%ebp), %eax + andl %edi, %eax + movl %eax, (%esp) # 4-byte Spill + movl 20(%ebp), %ebx + andl %edi, %ebx + movl 16(%ebp), %esi + andl %edi, %esi + movl 12(%ebp), %edx + andl %edi, %edx + movl 8(%ebp), %ecx + andl %edi, %ecx + movl 4(%ebp), %eax + andl %edi, %eax + andl (%ebp), %edi + addl 24(%esp), %edi # 4-byte Folded Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl 60(%esp), %ebp + movl %edi, (%ebp) + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 4(%ebp) + adcl 12(%esp), %edx # 4-byte Folded Reload + movl %ecx, 8(%ebp) + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %edx, 12(%ebp) + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %esi, 16(%ebp) + movl (%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + 
movl %ebx, 20(%ebp) + movl %eax, 24(%ebp) + movl 4(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ebp) + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end123: + .size mcl_fp_subNF8Lbmi2, .Lfunc_end123-mcl_fp_subNF8Lbmi2 + + .globl mcl_fpDbl_add8Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add8Lbmi2,@function +mcl_fpDbl_add8Lbmi2: # @mcl_fpDbl_add8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 84(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 80(%esp), %ebp + addl (%ebp), %esi + adcl 4(%ebp), %edx + movl 8(%ecx), %edi + adcl 8(%ebp), %edi + movl 12(%ebp), %ebx + movl 76(%esp), %eax + movl %esi, (%eax) + movl 16(%ebp), %esi + adcl 12(%ecx), %ebx + adcl 16(%ecx), %esi + movl %edx, 4(%eax) + movl 40(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %edi, 8(%eax) + movl 20(%ecx), %edx + movl %ebx, 12(%eax) + movl 20(%ebp), %edi + adcl %edx, %edi + movl 24(%ecx), %edx + movl %esi, 16(%eax) + movl 24(%ebp), %esi + adcl %edx, %esi + movl 28(%ecx), %edx + movl %edi, 20(%eax) + movl 28(%ebp), %ebx + adcl %edx, %ebx + movl 32(%ecx), %edx + movl %esi, 24(%eax) + movl 32(%ebp), %esi + adcl %edx, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 36(%ecx), %edx + movl %ebx, 28(%eax) + movl 36(%ebp), %ebx + adcl %edx, %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 40(%ebp), %eax + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl 44(%ebp), %edi + adcl %edx, %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl 48(%ebp), %eax + adcl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 52(%ecx), %edx + movl 52(%ebp), %esi + adcl %edx, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 56(%ecx), %edx + movl 56(%ebp), %eax + adcl %edx, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%ecx), %ecx + movl 60(%ebp), %ebp + adcl %ecx, %ebp + movl %ebp, 40(%esp) # 4-byte Spill + 
sbbl %ecx, %ecx + andl $1, %ecx + movl 44(%esp), %eax # 4-byte Reload + movl 88(%esp), %edx + subl (%edx), %eax + movl %eax, (%esp) # 4-byte Spill + movl 88(%esp), %eax + sbbl 4(%eax), %ebx + movl %eax, %edx + movl %ebx, 4(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + movl %edx, %ebx + sbbl 8(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl %edi, %eax + movl 24(%esp), %edi # 4-byte Reload + sbbl 12(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl %edi, %eax + sbbl 16(%ebx), %eax + sbbl 20(%ebx), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + sbbl 24(%ebx), %edx + movl %edx, 20(%esp) # 4-byte Spill + sbbl 28(%ebx), %ebp + sbbl $0, %ecx + andl $1, %ecx + jne .LBB124_2 +# BB#1: + movl %eax, %edi +.LBB124_2: + testb %cl, %cl + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB124_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload +.LBB124_4: + movl 76(%esp), %eax + movl %ecx, 32(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl 32(%esp), %edx # 4-byte Reload + movl 48(%esp), %esi # 4-byte Reload + movl 28(%esp), %ebx # 4-byte Reload + jne .LBB124_6 +# BB#5: + movl 4(%esp), %ebx # 4-byte Reload +.LBB124_6: + movl %ebx, 36(%eax) + jne .LBB124_8 +# BB#7: + movl 8(%esp), %esi # 4-byte Reload +.LBB124_8: + movl %esi, 40(%eax) + movl 36(%esp), %esi # 4-byte Reload + jne .LBB124_10 +# BB#9: + movl 12(%esp), %edx # 4-byte Reload +.LBB124_10: + movl %edx, 44(%eax) + movl %edi, 48(%eax) + movl 52(%esp), %edx # 4-byte Reload + jne .LBB124_12 +# BB#11: + movl 16(%esp), %esi # 4-byte Reload +.LBB124_12: + movl %esi, 52(%eax) + jne .LBB124_14 +# BB#13: + movl 20(%esp), %edx # 4-byte Reload +.LBB124_14: + movl %edx, 56(%eax) + jne .LBB124_16 +# BB#15: + movl %ebp, %ecx +.LBB124_16: + movl %ecx, 60(%eax) + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end124: + .size mcl_fpDbl_add8Lbmi2, .Lfunc_end124-mcl_fpDbl_add8Lbmi2 + + .globl mcl_fpDbl_sub8Lbmi2 + .align 16, 0x90 + .type 
mcl_fpDbl_sub8Lbmi2,@function +mcl_fpDbl_sub8Lbmi2: # @mcl_fpDbl_sub8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 64(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %edx + movl 68(%esp), %ebx + subl (%ebx), %eax + sbbl 4(%ebx), %edx + movl 8(%edi), %esi + sbbl 8(%ebx), %esi + movl 60(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edi), %eax + sbbl 12(%ebx), %eax + movl %edx, 4(%ecx) + movl 16(%edi), %edx + sbbl 16(%ebx), %edx + movl %esi, 8(%ecx) + movl 20(%ebx), %esi + movl %eax, 12(%ecx) + movl 20(%edi), %eax + sbbl %esi, %eax + movl 24(%ebx), %esi + movl %edx, 16(%ecx) + movl 24(%edi), %edx + sbbl %esi, %edx + movl 28(%ebx), %esi + movl %eax, 20(%ecx) + movl 28(%edi), %eax + sbbl %esi, %eax + movl 32(%ebx), %esi + movl %edx, 24(%ecx) + movl 32(%edi), %edx + sbbl %esi, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 36(%ebx), %edx + movl %eax, 28(%ecx) + movl 36(%edi), %eax + sbbl %edx, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 40(%ebx), %eax + movl 40(%edi), %edx + sbbl %eax, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 44(%ebx), %eax + movl 44(%edi), %edx + sbbl %eax, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 48(%ebx), %eax + movl 48(%edi), %edx + sbbl %eax, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 52(%ebx), %eax + movl 52(%edi), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 56(%ebx), %eax + movl 56(%edi), %edx + sbbl %eax, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 60(%ebx), %eax + movl 60(%edi), %edx + sbbl %eax, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 72(%esp), %ebx + jne .LBB125_1 +# BB#2: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB125_3 +.LBB125_1: + movl 28(%ebx), %edx + movl %edx, 4(%esp) # 4-byte Spill +.LBB125_3: + testb %al, %al + jne .LBB125_4 +# BB#5: + movl $0, %ebp + movl $0, %eax + jmp .LBB125_6 +.LBB125_4: + movl (%ebx), %eax + movl 4(%ebx), %ebp +.LBB125_6: + jne .LBB125_7 +# BB#8: + 
movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB125_9 +.LBB125_7: + movl 24(%ebx), %edx + movl %edx, (%esp) # 4-byte Spill +.LBB125_9: + jne .LBB125_10 +# BB#11: + movl $0, %edx + jmp .LBB125_12 +.LBB125_10: + movl 20(%ebx), %edx +.LBB125_12: + jne .LBB125_13 +# BB#14: + movl $0, %esi + jmp .LBB125_15 +.LBB125_13: + movl 16(%ebx), %esi +.LBB125_15: + jne .LBB125_16 +# BB#17: + movl $0, %edi + jmp .LBB125_18 +.LBB125_16: + movl 12(%ebx), %edi +.LBB125_18: + jne .LBB125_19 +# BB#20: + xorl %ebx, %ebx + jmp .LBB125_21 +.LBB125_19: + movl 8(%ebx), %ebx +.LBB125_21: + addl 16(%esp), %eax # 4-byte Folded Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %eax, 32(%ecx) + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 36(%ecx) + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %ebx, 40(%ecx) + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %edi, 44(%ecx) + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %esi, 48(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %edx, 52(%ecx) + movl %eax, 56(%ecx) + movl 4(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%ecx) + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end125: + .size mcl_fpDbl_sub8Lbmi2, .Lfunc_end125-mcl_fpDbl_sub8Lbmi2 + + .align 16, 0x90 + .type .LmulPv288x32,@function +.LmulPv288x32: # @mulPv288x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl %edx, %eax + movl 44(%esp), %edx + mulxl 4(%eax), %edi, %esi + mulxl (%eax), %ebp, %ebx + movl %ebp, 20(%esp) # 4-byte Spill + addl %edi, %ebx + movl %ebx, 16(%esp) # 4-byte Spill + mulxl 8(%eax), %edi, %ebx + adcl %esi, %edi + movl %edi, 12(%esp) # 4-byte Spill + mulxl 12(%eax), %esi, %edi + adcl %ebx, %esi + movl %esi, 8(%esp) # 4-byte Spill + movl %edx, %ebp + mulxl 16(%eax), %ebx, %esi + adcl %edi, %ebx + mulxl 20(%eax), %edi, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %esi, %edi + movl 
%ebp, %edx + mulxl 24(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %ebp, %edx + mulxl 28(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl %ebx, 16(%ecx) + movl %edi, 20(%ecx) + movl %esi, 24(%ecx) + movl %edx, 28(%ecx) + movl 44(%esp), %edx + mulxl 32(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ecx) + adcl $0, %edx + movl %edx, 36(%ecx) + movl %ecx, %eax + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end126: + .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32 + + .globl mcl_fp_mulUnitPre9Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre9Lbmi2,@function +mcl_fp_mulUnitPre9Lbmi2: # @mcl_fp_mulUnitPre9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $76, %esp + calll .L127$pb +.L127$pb: + popl %ebx +.Ltmp8: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp8-.L127$pb), %ebx + movl 104(%esp), %eax + movl %eax, (%esp) + leal 32(%esp), %ecx + movl 100(%esp), %edx + calll .LmulPv288x32 + movl 68(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 56(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 52(%esp), %edi + movl 48(%esp), %ebx + movl 44(%esp), %ebp + movl 40(%esp), %esi + movl 32(%esp), %edx + movl 36(%esp), %ecx + movl 96(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %ebp, 12(%eax) + movl %ebx, 16(%eax) + movl %edi, 20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + 
movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + addl $76, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end127: + .size mcl_fp_mulUnitPre9Lbmi2, .Lfunc_end127-mcl_fp_mulUnitPre9Lbmi2 + + .globl mcl_fpDbl_mulPre9Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre9Lbmi2,@function +mcl_fpDbl_mulPre9Lbmi2: # @mcl_fpDbl_mulPre9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $428, %esp # imm = 0x1AC + calll .L128$pb +.L128$pb: + popl %esi +.Ltmp9: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp9-.L128$pb), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 452(%esp), %edx + movl %edx, %ebp + movl %esi, %ebx + calll .LmulPv288x32 + movl 420(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 400(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 396(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 392(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl 388(%esp), %edi + movl 448(%esp), %ecx + movl %eax, (%ecx) + movl 456(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl %ebp, %edx + movl %esi, %ebx + calll .LmulPv288x32 + addl 344(%esp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 364(%esp), %ebx + movl 360(%esp), %edi + movl 356(%esp), %esi + movl 348(%esp), %ecx + movl 352(%esp), %edx + movl 448(%esp), %eax + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%eax) + adcl 
40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + addl 304(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 340(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 336(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 324(%esp), %edi + movl 320(%esp), %ebp + movl 316(%esp), %esi + movl 308(%esp), %ecx + movl 312(%esp), %edx + movl 448(%esp), %eax + movl 40(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload 
+ movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + addl 264(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 300(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 296(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 292(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 288(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 284(%esp), %ebx + movl 280(%esp), %edi + movl 276(%esp), %esi + movl 268(%esp), %ecx + movl 272(%esp), %edx + movl 448(%esp), %eax + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + addl 
224(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 248(%esp), %ebx + movl 244(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 240(%esp), %edi + movl 236(%esp), %ebp + movl 228(%esp), %ecx + movl 232(%esp), %edx + movl 448(%esp), %eax + movl 44(%esp), %esi # 4-byte Reload + movl %esi, 16(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 44(%esp), %eax # 4-byte Reload + addl 184(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 220(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 204(%esp), %edi + movl 200(%esp), %ebx + movl 196(%esp), %esi + movl 188(%esp), %ecx + movl 192(%esp), %edx + movl 448(%esp), %eax + movl 44(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + adcl 12(%esp), %ecx # 4-byte 
Folded Reload + movl %ecx, %ebp + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl %eax, 20(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + addl 144(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 164(%esp), %ebx + movl 160(%esp), %edi + movl 156(%esp), %esi + movl 148(%esp), %ecx + movl 152(%esp), %edx + movl 448(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 52(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill 
+ movl 56(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 12(%esp), %esi # 4-byte Reload + addl 104(%esp), %esi + movl 140(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 136(%esp), %ebp + movl 132(%esp), %edi + movl 128(%esp), %ebx + movl 124(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 120(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 116(%esp), %edx + movl 108(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 112(%esp), %ecx + movl 448(%esp), %eax + movl %esi, 28(%eax) + movl 12(%esp), %esi # 4-byte Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + movl 40(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 56(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl %esi, %ebp + addl 64(%esp), %ebp + movl 24(%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx + movl 52(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 92(%esp), 
%eax + movl %eax, 24(%esp) # 4-byte Spill + movl 88(%esp), %edi + movl 84(%esp), %ebx + movl 80(%esp), %esi + movl 76(%esp), %eax + movl 448(%esp), %ecx + movl %ebp, 32(%ecx) + movl %edx, 36(%ecx) + adcl 28(%esp), %eax # 4-byte Folded Reload + movl 52(%esp), %edx # 4-byte Reload + movl %edx, 40(%ecx) + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %eax, 44(%ecx) + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %esi, 48(%ecx) + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %ebx, 52(%ecx) + movl 24(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %edi, 56(%ecx) + movl %eax, 60(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 68(%ecx) + addl $428, %esp # imm = 0x1AC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end128: + .size mcl_fpDbl_mulPre9Lbmi2, .Lfunc_end128-mcl_fpDbl_mulPre9Lbmi2 + + .globl mcl_fpDbl_sqrPre9Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre9Lbmi2,@function +mcl_fpDbl_sqrPre9Lbmi2: # @mcl_fpDbl_sqrPre9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $428, %esp # imm = 0x1AC + calll .L129$pb +.L129$pb: + popl %ebx +.Ltmp10: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp10-.L129$pb), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + movl 452(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 384(%esp), %ecx + movl %edx, %esi + movl %ebx, %edi + calll .LmulPv288x32 + movl 420(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 400(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 396(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 392(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 
384(%esp), %eax + movl 388(%esp), %ebp + movl 448(%esp), %ecx + movl %eax, (%ecx) + movl 4(%esi), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl %esi, %edx + movl %edi, %ebx + calll .LmulPv288x32 + addl 344(%esp), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 364(%esp), %ebx + movl 360(%esp), %edi + movl 356(%esp), %esi + movl 348(%esp), %ecx + movl 352(%esp), %edx + movl 448(%esp), %eax + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + addl 304(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 340(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 336(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + 
movl 324(%esp), %edi + movl 320(%esp), %ebp + movl 316(%esp), %esi + movl 308(%esp), %ecx + movl 312(%esp), %edx + movl 448(%esp), %eax + movl 40(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + addl 264(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 300(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 296(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 292(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 288(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 284(%esp), %ebx + movl 280(%esp), %edi + movl 276(%esp), %esi + movl 268(%esp), %ecx + movl 272(%esp), %edx + movl 448(%esp), %eax + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + adcl 
36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 20(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + addl 224(%esp), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 248(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 244(%esp), %edi + movl 240(%esp), %ebp + movl 236(%esp), %esi + movl 228(%esp), %ecx + movl 232(%esp), %edx + movl 448(%esp), %eax + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + 
leal 184(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + addl 184(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 220(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 204(%esp), %ebx + movl 200(%esp), %edi + movl 196(%esp), %esi + movl 188(%esp), %ecx + movl 192(%esp), %edx + movl 448(%esp), %eax + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + addl 144(%esp), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 164(%esp), %edi + movl 160(%esp), %ebp + movl 156(%esp), %esi + movl 148(%esp), %ecx + movl 152(%esp), %edx + movl 448(%esp), %eax + movl 
24(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 4(%esp), %esi # 4-byte Reload + addl 104(%esp), %esi + movl 140(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 136(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 132(%esp), %ebp + movl 128(%esp), %ebx + movl 124(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 120(%esp), %edi + movl 116(%esp), %edx + movl 108(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 112(%esp), %ecx + movl 448(%esp), %eax + movl %esi, 28(%eax) + movl 48(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + adcl 52(%esp), %ebp # 4-byte 
Folded Reload + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl %esi, %ebp + addl 64(%esp), %ebp + movl 20(%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx + movl 48(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 88(%esp), %edi + movl 84(%esp), %ebx + movl 80(%esp), %esi + movl 76(%esp), %eax + movl 448(%esp), %ecx + movl %ebp, 32(%ecx) + movl %edx, 36(%ecx) + adcl 24(%esp), %eax # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + movl %edx, 40(%ecx) + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %eax, 44(%ecx) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %esi, 48(%ecx) + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %ebx, 52(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %edi, 56(%ecx) + movl %eax, 60(%ecx) + movl 40(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 68(%ecx) + addl $428, %esp # imm = 0x1AC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end129: + .size mcl_fpDbl_sqrPre9Lbmi2, .Lfunc_end129-mcl_fpDbl_sqrPre9Lbmi2 + + .globl mcl_fp_mont9Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont9Lbmi2,@function +mcl_fp_mont9Lbmi2: # @mcl_fp_mont9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $796, %esp # imm = 0x31C + calll .L130$pb +.L130$pb: + popl %ebx +.Ltmp11: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp11-.L130$pb), %ebx + 
movl 828(%esp), %eax + movl -4(%eax), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 752(%esp), %ebp + movl 756(%esp), %esi + movl %ebp, %eax + imull %edi, %eax + movl 788(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 784(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 780(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 776(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 772(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 768(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 764(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 760(%esp), %edi + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 712(%esp), %ebp + adcl 716(%esp), %esi + adcl 720(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 748(%esp), %ebp + sbbl %eax, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 64(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 672(%esp), %esi + adcl 676(%esp), %edi + movl 44(%esp), %ecx # 4-byte Reload + adcl 680(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 684(%esp), %ecx + movl 
%ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 688(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 696(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 704(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + adcl 708(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 828(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + andl $1, %ebp + addl 632(%esp), %esi + adcl 636(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 660(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ebp + movl 824(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + addl 592(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 36(%esp) # 4-byte 
Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 616(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 624(%esp), %esi + adcl 628(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + andl $1, %ebp + addl 552(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 580(%esp), %edi + adcl 584(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, %ebp + movl 824(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 512(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 44(%esp), %ecx # 4-byte Reload + addl 512(%esp), %ecx + movl 32(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 524(%esp), %esi + movl 60(%esp), %eax # 4-byte 
Reload + adcl 528(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 536(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 544(%esp), %edi + adcl 548(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 472(%esp), %ebp + movl 32(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 484(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 488(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 492(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 496(%esp), %ebp + movl 64(%esp), %esi # 4-byte Reload + adcl 500(%esp), %esi + adcl 504(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 508(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 820(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + movl 32(%esp), %ecx # 4-byte Reload + addl 432(%esp), %ecx + movl 36(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 444(%esp), %edi 
+ movl 56(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 452(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + adcl 456(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 392(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + movl %esi, %eax + andl $1, %eax + addl 392(%esp), %ebp + movl 36(%esp), %esi # 4-byte Reload + adcl 396(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 400(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 404(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 408(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 412(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 416(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 420(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 424(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 428(%esp), %edi + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + addl 352(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 364(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 
368(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 372(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 384(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + movl %edi, %eax + andl $1, %eax + addl 312(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 316(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 324(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 328(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 332(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 336(%esp), %esi + movl 44(%esp), %edi # 4-byte Reload + adcl 340(%esp), %edi + movl 40(%esp), %ecx # 4-byte Reload + adcl 344(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 348(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %ebp + movl 824(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 272(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 
64(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 292(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 296(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 308(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + movl %edi, %ecx + andl $1, %ecx + addl 232(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 240(%esp), %esi + movl 68(%esp), %edi # 4-byte Reload + adcl 244(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 260(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 192(%esp), %ecx + adcl 196(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + adcl 200(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 
208(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 212(%esp), %esi + adcl 216(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 152(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + andl $1, %ebp + addl 152(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 164(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 172(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 180(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 112(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 120(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + movl 52(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 52(%esp) # 4-byte 
Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 128(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + adcl 136(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 72(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + andl $1, %edi + addl 72(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %edx, %esi + movl 44(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 92(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %edi + movl 828(%esp), %ebx + subl (%ebx), %eax + movl %ecx, %edx + sbbl 4(%ebx), %edx + movl %esi, %ecx + sbbl 8(%ebx), %ecx + movl 44(%esp), %esi # 4-byte Reload + sbbl 12(%ebx), %esi + movl %esi, 16(%esp) # 4-byte Spill + sbbl 16(%ebx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + sbbl 20(%ebx), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 24(%ebx), 
%esi + movl %esi, 28(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + sbbl 28(%ebx), %esi + movl 60(%esp), %ebp # 4-byte Reload + sbbl 32(%ebx), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + sbbl $0, %edi + andl $1, %edi + movl %edi, %ebx + jne .LBB130_2 +# BB#1: + movl %esi, 32(%esp) # 4-byte Spill +.LBB130_2: + testb %bl, %bl + movl 68(%esp), %esi # 4-byte Reload + jne .LBB130_4 +# BB#3: + movl %eax, %esi +.LBB130_4: + movl 816(%esp), %ebp + movl %esi, (%ebp) + movl 64(%esp), %eax # 4-byte Reload + jne .LBB130_6 +# BB#5: + movl %edx, %eax +.LBB130_6: + movl %eax, 4(%ebp) + movl 52(%esp), %eax # 4-byte Reload + jne .LBB130_8 +# BB#7: + movl %ecx, %eax +.LBB130_8: + movl %eax, 8(%ebp) + movl 44(%esp), %eax # 4-byte Reload + jne .LBB130_10 +# BB#9: + movl 16(%esp), %eax # 4-byte Reload +.LBB130_10: + movl %eax, 12(%ebp) + jne .LBB130_12 +# BB#11: + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 40(%esp) # 4-byte Spill +.LBB130_12: + movl 40(%esp), %eax # 4-byte Reload + movl %eax, 16(%ebp) + movl 36(%esp), %eax # 4-byte Reload + jne .LBB130_14 +# BB#13: + movl 24(%esp), %eax # 4-byte Reload +.LBB130_14: + movl %eax, 20(%ebp) + movl 48(%esp), %eax # 4-byte Reload + jne .LBB130_16 +# BB#15: + movl 28(%esp), %eax # 4-byte Reload +.LBB130_16: + movl %eax, 24(%ebp) + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 28(%ebp) + movl 60(%esp), %eax # 4-byte Reload + jne .LBB130_18 +# BB#17: + movl 56(%esp), %eax # 4-byte Reload +.LBB130_18: + movl %eax, 32(%ebp) + addl $796, %esp # imm = 0x31C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end130: + .size mcl_fp_mont9Lbmi2, .Lfunc_end130-mcl_fp_mont9Lbmi2 + + .globl mcl_fp_montNF9Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF9Lbmi2,@function +mcl_fp_montNF9Lbmi2: # @mcl_fp_montNF9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $796, %esp # imm = 0x31C + calll .L131$pb +.L131$pb: + popl %ebx +.Ltmp12: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp12-.L131$pb), %ebx + movl 828(%esp), 
%eax + movl -4(%eax), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 752(%esp), %esi + movl 756(%esp), %ebp + movl %esi, %eax + imull %edi, %eax + movl 788(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 784(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 780(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 776(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 772(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 768(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 764(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 760(%esp), %edi + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 712(%esp), %esi + adcl 716(%esp), %ebp + adcl 720(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 740(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 708(%esp), %eax + addl 672(%esp), %ebp + adcl 676(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 680(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 684(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 688(%esp), %ecx + movl %ecx, 
48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 696(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 704(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 632(%esp), %ebp + adcl 636(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 656(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 664(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 628(%esp), %eax + addl 592(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 596(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 600(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 604(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 608(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 612(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 616(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 620(%esp), %ebp + movl %ebp, 68(%esp) 
# 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 624(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 828(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + addl 552(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 572(%esp), %esi + movl 60(%esp), %edi # 4-byte Reload + adcl 576(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 588(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 512(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 548(%esp), %eax + movl 32(%esp), %edx # 4-byte Reload + addl 512(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 516(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 520(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 524(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 528(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl 532(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 536(%esp), %ebp + movl 64(%esp), %edi # 4-byte Reload + adcl 540(%esp), %edi + movl 44(%esp), %esi # 4-byte Reload + adcl 544(%esp), %esi + adcl $0, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %edx, %eax + 
imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + movl 32(%esp), %eax # 4-byte Reload + addl 472(%esp), %eax + movl 40(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 496(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + adcl 500(%esp), %edi + movl %edi, %ebp + adcl 504(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 820(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + movl 468(%esp), %eax + movl 40(%esp), %ecx # 4-byte Reload + addl 432(%esp), %ecx + movl 48(%esp), %esi # 4-byte Reload + adcl 436(%esp), %esi + movl 56(%esp), %edi # 4-byte Reload + adcl 440(%esp), %edi + movl 36(%esp), %edx # 4-byte Reload + adcl 444(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 448(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 452(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 460(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 464(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 
(%esp) + leal 392(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 392(%esp), %ebp + adcl 396(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl 400(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 412(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 424(%esp), %edi + movl 40(%esp), %esi # 4-byte Reload + adcl 428(%esp), %esi + movl 824(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 388(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + addl 352(%esp), %ecx + movl 56(%esp), %edx # 4-byte Reload + adcl 356(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 360(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 364(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 368(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 372(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 376(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 380(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 312(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 56(%esp) # 4-byte 
Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 324(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 340(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 348(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 308(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 272(%esp), %ecx + movl 36(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 280(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 292(%esp), %ebp + adcl 296(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 232(%esp), %edi + movl 36(%esp), %esi # 4-byte Reload + adcl 236(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 
4-byte Reload + adcl 244(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %edi + adcl 252(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 228(%esp), %ebp + movl %esi, %ecx + addl 192(%esp), %ecx + movl 60(%esp), %esi # 4-byte Reload + adcl 196(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 208(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 152(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 152(%esp), %edi + adcl 156(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 160(%esp), %edi + movl 64(%esp), %esi # 4-byte Reload + adcl 164(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill 
+ movl 52(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 188(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 148(%esp), %ebp + movl 60(%esp), %ecx # 4-byte Reload + addl 112(%esp), %ecx + adcl 116(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 120(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 128(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + adcl 132(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %ebp + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 72(%esp), %ecx + movl 828(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + addl 72(%esp), %edi + movl 44(%esp), %edi # 4-byte Reload + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + adcl 80(%esp), %ebx + movl %ebx, 64(%esp) # 4-byte Spill + adcl 84(%esp), %edi + movl 52(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl 92(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte 
Reload + adcl 96(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl %eax, %edx + movl 828(%esp), %eax + subl (%eax), %edx + sbbl 4(%eax), %ebx + movl %edi, %ecx + sbbl 8(%eax), %ecx + movl 52(%esp), %esi # 4-byte Reload + sbbl 12(%eax), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + sbbl 16(%eax), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 20(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 24(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + sbbl 28(%eax), %esi + movl %esi, 32(%esp) # 4-byte Spill + sbbl 32(%eax), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + sarl $31, %ebp + testl %ebp, %ebp + movl 68(%esp), %eax # 4-byte Reload + js .LBB131_2 +# BB#1: + movl %edx, %eax +.LBB131_2: + movl 816(%esp), %edx + movl %eax, (%edx) + movl 64(%esp), %esi # 4-byte Reload + js .LBB131_4 +# BB#3: + movl %ebx, %esi +.LBB131_4: + movl %esi, 4(%edx) + movl 52(%esp), %ebp # 4-byte Reload + movl 40(%esp), %eax # 4-byte Reload + js .LBB131_6 +# BB#5: + movl %ecx, %edi +.LBB131_6: + movl %edi, 8(%edx) + js .LBB131_8 +# BB#7: + movl 16(%esp), %ebp # 4-byte Reload +.LBB131_8: + movl %ebp, 12(%edx) + js .LBB131_10 +# BB#9: + movl 20(%esp), %eax # 4-byte Reload +.LBB131_10: + movl %eax, 16(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB131_12 +# BB#11: + movl 24(%esp), %eax # 4-byte Reload +.LBB131_12: + movl %eax, 20(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB131_14 +# BB#13: + movl 28(%esp), %eax # 4-byte Reload +.LBB131_14: + movl %eax, 24(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB131_16 +# BB#15: + movl 32(%esp), %eax # 4-byte Reload 
+.LBB131_16: + movl %eax, 28(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB131_18 +# BB#17: + movl 44(%esp), %eax # 4-byte Reload +.LBB131_18: + movl %eax, 32(%edx) + addl $796, %esp # imm = 0x31C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end131: + .size mcl_fp_montNF9Lbmi2, .Lfunc_end131-mcl_fp_montNF9Lbmi2 + + .globl mcl_fp_montRed9Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed9Lbmi2,@function +mcl_fp_montRed9Lbmi2: # @mcl_fp_montRed9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $492, %esp # imm = 0x1EC + calll .L132$pb +.L132$pb: + popl %ebx +.Ltmp13: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp13-.L132$pb), %ebx + movl 520(%esp), %edx + movl -4(%edx), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 516(%esp), %eax + movl (%eax), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 4(%eax), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl %esi, %ecx + imull %edi, %ecx + movl 68(%eax), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 64(%eax), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 60(%eax), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 56(%eax), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 52(%eax), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 48(%eax), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 44(%eax), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 40(%eax), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 36(%eax), %edi + movl %edi, 120(%esp) # 4-byte Spill + movl 32(%eax), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 28(%eax), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 24(%eax), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 20(%eax), %ebp + movl 16(%eax), %edi + movl 12(%eax), %esi + movl 8(%eax), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl (%edx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 28(%edx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%edx), %eax + movl %eax, 40(%esp) # 4-byte 
Spill + movl 20(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 16(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%edx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 8(%edx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 4(%edx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, (%esp) + leal 448(%esp), %ecx + calll .LmulPv288x32 + movl 76(%esp), %eax # 4-byte Reload + addl 448(%esp), %eax + movl 52(%esp), %ecx # 4-byte Reload + adcl 452(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 460(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 464(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + adcl 468(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 96(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 108(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + sbbl %eax, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + movl 76(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 52(%esp), %ecx # 4-byte Reload + addl 408(%esp), %ecx + movl 56(%esp), %edx # 4-byte Reload + adcl 412(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + 
adcl 416(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 420(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 424(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 428(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 432(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 436(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 440(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 108(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, %ebp + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + addl 368(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 124(%esp) # 4-byte 
Spill + adcl 404(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl 108(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + addl 328(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 364(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 100(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 88(%esp) # 4-byte Folded Spill + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 288(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + movl 64(%esp), %eax # 4-byte Reload + addl 288(%esp), %eax + movl 68(%esp), %ecx # 4-byte Reload + 
adcl 292(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 520(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + addl 248(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 252(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 264(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 108(%esp) # 4-byte 
Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %edi, %esi + adcl $0, %esi + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %edi + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + addl 208(%esp), %edi + movl 92(%esp), %ecx # 4-byte Reload + adcl 212(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 220(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl 76(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 520(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + addl 168(%esp), %ebp + movl 104(%esp), %ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl 180(%esp), %ebp + movl 96(%esp), %esi # 4-byte Reload + adcl 184(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 
116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + addl 128(%esp), %edi + movl 120(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl %eax, %edi + adcl 136(%esp), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + adcl 140(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl %eax, %ebx + movl 112(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + subl 20(%esp), %edi # 4-byte Folded Reload + movl 124(%esp), %eax # 4-byte Reload + sbbl 16(%esp), %eax # 4-byte Folded Reload + sbbl 24(%esp), %esi # 4-byte Folded Reload + sbbl 28(%esp), %ecx # 4-byte Folded Reload + sbbl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 72(%esp) # 4-byte Spill + movl 112(%esp), %ebx # 4-byte Reload + sbbl 36(%esp), %ebx # 4-byte Folded Reload + 
movl %ebx, 76(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 84(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 92(%esp) # 4-byte Spill + movl %edx, %ebx + movl %ebp, %edx + sbbl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 104(%esp) # 4-byte Spill + sbbl $0, %edx + andl $1, %edx + jne .LBB132_2 +# BB#1: + movl %ecx, 116(%esp) # 4-byte Spill +.LBB132_2: + testb %dl, %dl + movl 120(%esp), %ecx # 4-byte Reload + jne .LBB132_4 +# BB#3: + movl %edi, %ecx +.LBB132_4: + movl 512(%esp), %edi + movl %ecx, (%edi) + movl 88(%esp), %ecx # 4-byte Reload + jne .LBB132_6 +# BB#5: + movl %eax, 124(%esp) # 4-byte Spill +.LBB132_6: + movl 124(%esp), %eax # 4-byte Reload + movl %eax, 4(%edi) + movl 96(%esp), %eax # 4-byte Reload + jne .LBB132_8 +# BB#7: + movl %esi, %eax +.LBB132_8: + movl %eax, 8(%edi) + movl 116(%esp), %eax # 4-byte Reload + movl %eax, 12(%edi) + movl 80(%esp), %eax # 4-byte Reload + movl 108(%esp), %ebp # 4-byte Reload + jne .LBB132_10 +# BB#9: + movl 72(%esp), %ebp # 4-byte Reload +.LBB132_10: + movl %ebp, 16(%edi) + movl 112(%esp), %ebx # 4-byte Reload + jne .LBB132_12 +# BB#11: + movl 76(%esp), %ebx # 4-byte Reload +.LBB132_12: + movl %ebx, 20(%edi) + movl 100(%esp), %esi # 4-byte Reload + jne .LBB132_14 +# BB#13: + movl 84(%esp), %esi # 4-byte Reload +.LBB132_14: + movl %esi, 24(%edi) + jne .LBB132_16 +# BB#15: + movl 92(%esp), %ecx # 4-byte Reload +.LBB132_16: + movl %ecx, 28(%edi) + jne .LBB132_18 +# BB#17: + movl 104(%esp), %eax # 4-byte Reload +.LBB132_18: + movl %eax, 32(%edi) + addl $492, %esp # imm = 0x1EC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end132: + .size mcl_fp_montRed9Lbmi2, .Lfunc_end132-mcl_fp_montRed9Lbmi2 + + .globl mcl_fp_addPre9Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre9Lbmi2,@function +mcl_fp_addPre9Lbmi2: # @mcl_fp_addPre9Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + 
pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl %esi, 24(%ebx) + movl %edx, 28(%ebx) + movl 32(%eax), %eax + movl 32(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 32(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end133: + .size mcl_fp_addPre9Lbmi2, .Lfunc_end133-mcl_fp_addPre9Lbmi2 + + .globl mcl_fp_subPre9Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre9Lbmi2,@function +mcl_fp_subPre9Lbmi2: # @mcl_fp_subPre9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl %edi, 24(%ebp) + movl %esi, 28(%ebp) + movl 32(%edx), %edx + movl 32(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 32(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end134: + .size mcl_fp_subPre9Lbmi2, .Lfunc_end134-mcl_fp_subPre9Lbmi2 + + .globl 
mcl_fp_shr1_9Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_9Lbmi2,@function +mcl_fp_shr1_9Lbmi2: # @mcl_fp_shr1_9Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 24(%esi) + movl 32(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 28(%esi) + shrl %eax + movl %eax, 32(%esi) + popl %esi + retl +.Lfunc_end135: + .size mcl_fp_shr1_9Lbmi2, .Lfunc_end135-mcl_fp_shr1_9Lbmi2 + + .globl mcl_fp_add9Lbmi2 + .align 16, 0x90 + .type mcl_fp_add9Lbmi2,@function +mcl_fp_add9Lbmi2: # @mcl_fp_add9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 44(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, %ebp + adcl 4(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + movl 16(%ebx), %eax + adcl 12(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + adcl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 20(%ebx), %esi + adcl 20(%edi), %esi + movl 24(%ebx), %edx + adcl 24(%edi), %edx + movl 28(%ebx), %ecx + adcl 28(%edi), %ecx + movl 32(%ebx), %eax + adcl 32(%edi), %eax + movl 40(%esp), %edi + movl %ebp, (%edi) + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%edi) + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%edi) + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%edi) + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%edi) + movl %esi, 20(%edi) + movl %edx, 24(%edi) + movl %ecx, 28(%edi) 
+ movl %eax, 32(%edi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 52(%esp), %edi + subl (%edi), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl 16(%esp), %ebp # 4-byte Reload + sbbl 4(%edi), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + sbbl 8(%edi), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebp # 4-byte Reload + sbbl 12(%edi), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 4(%esp), %ebp # 4-byte Reload + sbbl 16(%edi), %ebp + sbbl 20(%edi), %esi + sbbl 24(%edi), %edx + sbbl 28(%edi), %ecx + sbbl 32(%edi), %eax + sbbl $0, %ebx + testb $1, %bl + jne .LBB136_2 +# BB#1: # %nocarry + movl (%esp), %edi # 4-byte Reload + movl 40(%esp), %ebx + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 8(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl %ebp, 16(%ebx) + movl %esi, 20(%ebx) + movl %edx, 24(%ebx) + movl %ecx, 28(%ebx) + movl %eax, 32(%ebx) +.LBB136_2: # %carry + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end136: + .size mcl_fp_add9Lbmi2, .Lfunc_end136-mcl_fp_add9Lbmi2 + + .globl mcl_fp_addNF9Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF9Lbmi2,@function +mcl_fp_addNF9Lbmi2: # @mcl_fp_addNF9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 100(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edi + movl 96(%esp), %esi + addl (%esi), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 4(%esi), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 32(%eax), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 28(%eax), %ebp + movl 24(%eax), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 20(%eax), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 16(%eax), %ebx + movl 12(%eax), %edx + movl 8(%eax), %ecx + adcl 8(%esi), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + adcl 12(%esi), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 16(%esi), %ebx + movl %ebx, 52(%esp) # 
4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 20(%esi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 24(%esi), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 28(%esi), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 32(%esi), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 104(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + movl %eax, %ebp + subl (%esi), %ebp + movl %ebp, (%esp) # 4-byte Spill + sbbl 4(%esi), %edi + movl %edi, 4(%esp) # 4-byte Spill + sbbl 8(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%esi), %edx + movl %edx, 12(%esp) # 4-byte Spill + sbbl 16(%esi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + sbbl 20(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + sbbl 24(%esi), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + sbbl 28(%esi), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, %edx + movl %ecx, %ebp + sbbl 32(%esi), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %edx, %esi + sarl $31, %esi + testl %esi, %esi + js .LBB137_2 +# BB#1: + movl (%esp), %eax # 4-byte Reload +.LBB137_2: + movl 92(%esp), %ecx + movl %eax, (%ecx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB137_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB137_4: + movl %eax, 4(%ecx) + movl 68(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 52(%esp), %ebx # 4-byte Reload + movl 48(%esp), %edx # 4-byte Reload + movl 40(%esp), %eax # 4-byte Reload + js .LBB137_6 +# BB#5: + movl 8(%esp), %eax # 4-byte Reload +.LBB137_6: + movl %eax, 8(%ecx) + movl %ebp, %eax + js .LBB137_8 +# BB#7: + movl 12(%esp), %edx # 4-byte Reload +.LBB137_8: + movl %edx, 12(%ecx) + movl 56(%esp), %edx # 4-byte Reload + js .LBB137_10 +# BB#9: + movl 16(%esp), %ebx # 4-byte Reload +.LBB137_10: + movl %ebx, 16(%ecx) + js 
.LBB137_12 +# BB#11: + movl 20(%esp), %edi # 4-byte Reload +.LBB137_12: + movl %edi, 20(%ecx) + js .LBB137_14 +# BB#13: + movl 24(%esp), %esi # 4-byte Reload +.LBB137_14: + movl %esi, 24(%ecx) + js .LBB137_16 +# BB#15: + movl 28(%esp), %edx # 4-byte Reload +.LBB137_16: + movl %edx, 28(%ecx) + js .LBB137_18 +# BB#17: + movl 32(%esp), %eax # 4-byte Reload +.LBB137_18: + movl %eax, 32(%ecx) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end137: + .size mcl_fp_addNF9Lbmi2, .Lfunc_end137-mcl_fp_addNF9Lbmi2 + + .globl mcl_fp_sub9Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub9Lbmi2,@function +mcl_fp_sub9Lbmi2: # @mcl_fp_sub9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl 52(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 56(%esp), %edi + subl (%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 16(%esi), %edx + sbbl 16(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 20(%esi), %ecx + sbbl 20(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 28(%esi), %ebp + sbbl 28(%edi), %ebp + movl 32(%esi), %esi + sbbl 32(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 48(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl %edx, 16(%ebx) + movl %ecx, 20(%ebx) + movl %eax, 24(%ebx) + movl %ebp, 28(%ebx) + movl %esi, 32(%ebx) + je .LBB138_2 +# BB#1: # %carry + movl %esi, %edi + movl 60(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + 
movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 20(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl (%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl %ecx, 24(%ebx) + movl 28(%esi), %eax + adcl %ebp, %eax + movl %eax, 28(%ebx) + movl 32(%esi), %eax + adcl %edi, %eax + movl %eax, 32(%ebx) +.LBB138_2: # %nocarry + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end138: + .size mcl_fp_sub9Lbmi2, .Lfunc_end138-mcl_fp_sub9Lbmi2 + + .globl mcl_fp_subNF9Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF9Lbmi2,@function +mcl_fp_subNF9Lbmi2: # @mcl_fp_subNF9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $48, %esp + movl 72(%esp), %edx + movl (%edx), %ecx + movl 4(%edx), %eax + movl 76(%esp), %esi + subl (%esi), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + sbbl 4(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 28(%edx), %ebp + movl 24(%edx), %edi + movl 20(%edx), %ebx + movl 16(%edx), %ecx + movl 12(%edx), %eax + movl 8(%edx), %edx + sbbl 8(%esi), %edx + movl %edx, 12(%esp) # 4-byte Spill + sbbl 12(%esi), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 16(%esi), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + sbbl 20(%esi), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + sbbl 24(%esi), %edi + movl %edi, 28(%esp) # 4-byte Spill + sbbl 28(%esi), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + sbbl 32(%esi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %eax, %ecx + sarl $31, %ecx + movl %ecx, %edi + shldl $1, %eax, %edi + movl 80(%esp), %ebp + movl 12(%ebp), %eax + andl %edi, %eax + movl %eax, 
4(%esp) # 4-byte Spill + movl 4(%ebp), %ebx + andl %edi, %ebx + andl (%ebp), %edi + movl 32(%ebp), %eax + andl %ecx, %eax + movl %eax, 8(%esp) # 4-byte Spill + rorxl $31, %ecx, %eax + andl 28(%ebp), %ecx + movl 24(%ebp), %edx + andl %eax, %edx + movl %edx, (%esp) # 4-byte Spill + movl 20(%ebp), %esi + andl %eax, %esi + movl 16(%ebp), %edx + andl %eax, %edx + andl 8(%ebp), %eax + addl 36(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl 68(%esp), %ebp + movl %edi, (%ebp) + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %ebx, 4(%ebp) + movl 4(%esp), %edi # 4-byte Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %eax, 8(%ebp) + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edi, 12(%ebp) + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %edx, 16(%ebp) + movl (%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %esi, 20(%ebp) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %eax, 24(%ebp) + movl %ecx, 28(%ebp) + movl 8(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ebp) + addl $48, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end139: + .size mcl_fp_subNF9Lbmi2, .Lfunc_end139-mcl_fp_subNF9Lbmi2 + + .globl mcl_fpDbl_add9Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add9Lbmi2,@function +mcl_fpDbl_add9Lbmi2: # @mcl_fpDbl_add9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $68, %esp + movl 96(%esp), %edx + movl 92(%esp), %edi + movl 12(%edi), %esi + movl 16(%edi), %ecx + movl 8(%edx), %ebx + movl (%edx), %ebp + addl (%edi), %ebp + movl 88(%esp), %eax + movl %ebp, (%eax) + movl 4(%edx), %ebp + adcl 4(%edi), %ebp + adcl 8(%edi), %ebx + adcl 12(%edx), %esi + adcl 16(%edx), %ecx + movl %ebp, 4(%eax) + movl 44(%edx), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl %ebx, 8(%eax) + movl 20(%edx), %ebx + movl %esi, 12(%eax) + movl 20(%edi), %esi + adcl %ebx, %esi + movl 24(%edx), %ebx + movl %ecx, 16(%eax) + 
movl 24(%edi), %ecx + adcl %ebx, %ecx + movl 28(%edx), %ebx + movl %esi, 20(%eax) + movl 28(%edi), %esi + adcl %ebx, %esi + movl 32(%edx), %ebx + movl %ecx, 24(%eax) + movl 32(%edi), %ecx + adcl %ebx, %ecx + movl 36(%edx), %ebp + movl %esi, 28(%eax) + movl 36(%edi), %esi + adcl %ebp, %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 40(%edx), %esi + movl %ecx, 32(%eax) + movl 40(%edi), %eax + adcl %esi, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%edi), %eax + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl 48(%edi), %ebx + adcl %ecx, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 52(%edx), %eax + movl 52(%edi), %ecx + adcl %eax, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 56(%edx), %esi + movl 56(%edi), %eax + adcl %esi, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%edx), %ebp + movl 60(%edi), %esi + adcl %ebp, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 64(%edx), %eax + movl 64(%edi), %ebp + adcl %eax, %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 68(%edx), %edx + movl 68(%edi), %eax + adcl %edx, %eax + movl %eax, 32(%esp) # 4-byte Spill + sbbl %edx, %edx + andl $1, %edx + movl 100(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + subl (%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 4(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + sbbl 8(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 12(%edi), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + sbbl 16(%edi), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + sbbl 24(%edi), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl %ebp, %eax + movl 32(%esp), %ebp # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %ebx + sbbl 32(%edi), %ebx + sbbl $0, %edx + andl $1, %edx + jne .LBB140_2 +# BB#1: + movl %ebx, %ebp 
+.LBB140_2: + testb %dl, %dl + movl 60(%esp), %edx # 4-byte Reload + movl 40(%esp), %ecx # 4-byte Reload + movl 36(%esp), %esi # 4-byte Reload + movl 56(%esp), %edi # 4-byte Reload + movl 52(%esp), %ebx # 4-byte Reload + jne .LBB140_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebx # 4-byte Reload + movl 16(%esp), %edx # 4-byte Reload +.LBB140_4: + movl 88(%esp), %eax + movl %edx, 36(%eax) + movl %ebx, 40(%eax) + movl %edi, 44(%eax) + movl %esi, 48(%eax) + movl %ecx, 52(%eax) + movl 44(%esp), %edx # 4-byte Reload + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB140_6 +# BB#5: + movl 20(%esp), %ecx # 4-byte Reload +.LBB140_6: + movl %ecx, 56(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB140_8 +# BB#7: + movl 24(%esp), %edx # 4-byte Reload +.LBB140_8: + movl %edx, 60(%eax) + jne .LBB140_10 +# BB#9: + movl 28(%esp), %ecx # 4-byte Reload +.LBB140_10: + movl %ecx, 64(%eax) + movl %ebp, 68(%eax) + addl $68, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end140: + .size mcl_fpDbl_add9Lbmi2, .Lfunc_end140-mcl_fpDbl_add9Lbmi2 + + .globl mcl_fpDbl_sub9Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub9Lbmi2,@function +mcl_fpDbl_sub9Lbmi2: # @mcl_fpDbl_sub9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 76(%esp), %ebx + movl (%ebx), %eax + movl 4(%ebx), %edx + movl 80(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %edx + movl 8(%ebx), %esi + sbbl 8(%ebp), %esi + movl 72(%esp), %ecx + movl %eax, (%ecx) + movl 12(%ebx), %eax + sbbl 12(%ebp), %eax + movl %edx, 4(%ecx) + movl 16(%ebx), %edx + sbbl 16(%ebp), %edx + movl %esi, 8(%ecx) + movl 20(%ebp), %esi + movl %eax, 12(%ecx) + movl 20(%ebx), %eax + sbbl %esi, %eax + movl 24(%ebp), %esi + movl %edx, 16(%ecx) + movl 24(%ebx), %edx + sbbl %esi, %edx + movl 28(%ebp), %esi + movl %eax, 20(%ecx) + movl 28(%ebx), %eax + sbbl %esi, %eax + movl 32(%ebp), %esi + movl %edx, 24(%ecx) 
+ movl 32(%ebx), %edx + sbbl %esi, %edx + movl 36(%ebp), %esi + movl %eax, 28(%ecx) + movl 36(%ebx), %eax + sbbl %esi, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%ebp), %eax + movl %edx, 32(%ecx) + movl 40(%ebx), %edx + sbbl %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 44(%ebp), %eax + movl 44(%ebx), %edx + sbbl %eax, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 48(%ebp), %eax + movl 48(%ebx), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 52(%ebp), %eax + movl 52(%ebx), %edx + sbbl %eax, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 56(%ebp), %eax + movl 56(%ebx), %edx + sbbl %eax, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 60(%ebp), %eax + movl 60(%ebx), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 64(%ebp), %eax + movl 64(%ebx), %edx + sbbl %eax, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 68(%ebp), %eax + movl 68(%ebx), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 84(%esp), %ebp + jne .LBB141_1 +# BB#2: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB141_3 +.LBB141_1: + movl 32(%ebp), %edx + movl %edx, 12(%esp) # 4-byte Spill +.LBB141_3: + testb %al, %al + jne .LBB141_4 +# BB#5: + movl $0, 4(%esp) # 4-byte Folded Spill + movl $0, %esi + jmp .LBB141_6 +.LBB141_4: + movl (%ebp), %esi + movl 4(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB141_6: + jne .LBB141_7 +# BB#8: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB141_9 +.LBB141_7: + movl 28(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB141_9: + jne .LBB141_10 +# BB#11: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB141_12 +.LBB141_10: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB141_12: + jne .LBB141_13 +# BB#14: + movl $0, %edi + jmp .LBB141_15 +.LBB141_13: + movl 20(%ebp), %edi +.LBB141_15: + jne .LBB141_16 +# BB#17: + movl $0, %ebx + jmp .LBB141_18 +.LBB141_16: + movl 16(%ebp), %ebx +.LBB141_18: + jne .LBB141_19 +# 
BB#20: + movl %ebp, %eax + movl $0, %ebp + jmp .LBB141_21 +.LBB141_19: + movl %ebp, %eax + movl 12(%eax), %ebp +.LBB141_21: + jne .LBB141_22 +# BB#23: + xorl %eax, %eax + jmp .LBB141_24 +.LBB141_22: + movl 8(%eax), %eax +.LBB141_24: + addl 24(%esp), %esi # 4-byte Folded Reload + movl 4(%esp), %edx # 4-byte Reload + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %esi, 36(%ecx) + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %edx, 40(%ecx) + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %eax, 44(%ecx) + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 48(%ecx) + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %ebx, 52(%ecx) + movl (%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edi, 56(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %edx, 60(%ecx) + movl %eax, 64(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%ecx) + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end141: + .size mcl_fpDbl_sub9Lbmi2, .Lfunc_end141-mcl_fpDbl_sub9Lbmi2 + + .align 16, 0x90 + .type .LmulPv320x32,@function +.LmulPv320x32: # @mulPv320x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl %edx, %eax + movl 48(%esp), %edx + mulxl 4(%eax), %edi, %esi + mulxl (%eax), %ebp, %ebx + movl %ebp, 24(%esp) # 4-byte Spill + addl %edi, %ebx + movl %ebx, 20(%esp) # 4-byte Spill + mulxl 8(%eax), %edi, %ebx + adcl %esi, %edi + movl %edi, 16(%esp) # 4-byte Spill + mulxl 12(%eax), %esi, %edi + adcl %ebx, %esi + movl %esi, 12(%esp) # 4-byte Spill + mulxl 16(%eax), %esi, %ebx + movl %ebx, 4(%esp) # 4-byte Spill + adcl %edi, %esi + movl %esi, 8(%esp) # 4-byte Spill + movl %edx, %ebp + mulxl 20(%eax), %ebx, %esi + adcl 4(%esp), %ebx # 4-byte Folded Reload + mulxl 24(%eax), %edi, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %esi, %edi + movl %ebp, %edx + mulxl 28(%eax), %esi, %edx + movl 
%edx, (%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %ebp, %edx + mulxl 32(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl %ebx, 20(%ecx) + movl %edi, 24(%ecx) + movl %esi, 28(%ecx) + movl %edx, 32(%ecx) + movl 48(%esp), %edx + mulxl 36(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + adcl $0, %edx + movl %edx, 40(%ecx) + movl %ecx, %eax + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end142: + .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32 + + .globl mcl_fp_mulUnitPre10Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre10Lbmi2,@function +mcl_fp_mulUnitPre10Lbmi2: # @mcl_fp_mulUnitPre10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $76, %esp + calll .L143$pb +.L143$pb: + popl %ebx +.Ltmp14: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp14-.L143$pb), %ebx + movl 104(%esp), %eax + movl %eax, (%esp) + leal 32(%esp), %ecx + movl 100(%esp), %edx + calll .LmulPv320x32 + movl 72(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 60(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 56(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 52(%esp), %ebx + movl 48(%esp), %ebp + movl 44(%esp), %edi + movl 40(%esp), %esi + movl 32(%esp), %edx + movl 36(%esp), %ecx + movl 96(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebp, 16(%eax) + movl %ebx, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 
4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + addl $76, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end143: + .size mcl_fp_mulUnitPre10Lbmi2, .Lfunc_end143-mcl_fp_mulUnitPre10Lbmi2 + + .globl mcl_fpDbl_mulPre10Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre10Lbmi2,@function +mcl_fpDbl_mulPre10Lbmi2: # @mcl_fpDbl_mulPre10Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $188, %esp + calll .L144$pb +.L144$pb: + popl %ebx +.Ltmp15: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp15-.L144$pb), %ebx + movl %ebx, -128(%ebp) # 4-byte Spill + movl 16(%ebp), %edi + movl %edi, 8(%esp) + movl 12(%ebp), %esi + movl %esi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre5Lbmi2@PLT + leal 20(%edi), %eax + movl %eax, 8(%esp) + leal 20(%esi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 40(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre5Lbmi2@PLT + movl 28(%esi), %edi + movl (%esi), %ebx + movl 4(%esi), %eax + addl 20(%esi), %ebx + movl %ebx, -148(%ebp) # 4-byte Spill + adcl 24(%esi), %eax + movl %eax, -132(%ebp) # 4-byte Spill + adcl 8(%esi), %edi + movl %edi, -140(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -96(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + addl 20(%esi), %eax + movl %eax, -152(%ebp) # 4-byte Spill + adcl 24(%esi), %ecx + movl %ecx, -120(%ebp) # 4-byte Spill + movl 28(%esi), %eax + adcl 8(%esi), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl 32(%esi), %eax + adcl 12(%esi), %eax + movl 36(%esi), %ecx + adcl 16(%esi), %ecx + pushl %eax + seto %al + lahf + movl %eax, %esi + popl %eax + movl %esi, -156(%ebp) # 4-byte Spill + movl %ebx, -124(%ebp) # 4-byte Spill + jb .LBB144_2 +# BB#1: + xorl %edi, %edi + movl $0, -124(%ebp) # 4-byte 
Folded Spill +.LBB144_2: + movl %edi, -136(%ebp) # 4-byte Spill + movl 12(%ebp), %esi + movl %esi, %ebx + movl 36(%ebx), %esi + movl 32(%ebx), %edi + movl -96(%ebp), %edx # 4-byte Reload + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + adcl 12(%ebx), %edi + movl %edi, -116(%ebp) # 4-byte Spill + adcl 16(%ebx), %esi + movl %esi, -144(%ebp) # 4-byte Spill + movl %ecx, -112(%ebp) # 4-byte Spill + movl %eax, -104(%ebp) # 4-byte Spill + movl -160(%ebp), %edx # 4-byte Reload + movl %edx, -108(%ebp) # 4-byte Spill + movl -120(%ebp), %esi # 4-byte Reload + movl %esi, -96(%ebp) # 4-byte Spill + movl -152(%ebp), %ebx # 4-byte Reload + movl %ebx, -100(%ebp) # 4-byte Spill + jb .LBB144_4 +# BB#3: + movl $0, -112(%ebp) # 4-byte Folded Spill + movl $0, -104(%ebp) # 4-byte Folded Spill + movl $0, -108(%ebp) # 4-byte Folded Spill + movl $0, -96(%ebp) # 4-byte Folded Spill + movl $0, -100(%ebp) # 4-byte Folded Spill +.LBB144_4: + movl -148(%ebp), %esi # 4-byte Reload + movl %esi, -72(%ebp) + movl -132(%ebp), %edi # 4-byte Reload + movl %edi, -68(%ebp) + movl -140(%ebp), %esi # 4-byte Reload + movl %esi, -64(%ebp) + movl %ebx, -92(%ebp) + movl -120(%ebp), %esi # 4-byte Reload + movl %esi, -88(%ebp) + movl %edx, -84(%ebp) + movl %eax, -80(%ebp) + movl %ecx, -76(%ebp) + sbbl %edx, %edx + movl -116(%ebp), %eax # 4-byte Reload + movl %eax, -60(%ebp) + movl -144(%ebp), %ebx # 4-byte Reload + movl %ebx, -56(%ebp) + movl -156(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB144_6 +# BB#5: + movl $0, %ebx + movl $0, %eax + movl $0, %edi +.LBB144_6: + movl %eax, -116(%ebp) # 4-byte Spill + sbbl %eax, %eax + leal -92(%ebp), %ecx + movl %ecx, 8(%esp) + leal -72(%ebp), %ecx + movl %ecx, 4(%esp) + leal -52(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl -124(%ebp), %eax # 4-byte Reload + addl %eax, -100(%ebp) # 4-byte Folded Spill + adcl %edi, -96(%ebp) # 4-byte Folded Spill + movl -108(%ebp), %esi # 4-byte 
Reload + adcl -136(%ebp), %esi # 4-byte Folded Reload + movl -116(%ebp), %eax # 4-byte Reload + adcl %eax, -104(%ebp) # 4-byte Folded Spill + movl -112(%ebp), %edi # 4-byte Reload + adcl %ebx, %edi + sbbl %eax, %eax + andl $1, %eax + movl %eax, -120(%ebp) # 4-byte Spill + andl $1, %edx + movl %edx, -116(%ebp) # 4-byte Spill + movl -128(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre5Lbmi2@PLT + movl -100(%ebp), %eax # 4-byte Reload + addl -32(%ebp), %eax + movl %eax, -100(%ebp) # 4-byte Spill + movl -96(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -96(%ebp) # 4-byte Spill + adcl -24(%ebp), %esi + movl %esi, -108(%ebp) # 4-byte Spill + movl -104(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -104(%ebp) # 4-byte Spill + adcl -16(%ebp), %edi + movl %edi, -112(%ebp) # 4-byte Spill + movl -120(%ebp), %eax # 4-byte Reload + adcl %eax, -116(%ebp) # 4-byte Folded Spill + movl -52(%ebp), %ecx + movl 8(%ebp), %esi + subl (%esi), %ecx + movl -48(%ebp), %ebx + sbbl 4(%esi), %ebx + movl -44(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -120(%ebp) # 4-byte Spill + movl -40(%ebp), %edx + sbbl 12(%esi), %edx + movl -36(%ebp), %edi + sbbl 16(%esi), %edi + movl 20(%esi), %eax + movl %eax, -124(%ebp) # 4-byte Spill + sbbl %eax, -100(%ebp) # 4-byte Folded Spill + movl 24(%esi), %eax + movl %eax, -128(%ebp) # 4-byte Spill + sbbl %eax, -96(%ebp) # 4-byte Folded Spill + movl 28(%esi), %eax + movl %eax, -132(%ebp) # 4-byte Spill + sbbl %eax, -108(%ebp) # 4-byte Folded Spill + movl 32(%esi), %eax + movl %eax, -136(%ebp) # 4-byte Spill + sbbl %eax, -104(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, -140(%ebp) # 4-byte Spill + sbbl %eax, -112(%ebp) # 4-byte Folded Spill + sbbl $0, -116(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -160(%ebp) # 4-byte Spill + subl %eax, %ecx + movl 44(%esi), %eax + movl %eax, -164(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 48(%esi), %eax + movl %eax, -168(%ebp) # 4-byte Spill + sbbl 
%eax, -120(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -172(%ebp) # 4-byte Spill + sbbl %eax, %edx + movl 56(%esi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl 60(%esi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + sbbl %eax, -100(%ebp) # 4-byte Folded Spill + movl 64(%esi), %eax + movl %eax, -144(%ebp) # 4-byte Spill + sbbl %eax, -96(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -148(%ebp) # 4-byte Spill + sbbl %eax, -108(%ebp) # 4-byte Folded Spill + movl 72(%esi), %eax + movl %eax, -152(%ebp) # 4-byte Spill + sbbl %eax, -104(%ebp) # 4-byte Folded Spill + movl 76(%esi), %eax + movl %eax, -156(%ebp) # 4-byte Spill + sbbl %eax, -112(%ebp) # 4-byte Folded Spill + sbbl $0, -116(%ebp) # 4-byte Folded Spill + addl -124(%ebp), %ecx # 4-byte Folded Reload + adcl -128(%ebp), %ebx # 4-byte Folded Reload + movl %ecx, 20(%esi) + movl -120(%ebp), %eax # 4-byte Reload + adcl -132(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 24(%esi) + adcl -136(%ebp), %edx # 4-byte Folded Reload + movl %eax, 28(%esi) + adcl -140(%ebp), %edi # 4-byte Folded Reload + movl %edx, 32(%esi) + movl -100(%ebp), %eax # 4-byte Reload + adcl -160(%ebp), %eax # 4-byte Folded Reload + movl %edi, 36(%esi) + movl -96(%ebp), %ecx # 4-byte Reload + adcl -164(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 40(%esi) + movl -108(%ebp), %eax # 4-byte Reload + adcl -168(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 44(%esi) + movl -104(%ebp), %ecx # 4-byte Reload + adcl -172(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl -112(%ebp), %edx # 4-byte Reload + adcl -176(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl -116(%ebp), %eax # 4-byte Reload + adcl -180(%ebp), %eax # 4-byte Folded Reload + movl %edx, 56(%esi) + movl %eax, 60(%esi) + movl -144(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 64(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 68(%esi) + movl -152(%ebp), %eax # 
4-byte Reload + adcl $0, %eax + movl %eax, 72(%esi) + movl -156(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 76(%esi) + addl $188, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end144: + .size mcl_fpDbl_mulPre10Lbmi2, .Lfunc_end144-mcl_fpDbl_mulPre10Lbmi2 + + .globl mcl_fpDbl_sqrPre10Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre10Lbmi2,@function +mcl_fpDbl_sqrPre10Lbmi2: # @mcl_fpDbl_sqrPre10Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $188, %esp + calll .L145$pb +.L145$pb: + popl %ebx +.Ltmp16: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp16-.L145$pb), %ebx + movl %ebx, -120(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre5Lbmi2@PLT + leal 20(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 40(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre5Lbmi2@PLT + movl 36(%edi), %eax + movl 32(%edi), %ebx + movl 28(%edi), %esi + movl (%edi), %ecx + movl 4(%edi), %edx + addl 20(%edi), %ecx + adcl 24(%edi), %edx + adcl 8(%edi), %esi + adcl 12(%edi), %ebx + movl %ebx, -124(%ebp) # 4-byte Spill + adcl 16(%edi), %eax + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -128(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -108(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -104(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -100(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -96(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + sbbl %ebx, %ebx + movl %ebx, -116(%ebp) # 4-byte Spill + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + jb .LBB145_1 +# BB#2: + movl $0, -112(%ebp) # 4-byte Folded Spill + jmp .LBB145_3 +.LBB145_1: + 
	                                    # interior of mcl_fpDbl_sqrPre10Lbmi2:
	                                    # the low and high 5-limb halves of the operand were added
	                                    # above; each saved carry flag (captured with seto/lahf)
	                                    # selects either 0 or the doubled limb (2*limb, via lea/shld)
	                                    # as the carry correction for the (lo+hi)^2 middle product.
	leal (%ecx,%ecx), %edi              # 2 * limb0 of (lo+hi)
	movl %edi, -112(%ebp) # 4-byte Spill
.LBB145_3:
	movl -96(%ebp), %edi # 4-byte Reload
	pushl %eax                          # restore EFLAGS saved earlier via seto/lahf:
	movl %edi, %eax                     # addb $127,%al re-creates OF from the saved
	addb $127, %al                      # seto byte, sahf restores CF/ZF/SF/PF/AF
	sahf
	popl %eax
	movl -124(%ebp), %edi # 4-byte Reload
	jb .LBB145_4                        # CF set => the lo+hi add carried at this limb
# BB#5:
	movl $0, -96(%ebp) # 4-byte Folded Spill
	jmp .LBB145_6
.LBB145_4:
	movl %edx, %ebx
	shldl $1, %ecx, %ebx                # 2*limb1 (double-precision shift brings in limb0's top bit)
	movl %ebx, -96(%ebp) # 4-byte Spill
.LBB145_6:
	movl -100(%ebp), %ebx # 4-byte Reload
	pushl %eax                          # same seto/lahf flag-restore idiom as above
	movl %ebx, %eax
	addb $127, %al
	sahf
	popl %eax
	jb .LBB145_7
# BB#8:
	movl $0, -100(%ebp) # 4-byte Folded Spill
	jmp .LBB145_9
.LBB145_7:
	movl %esi, %ebx
	shldl $1, %edx, %ebx                # 2*limb2
	movl %ebx, -100(%ebp) # 4-byte Spill
.LBB145_9:
	movl -104(%ebp), %ebx # 4-byte Reload
	pushl %eax
	movl %ebx, %eax
	addb $127, %al
	sahf
	popl %eax
	jb .LBB145_10
# BB#11:
	movl $0, -104(%ebp) # 4-byte Folded Spill
	jmp .LBB145_12
.LBB145_10:
	movl %edi, %ebx
	shldl $1, %esi, %ebx                # 2*limb3
	movl %ebx, -104(%ebp) # 4-byte Spill
.LBB145_12:
	movl -108(%ebp), %ebx # 4-byte Reload
	pushl %eax
	movl %ebx, %eax
	addb $127, %al
	sahf
	popl %eax
	jb .LBB145_13
# BB#14:
	movl $0, -108(%ebp) # 4-byte Folded Spill
	jmp .LBB145_15
.LBB145_13:
	movl %eax, %ebx
	shldl $1, %edi, %ebx                # 2*limb4
	movl %ebx, -108(%ebp) # 4-byte Spill
.LBB145_15:                             # store (lo+hi) twice: same 5-limb value is both
	movl %ecx, -72(%ebp)                # multiplicand (-72..-56) and multiplier (-92..-76)
	movl %edx, -68(%ebp)                # for the recursive squaring call below
	movl %esi, -64(%ebp)
	movl %edi, -60(%ebp)
	movl %eax, -56(%ebp)
	movl %ecx, -92(%ebp)
	movl %edx, -88(%ebp)
	movl %esi, -84(%ebp)
	movl %edi, -80(%ebp)
	movl %eax, -76(%ebp)
	movl -128(%ebp), %ecx # 4-byte Reload
	pushl %eax
	movl %ecx, %eax
	addb $127, %al
	sahf
	popl %eax
	jb .LBB145_16
# BB#17:
	movl $0, -124(%ebp) # 4-byte Folded Spill
	jmp .LBB145_18
.LBB145_16:
	shrl $31, %eax                      # top bit of limb4 shifted out by the doubling
	movl %eax, -124(%ebp) # 4-byte Spill
.LBB145_18:                             # mcl_fpDbl_mulPre5Lbmi2(&tmp[-52], &a[-72], &b[-92])
	leal -52(%ebp), %eax
	movl %eax, (%esp)
	leal -72(%ebp), %eax
	movl %eax, 4(%esp)
	leal -92(%ebp), %eax
	movl %eax, 8(%esp)
	movl -116(%ebp), %esi # 4-byte Reload
	andl $1, %esi                       # %esi = carry bit of the lo+hi addition
	movl -120(%ebp), %ebx # 4-byte Reload
	calll mcl_fpDbl_mulPre5Lbmi2@PLT    # %ebx holds the GOT base (PIC call)
	movl -112(%ebp), %edi # 4-byte Reload
	addl -32(%ebp), %edi                # fold the doubled-limb corrections into the
	movl -96(%ebp), %eax # 4-byte Reload    # upper half of the middle product
	adcl -28(%ebp), %eax
	movl %eax, -96(%ebp) # 4-byte Spill
	movl -100(%ebp), %eax # 4-byte Reload
	adcl -24(%ebp), %eax
	movl %eax, -100(%ebp) # 4-byte Spill
	movl -104(%ebp), %eax # 4-byte Reload
	adcl -20(%ebp), %eax
	movl %eax, -104(%ebp) # 4-byte Spill
	movl -108(%ebp), %eax # 4-byte Reload
	adcl -16(%ebp), %eax
	movl %eax, -108(%ebp) # 4-byte Spill
	adcl -124(%ebp), %esi # 4-byte Folded Reload
	movl -52(%ebp), %edx                # begin middle -= lo*lo: subtract the low product
	movl 8(%ebp), %eax                  # %eax = result pointer (arg 0; lo*lo was stored there)
	subl (%eax), %edx
	movl -48(%ebp), %ebx
	sbbl 4(%eax), %ebx
	movl -44(%ebp), %ecx
	sbbl 8(%eax), %ecx
	movl %ecx, -116(%ebp) # 4-byte Spill
	movl -40(%ebp), %ecx
	sbbl 12(%eax), %ecx
	movl %ecx, -144(%ebp) # 4-byte Spill
	movl -36(%ebp), %ecx
	sbbl 16(%eax), %ecx
	movl %ecx, -120(%ebp) # 4-byte Spill
	movl 20(%eax), %ecx
	movl %ecx, -124(%ebp) # 4-byte Spill
	sbbl %ecx, %edi
	movl %edi, -112(%ebp) # 4-byte Spill
	movl 24(%eax), %ecx
	movl %ecx, -128(%ebp) # 4-byte Spill
	sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
	movl 28(%eax), %ecx
	movl %ecx, -132(%ebp) # 4-byte Spill
	sbbl %ecx, -100(%ebp) # 4-byte Folded Spill
	movl 32(%eax), %ecx
	movl %ecx, -136(%ebp) # 4-byte Spill
	sbbl %ecx, -104(%ebp) # 4-byte Folded Spill
	movl 36(%eax), %ecx
	movl %ecx, -140(%ebp) # 4-byte Spill
	sbbl %ecx, -108(%ebp) # 4-byte Folded Spill
	sbbl $0, %esi                       # propagate the borrow into the carry word
	movl 40(%eax), %ecx                 # begin middle -= hi*hi (stored at result[40..])
	movl %ecx, -160(%ebp) # 4-byte Spill
	subl %ecx, %edx
	movl 44(%eax), %ecx
	movl %ecx, -164(%ebp) # 4-byte Spill
	sbbl %ecx, %ebx
	movl 48(%eax), %ecx
	movl %ecx, -168(%ebp) # 4-byte Spill
	sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
	movl 52(%eax), %ecx
	movl %ecx, -172(%ebp) # 4-byte Spill
	movl -144(%ebp), %edi # 4-byte Reload
	sbbl %ecx, %edi
	movl 56(%eax), %ecx
	movl %ecx, -176(%ebp) # 4-byte Spill
	sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + sbbl %ecx, -112(%ebp) # 4-byte Folded Spill + movl 64(%eax), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + sbbl %ecx, -96(%ebp) # 4-byte Folded Spill + movl 68(%eax), %ecx + movl %ecx, -148(%ebp) # 4-byte Spill + sbbl %ecx, -100(%ebp) # 4-byte Folded Spill + movl 72(%eax), %ecx + movl %ecx, -152(%ebp) # 4-byte Spill + sbbl %ecx, -104(%ebp) # 4-byte Folded Spill + movl 76(%eax), %ecx + movl %ecx, -156(%ebp) # 4-byte Spill + sbbl %ecx, -108(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + addl -124(%ebp), %edx # 4-byte Folded Reload + adcl -128(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 20(%eax) + movl -116(%ebp), %ecx # 4-byte Reload + adcl -132(%ebp), %ecx # 4-byte Folded Reload + movl %ebx, 24(%eax) + adcl -136(%ebp), %edi # 4-byte Folded Reload + movl %ecx, 28(%eax) + movl -120(%ebp), %edx # 4-byte Reload + adcl -140(%ebp), %edx # 4-byte Folded Reload + movl %edi, 32(%eax) + movl -112(%ebp), %ecx # 4-byte Reload + adcl -160(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 36(%eax) + movl -96(%ebp), %edx # 4-byte Reload + adcl -164(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 40(%eax) + movl -100(%ebp), %ecx # 4-byte Reload + adcl -168(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 44(%eax) + movl -104(%ebp), %edx # 4-byte Reload + adcl -172(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 48(%eax) + movl -108(%ebp), %ecx # 4-byte Reload + adcl -176(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 52(%eax) + adcl -180(%ebp), %esi # 4-byte Folded Reload + movl %ecx, 56(%eax) + movl %esi, 60(%eax) + movl -144(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 64(%eax) + movl -148(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 68(%eax) + movl -152(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 72(%eax) + movl -156(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 76(%eax) + addl $188, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end145: + .size 
mcl_fpDbl_sqrPre10Lbmi2, .Lfunc_end145-mcl_fpDbl_sqrPre10Lbmi2 + + .globl mcl_fp_mont10Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont10Lbmi2,@function +mcl_fp_mont10Lbmi2: # @mcl_fp_mont10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1036, %esp # imm = 0x40C + calll .L146$pb +.L146$pb: + popl %ebx +.Ltmp17: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp17-.L146$pb), %ebx + movl 1068(%esp), %eax + movl -4(%eax), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 992(%esp), %edi + movl 996(%esp), %ebp + movl %edi, %eax + imull %esi, %eax + movl 1032(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1028(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1024(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1020(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1016(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1012(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1008(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1004(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1000(%esp), %esi + movl %eax, (%esp) + leal 944(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + addl 944(%esp), %edi + adcl 948(%esp), %ebp + adcl 952(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 
980(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 1064(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 896(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + andl $1, %edi + addl 896(%esp), %ebp + adcl 900(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 936(%esp), %edi + sbbl %eax, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + movl 64(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 848(%esp), %ebp + adcl 852(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + adcl 856(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 864(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 868(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 872(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 
4-byte Reload + adcl 876(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 880(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 884(%esp), %ebp + adcl 888(%esp), %edi + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 800(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + addl 800(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 832(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 836(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %esi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 1068(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + andl $1, %edi + movl %edi, %ecx + addl 752(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 768(%esp), %ebp + movl 56(%esp), %eax # 
4-byte Reload + adcl 772(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 780(%esp), %esi + movl 76(%esp), %edi # 4-byte Reload + adcl 784(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 44(%esp), %ecx # 4-byte Reload + addl 704(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 716(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 728(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 732(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 736(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 744(%esp), %edi + sbbl %esi, %esi + movl %ecx, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 656(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %esi + movl %esi, %ecx + movl 44(%esp), %eax # 4-byte Reload + addl 656(%esp), %eax + movl 40(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl 
%eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 676(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 688(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 696(%esp), %edi + adcl $0, %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 608(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 608(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 624(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 636(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 640(%esp), %esi + adcl 644(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 648(%esp), %edi + sbbl %ebp, %ebp + movl %ecx, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %ebp + movl %ebp, %ecx + movl 40(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl 36(%esp), %eax # 
4-byte Reload + adcl 564(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 572(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 592(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 600(%esp), %edi + adcl $0, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 512(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 36(%esp), %ecx # 4-byte Reload + addl 512(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 520(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 548(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + 
imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %ebp + movl %ebp, %eax + addl 464(%esp), %edi + movl 48(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 472(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 484(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 488(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 492(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 496(%esp), %ebp + movl 44(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 504(%esp), %edi + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 1060(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 416(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 432(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 444(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + 
adcl 452(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %edi + movl %edi, %ecx + addl 368(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 380(%esp), %esi + movl 76(%esp), %ebp # 4-byte Reload + adcl 384(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 400(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 320(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 320(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 328(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 332(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + 
movl %eax, 52(%esp) # 4-byte Spill + adcl 348(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 360(%esp), %ebp + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %edi + movl %edi, %ecx + addl 272(%esp), %esi + movl 68(%esp), %edi # 4-byte Reload + adcl 276(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 288(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 312(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl $0, %ebp + movl 1064(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl %edi, %ecx + addl 224(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 236(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 240(%esp), %edi + movl 
52(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 264(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 176(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %ebp + addl 176(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %edi, %esi + adcl 192(%esp), %esi + movl 52(%esp), %edi # 4-byte Reload + adcl 196(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ebp + movl 1064(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 128(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte 
Reload + adcl 136(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 140(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + adcl 144(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 168(%esp), %ebp + sbbl %esi, %esi + movl 32(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 80(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %esi + addl 80(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + movl 64(%esp), %ebx # 4-byte Reload + adcl 84(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 92(%esp), %ebx + movl 52(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %edx, %edi + movl 36(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 120(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + adcl $0, %esi + movl 1068(%esp), %edx + subl (%edx), %eax + sbbl 4(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl %ebx, %ecx + sbbl 8(%edx), %ecx + 
movl %ecx, 20(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + sbbl 12(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + sbbl 16(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + sbbl 20(%edx), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + sbbl 24(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + sbbl 28(%edx), %ecx + movl 68(%esp), %edi # 4-byte Reload + sbbl 32(%edx), %edi + movl %edi, 64(%esp) # 4-byte Spill + sbbl 36(%edx), %ebp + movl %ebp, %edx + sbbl $0, %esi + andl $1, %esi + jne .LBB146_2 +# BB#1: + movl %ecx, 48(%esp) # 4-byte Spill +.LBB146_2: + movl %esi, %ecx + testb %cl, %cl + movl 76(%esp), %esi # 4-byte Reload + jne .LBB146_4 +# BB#3: + movl %eax, %esi +.LBB146_4: + movl 1056(%esp), %eax + movl %esi, (%eax) + movl 60(%esp), %edi # 4-byte Reload + jne .LBB146_6 +# BB#5: + movl 16(%esp), %edi # 4-byte Reload +.LBB146_6: + movl %edi, 4(%eax) + jne .LBB146_8 +# BB#7: + movl 20(%esp), %ebx # 4-byte Reload +.LBB146_8: + movl %ebx, 8(%eax) + movl 52(%esp), %ebp # 4-byte Reload + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB146_10 +# BB#9: + movl 24(%esp), %ebp # 4-byte Reload +.LBB146_10: + movl %ebp, 12(%eax) + jne .LBB146_12 +# BB#11: + movl 28(%esp), %ecx # 4-byte Reload +.LBB146_12: + movl %ecx, 16(%eax) + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB146_14 +# BB#13: + movl 32(%esp), %ecx # 4-byte Reload +.LBB146_14: + movl %ecx, 20(%eax) + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB146_16 +# BB#15: + movl 56(%esp), %ecx # 4-byte Reload +.LBB146_16: + movl %ecx, 24(%eax) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB146_18 +# BB#17: + movl 64(%esp), %ecx # 4-byte Reload +.LBB146_18: + movl %ecx, 32(%eax) + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB146_20 +# BB#19: + movl %edx, %ecx +.LBB146_20: + movl %ecx, 36(%eax) + addl $1036, %esp # imm = 0x40C + popl 
%esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end146: + .size mcl_fp_mont10Lbmi2, .Lfunc_end146-mcl_fp_mont10Lbmi2 + + .globl mcl_fp_montNF10Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF10Lbmi2,@function +mcl_fp_montNF10Lbmi2: # @mcl_fp_montNF10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1020, %esp # imm = 0x3FC + calll .L147$pb +.L147$pb: + popl %ebx +.Ltmp18: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp18-.L147$pb), %ebx + movl 1052(%esp), %eax + movl -4(%eax), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 976(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 976(%esp), %edi + movl 980(%esp), %esi + movl %edi, %eax + imull %ebp, %eax + movl 1016(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1012(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1008(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1004(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1000(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 996(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 992(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 988(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 984(%esp), %ebp + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 928(%esp), %edi + adcl 932(%esp), %esi + adcl 936(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 952(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 
4-byte Reload + adcl 964(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 880(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 920(%esp), %ecx + addl 880(%esp), %esi + adcl 884(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 900(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl %esi, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 832(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 832(%esp), %esi + adcl 836(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + adcl 848(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 
56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 872(%esp), %esi + movl 1048(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 824(%esp), %ecx + addl 784(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 796(%esp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 808(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 820(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl %ebp, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 736(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 736(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 760(%esp), %edi + movl 56(%esp), %ebp # 4-byte Reload + adcl 764(%esp), %ebp + movl 60(%esp), %esi # 4-byte Reload + adcl 768(%esp), %esi + 
movl 52(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 688(%esp), %ecx + movl 1044(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + movl 728(%esp), %eax + movl 28(%esp), %edx # 4-byte Reload + addl 688(%esp), %edx + movl 44(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 24(%esp), %ecx # 4-byte Reload + adcl 696(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 704(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 708(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 712(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl 716(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 720(%esp), %ebp + movl 32(%esp), %esi # 4-byte Reload + adcl 724(%esp), %esi + adcl $0, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1052(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + addl 640(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 656(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 668(%esp), 
%eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 672(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 676(%esp), %esi + movl %esi, %ebp + movl 28(%esp), %esi # 4-byte Reload + adcl 680(%esp), %esi + movl 1048(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 632(%esp), %edx + movl 44(%esp), %ecx # 4-byte Reload + addl 592(%esp), %ecx + movl 24(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 604(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 624(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 628(%esp), %esi + movl %esi, 28(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 544(%esp), %esi + movl 24(%esp), %edi # 4-byte Reload + adcl 548(%esp), %edi + movl 40(%esp), %esi # 4-byte Reload + adcl 552(%esp), %esi + movl 36(%esp), %ebp # 4-byte Reload + adcl 556(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 52(%esp) # 
4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 536(%esp), %edx + addl 496(%esp), %edi + adcl 500(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + adcl 504(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %esi # 4-byte Reload + adcl 528(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %edi, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 448(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 448(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 464(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 
32(%esp) # 4-byte Spill + adcl 480(%esp), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %esi # 4-byte Reload + adcl 488(%esp), %esi + movl 1048(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 400(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 440(%esp), %eax + movl 40(%esp), %ecx # 4-byte Reload + addl 400(%esp), %ecx + adcl 404(%esp), %ebp + movl 48(%esp), %edx # 4-byte Reload + adcl 408(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 412(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 416(%esp), %edi + movl 52(%esp), %edx # 4-byte Reload + adcl 420(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + adcl 424(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + adcl 428(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 432(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 436(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 352(%esp), %esi + adcl 356(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 360(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %edi, %esi + adcl 368(%esp), %esi + movl 52(%esp), %edi # 4-byte Reload + adcl 372(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 
44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 1044(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + movl 344(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 304(%esp), %ecx + adcl 308(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 316(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 320(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 324(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + movl 24(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 256(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 272(%esp), %edi + adcl 276(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl 284(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 24(%esp), %esi # 4-byte Reload + adcl 288(%esp), %esi + 
movl 40(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 248(%esp), %edx + movl 48(%esp), %ecx # 4-byte Reload + addl 208(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 220(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 224(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 236(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 160(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 160(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %ebp, %edi + adcl 176(%esp), %edi + movl 28(%esp), %esi # 4-byte Reload + adcl 180(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), 
%ebp # 4-byte Reload + adcl 192(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 152(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 112(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 120(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 124(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + adcl 128(%esp), %esi + movl %esi, %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 140(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 144(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 64(%esp), %ebp + movl %edi, %ebp + movl 60(%esp), %eax # 4-byte Reload + movl 32(%esp), %ebx # 4-byte Reload + adcl 68(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 76(%esp), %ebx + adcl 80(%esp), %ebp + movl 44(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + 
adcl 96(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, %edx + movl 1052(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %ecx + movl %ebx, %eax + sbbl 8(%edi), %eax + movl %ebp, %esi + sbbl 12(%edi), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 16(%edi), %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 24(%esp), %esi # 4-byte Reload + sbbl 20(%edi), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + sbbl 24(%edi), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + sbbl 28(%edi), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 32(%edi), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 36(%edi), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl %esi, %edi + sarl $31, %edi + testl %edi, %edi + movl 60(%esp), %edi # 4-byte Reload + js .LBB147_2 +# BB#1: + movl %edx, %edi +.LBB147_2: + movl 1040(%esp), %edx + movl %edi, (%edx) + movl 52(%esp), %edi # 4-byte Reload + js .LBB147_4 +# BB#3: + movl %ecx, %edi +.LBB147_4: + movl %edi, 4(%edx) + js .LBB147_6 +# BB#5: + movl %eax, %ebx +.LBB147_6: + movl %ebx, 8(%edx) + js .LBB147_8 +# BB#7: + movl 4(%esp), %ebp # 4-byte Reload +.LBB147_8: + movl %ebp, 12(%edx) + movl 44(%esp), %esi # 4-byte Reload + movl 24(%esp), %eax # 4-byte Reload + js .LBB147_10 +# BB#9: + movl 8(%esp), %esi # 4-byte Reload +.LBB147_10: + movl %esi, 16(%edx) + js .LBB147_12 +# BB#11: + movl 12(%esp), %eax # 4-byte Reload +.LBB147_12: + movl %eax, 20(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB147_14 +# BB#13: + movl 16(%esp), %eax # 4-byte Reload +.LBB147_14: + movl %eax, 24(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB147_16 +# BB#15: + movl 20(%esp), %eax 
# 4-byte Reload +.LBB147_16: + movl %eax, 28(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB147_18 +# BB#17: + movl 28(%esp), %eax # 4-byte Reload +.LBB147_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB147_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB147_20: + movl %eax, 36(%edx) + addl $1020, %esp # imm = 0x3FC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end147: + .size mcl_fp_montNF10Lbmi2, .Lfunc_end147-mcl_fp_montNF10Lbmi2 + + .globl mcl_fp_montRed10Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed10Lbmi2,@function +mcl_fp_montRed10Lbmi2: # @mcl_fp_montRed10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $604, %esp # imm = 0x25C + calll .L148$pb +.L148$pb: + popl %eax +.Ltmp19: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp19-.L148$pb), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 632(%esp), %edx + movl -4(%edx), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 628(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 68(%esp) # 4-byte Spill + imull %esi, %ebx + movl 76(%ecx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 72(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 56(%ecx), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 48(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 44(%ecx), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 40(%ecx), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 36(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 28(%ecx), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 24(%ecx), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %edi + movl 
12(%ecx), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 8(%ecx), %esi + movl (%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 560(%esp), %ecx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 56(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl 68(%esp), %ecx # 4-byte Reload + adcl 564(%esp), %ecx + adcl 568(%esp), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 576(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 580(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 76(%esp), %edi # 4-byte 
Reload + adcl $0, %edi + adcl $0, 84(%esp) # 4-byte Folded Spill + sbbl %eax, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 512(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 68(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 512(%esp), %esi + movl 4(%esp), %edx # 4-byte Reload + adcl 516(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + adcl 520(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 524(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 528(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 532(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 536(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 540(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 544(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 548(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 552(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 464(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload 
+ adcl 468(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 492(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 52(%esp), %eax # 4-byte Reload + addl 416(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + adcl 420(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 440(%esp), %esi + movl %esi, 92(%esp) # 4-byte 
Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 120(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + movl 60(%esp), %edi # 4-byte Reload + imull %edi, %eax + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 632(%esp), %eax + movl %eax, %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 368(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 404(%esp), %ebp + movl 120(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + 
adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 84(%esp) # 4-byte Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull %edi, %eax + movl %eax, (%esp) + leal 320(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 320(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 324(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %edi # 4-byte Reload + adcl 344(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 352(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 360(%esp), %esi + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 272(%esp), %ebp + movl 96(%esp), %ecx # 4-byte Reload + adcl 276(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload 
+ adcl 288(%esp), %ebp + adcl 292(%esp), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 296(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 308(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 312(%esp), %esi + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 96(%esp), %eax # 4-byte Reload + addl 224(%esp), %eax + movl 100(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl 92(%esp), %ecx # 4-byte Reload + adcl 232(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 236(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 240(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl 244(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 248(%esp), %ebp + movl 120(%esp), %ecx # 4-byte Reload + adcl 252(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 256(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 260(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 264(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl 68(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %eax, %edi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 176(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + 
calll .LmulPv320x32 + addl 176(%esp), %edi + movl 92(%esp), %ecx # 4-byte Reload + adcl 180(%esp), %ecx + movl 112(%esp), %edi # 4-byte Reload + adcl 184(%esp), %edi + movl 124(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 196(%esp), %ebp + movl 120(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 128(%esp), %esi + movl %edi, %eax + adcl 132(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %eax, %edi + movl 124(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 140(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + adcl 144(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl %ebp, %edx + movl 120(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 76(%esp), %ebx 
# 4-byte Reload + adcl 164(%esp), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + subl 12(%esp), %edi # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + sbbl 16(%esp), %esi # 4-byte Folded Reload + sbbl 20(%esp), %edx # 4-byte Folded Reload + movl 120(%esp), %eax # 4-byte Reload + sbbl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl %ebp, %eax + sbbl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 100(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + sbbl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 96(%esp) # 4-byte Spill + sbbl $0, %eax + andl $1, %eax + jne .LBB148_2 +# BB#1: + movl %edx, 80(%esp) # 4-byte Spill +.LBB148_2: + testb %al, %al + movl 112(%esp), %edx # 4-byte Reload + jne .LBB148_4 +# BB#3: + movl %edi, %edx +.LBB148_4: + movl 624(%esp), %edi + movl %edx, (%edi) + movl 108(%esp), %edx # 4-byte Reload + jne .LBB148_6 +# BB#5: + movl %ecx, 124(%esp) # 4-byte Spill +.LBB148_6: + movl 124(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%edi) + movl 116(%esp), %ecx # 4-byte Reload + jne .LBB148_8 +# BB#7: + movl %esi, %ecx +.LBB148_8: + movl %ecx, 8(%edi) + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 12(%edi) + movl 76(%esp), %ecx # 4-byte Reload + movl 120(%esp), %eax # 4-byte Reload + jne .LBB148_10 +# BB#9: + movl 64(%esp), %eax # 4-byte Reload +.LBB148_10: + movl %eax, 16(%edi) + movl 84(%esp), %eax # 4-byte Reload + movl 104(%esp), %ebp # 4-byte Reload + jne .LBB148_12 +# BB#11: 
+ movl 68(%esp), %ebp # 4-byte Reload +.LBB148_12: + movl %ebp, 20(%edi) + movl 88(%esp), %ebx # 4-byte Reload + jne .LBB148_14 +# BB#13: + movl 72(%esp), %ebx # 4-byte Reload +.LBB148_14: + movl %ebx, 24(%edi) + jne .LBB148_16 +# BB#15: + movl 92(%esp), %edx # 4-byte Reload +.LBB148_16: + movl %edx, 28(%edi) + jne .LBB148_18 +# BB#17: + movl 100(%esp), %ecx # 4-byte Reload +.LBB148_18: + movl %ecx, 32(%edi) + jne .LBB148_20 +# BB#19: + movl 96(%esp), %eax # 4-byte Reload +.LBB148_20: + movl %eax, 36(%edi) + addl $604, %esp # imm = 0x25C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end148: + .size mcl_fp_montRed10Lbmi2, .Lfunc_end148-mcl_fp_montRed10Lbmi2 + + .globl mcl_fp_addPre10Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre10Lbmi2,@function +mcl_fp_addPre10Lbmi2: # @mcl_fp_addPre10Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl %edx, 28(%ebx) + movl %esi, 32(%ebx) + movl 36(%eax), %eax + movl 36(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 36(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end149: + .size mcl_fp_addPre10Lbmi2, .Lfunc_end149-mcl_fp_addPre10Lbmi2 + + .globl mcl_fp_subPre10Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre10Lbmi2,@function +mcl_fp_subPre10Lbmi2: # @mcl_fp_subPre10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx 
+ pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl %esi, 28(%ebp) + movl %edi, 32(%ebp) + movl 36(%edx), %edx + movl 36(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 36(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end150: + .size mcl_fp_subPre10Lbmi2, .Lfunc_end150-mcl_fp_subPre10Lbmi2 + + .globl mcl_fp_shr1_10Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_10Lbmi2,@function +mcl_fp_shr1_10Lbmi2: # @mcl_fp_shr1_10Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 24(%esi) + movl 32(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 28(%esi) + movl 36(%eax), %eax + shrdl $1, %eax, %ecx + movl %ecx, 32(%esi) + shrl %eax + movl %eax, 36(%esi) + popl %esi + retl +.Lfunc_end151: + .size mcl_fp_shr1_10Lbmi2, .Lfunc_end151-mcl_fp_shr1_10Lbmi2 + + .globl mcl_fp_add10Lbmi2 + .align 
16, 0x90 + .type mcl_fp_add10Lbmi2,@function +mcl_fp_add10Lbmi2: # @mcl_fp_add10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 52(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 48(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, (%esp) # 4-byte Spill + adcl 4(%ebx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + movl 16(%ebx), %eax + adcl 12(%edi), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + adcl 16(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 20(%ebx), %eax + adcl 20(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 24(%ebx), %esi + adcl 24(%edi), %esi + movl 28(%ebx), %ebp + adcl 28(%edi), %ebp + movl 32(%ebx), %edx + adcl 32(%edi), %edx + movl 36(%ebx), %ecx + adcl 36(%edi), %ecx + movl 44(%esp), %edi + movl (%esp), %ebx # 4-byte Reload + movl %ebx, (%edi) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 4(%edi) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 8(%edi) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 12(%edi) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 16(%edi) + movl 4(%esp), %eax # 4-byte Reload + movl %eax, 20(%edi) + movl %esi, 24(%edi) + movl %ebp, 28(%edi) + movl %edx, 32(%edi) + movl %ecx, 36(%edi) + sbbl %eax, %eax + andl $1, %eax + movl 56(%esp), %edi + subl (%edi), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 20(%esp), %ebx # 4-byte Reload + sbbl 4(%edi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 16(%esp), %ebx # 4-byte Reload + sbbl 8(%edi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + sbbl 12(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebx # 4-byte Reload + sbbl 16(%edi), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl 4(%esp), %ebx # 4-byte Reload + sbbl 20(%edi), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + sbbl 24(%edi), %esi + sbbl 28(%edi), %ebp + sbbl 32(%edi), %edx + sbbl 36(%edi), %ecx + 
sbbl $0, %eax + testb $1, %al + jne .LBB152_2 +# BB#1: # %nocarry + movl (%esp), %edi # 4-byte Reload + movl 44(%esp), %ebx + movl %edi, (%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 8(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 4(%esp), %eax # 4-byte Reload + movl %eax, 20(%ebx) + movl %esi, 24(%ebx) + movl %ebp, 28(%ebx) + movl %edx, 32(%ebx) + movl %ecx, 36(%ebx) +.LBB152_2: # %carry + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end152: + .size mcl_fp_add10Lbmi2, .Lfunc_end152-mcl_fp_add10Lbmi2 + + .globl mcl_fp_addNF10Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF10Lbmi2,@function +mcl_fp_addNF10Lbmi2: # @mcl_fp_addNF10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 100(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %esi + movl 96(%esp), %edx + addl (%edx), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 4(%edx), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 36(%ecx), %edi + movl 32(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %ebx + movl 12(%ecx), %eax + movl 8(%ecx), %esi + adcl 8(%edx), %esi + adcl 12(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 16(%edx), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + adcl 20(%edx), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 24(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 28(%edx), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 32(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl %esi, %ecx + adcl 36(%edx), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 104(%esp), %edi + movl 52(%esp), 
%edx # 4-byte Reload + subl (%edi), %edx + movl 56(%esp), %esi # 4-byte Reload + sbbl 4(%edi), %esi + movl %esi, (%esp) # 4-byte Spill + movl %ecx, %esi + sbbl 8(%edi), %esi + movl %esi, 4(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 16(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + sbbl 20(%edi), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + movl %esi, %eax + movl %esi, %ebp + sbbl 24(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + movl %esi, %eax + movl %esi, %ebx + sbbl 32(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + movl %eax, %esi + sbbl 36(%edi), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl %esi, %edi + movl 52(%esp), %esi # 4-byte Reload + sarl $31, %edi + testl %edi, %edi + js .LBB153_2 +# BB#1: + movl %edx, %esi +.LBB153_2: + movl 92(%esp), %edx + movl %esi, (%edx) + movl 56(%esp), %esi # 4-byte Reload + js .LBB153_4 +# BB#3: + movl (%esp), %esi # 4-byte Reload +.LBB153_4: + movl %esi, 4(%edx) + movl %ebp, %edi + movl 40(%esp), %esi # 4-byte Reload + js .LBB153_6 +# BB#5: + movl 4(%esp), %ecx # 4-byte Reload +.LBB153_6: + movl %ecx, 8(%edx) + movl %ebx, %ecx + movl 44(%esp), %ebp # 4-byte Reload + js .LBB153_8 +# BB#7: + movl 8(%esp), %esi # 4-byte Reload +.LBB153_8: + movl %esi, 12(%edx) + movl 68(%esp), %esi # 4-byte Reload + movl 48(%esp), %ebx # 4-byte Reload + js .LBB153_10 +# BB#9: + movl 12(%esp), %ebp # 4-byte Reload +.LBB153_10: + movl %ebp, 16(%edx) + js .LBB153_12 +# BB#11: + movl 16(%esp), %ebx # 4-byte Reload +.LBB153_12: + movl %ebx, 20(%edx) + js .LBB153_14 +# BB#13: + movl 20(%esp), %edi # 4-byte Reload +.LBB153_14: + movl %edi, 24(%edx) + js .LBB153_16 +# BB#15: + movl 24(%esp), %esi # 4-byte Reload +.LBB153_16: + movl %esi, 28(%edx) + js .LBB153_18 +# BB#17: + movl 28(%esp), 
%ecx # 4-byte Reload +.LBB153_18: + movl %ecx, 32(%edx) + js .LBB153_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB153_20: + movl %eax, 36(%edx) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end153: + .size mcl_fp_addNF10Lbmi2, .Lfunc_end153-mcl_fp_addNF10Lbmi2 + + .globl mcl_fp_sub10Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub10Lbmi2,@function +mcl_fp_sub10Lbmi2: # @mcl_fp_sub10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + xorl %ebx, %ebx + movl 60(%esp), %edi + subl (%edi), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 20(%esi), %edx + sbbl 20(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 24(%esi), %ecx + sbbl 24(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 32(%esi), %ebp + sbbl 32(%edi), %ebp + movl 36(%esi), %esi + sbbl 36(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 52(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl %edx, 20(%ebx) + movl %ecx, 24(%ebx) + movl %eax, 28(%ebx) + movl %ebp, 32(%ebx) + movl %esi, 36(%ebx) + je .LBB154_2 +# BB#1: # %carry + movl %esi, %edi + movl 64(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 
24(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl %eax, 28(%ebx) + movl 32(%esi), %eax + adcl %ebp, %eax + movl %eax, 32(%ebx) + movl 36(%esi), %eax + adcl %edi, %eax + movl %eax, 36(%ebx) +.LBB154_2: # %nocarry + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end154: + .size mcl_fp_sub10Lbmi2, .Lfunc_end154-mcl_fp_sub10Lbmi2 + + .globl mcl_fp_subNF10Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF10Lbmi2,@function +mcl_fp_subNF10Lbmi2: # @mcl_fp_subNF10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 80(%esp), %eax + movl 36(%eax), %esi + movl (%eax), %edi + movl 4(%eax), %edx + movl 84(%esp), %ecx + subl (%ecx), %edi + movl %edi, 36(%esp) # 4-byte Spill + sbbl 4(%ecx), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 32(%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 28(%eax), %edi + movl 24(%eax), %ebx + movl 20(%eax), %ebp + movl 16(%eax), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 12(%eax), %edx + movl 8(%eax), %eax + sbbl 8(%ecx), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 12(%ecx), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 16(%ecx), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl 20(%ecx), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + sbbl 24(%ecx), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + sbbl 28(%ecx), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + sbbl 32(%ecx), %eax + movl %eax, 48(%esp) # 4-byte Spill + sbbl 36(%ecx), %esi + movl %esi, 20(%esp) 
# 4-byte Spill + movl %esi, %eax + sarl $31, %eax + movl %eax, %edx + addl %edx, %edx + movl %eax, %ecx + adcl %ecx, %ecx + movl %esi, %ebx + shrl $31, %ebx + orl %edx, %ebx + movl 88(%esp), %edi + movl 20(%edi), %edx + andl %ecx, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 12(%edi), %edx + andl %ecx, %edx + movl %edx, 4(%esp) # 4-byte Spill + andl 4(%edi), %ecx + movl 16(%edi), %edx + andl %ebx, %edx + movl %edx, (%esp) # 4-byte Spill + movl 8(%edi), %edx + andl %ebx, %edx + andl (%edi), %ebx + movl 36(%edi), %esi + andl %eax, %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 32(%edi), %ebp + andl %eax, %ebp + movl 28(%edi), %esi + andl %eax, %esi + andl 24(%edi), %eax + addl 36(%esp), %ebx # 4-byte Folded Reload + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %edi + movl %ebx, (%edi) + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %ecx, 4(%edi) + movl 4(%esp), %ecx # 4-byte Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %edx, 8(%edi) + movl (%esp), %edx # 4-byte Reload + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %ecx, 12(%edi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %edx, 16(%edi) + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 20(%edi) + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %eax, 24(%edi) + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl %esi, 28(%edi) + movl %ebp, 32(%edi) + movl 8(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%edi) + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end155: + .size mcl_fp_subNF10Lbmi2, .Lfunc_end155-mcl_fp_subNF10Lbmi2 + + .globl mcl_fpDbl_add10Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add10Lbmi2,@function +mcl_fpDbl_add10Lbmi2: # @mcl_fpDbl_add10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 100(%esp), %edx + movl 96(%esp), %edi + movl 12(%edi), %esi + movl 16(%edi), %ecx + movl 8(%edx), %ebx + movl 
(%edx), %ebp + addl (%edi), %ebp + movl 92(%esp), %eax + movl %ebp, (%eax) + movl 4(%edx), %ebp + adcl 4(%edi), %ebp + adcl 8(%edi), %ebx + adcl 12(%edx), %esi + adcl 16(%edx), %ecx + movl %ebp, 4(%eax) + movl 48(%edx), %ebp + movl %ebx, 8(%eax) + movl 20(%edx), %ebx + movl %esi, 12(%eax) + movl 20(%edi), %esi + adcl %ebx, %esi + movl 24(%edx), %ebx + movl %ecx, 16(%eax) + movl 24(%edi), %ecx + adcl %ebx, %ecx + movl 28(%edx), %ebx + movl %esi, 20(%eax) + movl 28(%edi), %esi + adcl %ebx, %esi + movl 32(%edx), %ebx + movl %ecx, 24(%eax) + movl 32(%edi), %ecx + adcl %ebx, %ecx + movl 36(%edx), %ebx + movl %esi, 28(%eax) + movl 36(%edi), %esi + adcl %ebx, %esi + movl 40(%edx), %ebx + movl %ecx, 32(%eax) + movl 40(%edi), %ecx + adcl %ebx, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 44(%edx), %ebx + movl %esi, 36(%eax) + movl 44(%edi), %eax + adcl %ebx, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 48(%edi), %eax + adcl %ebp, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 52(%edx), %eax + movl 52(%edi), %ecx + adcl %eax, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 56(%edx), %eax + movl 56(%edi), %ecx + adcl %eax, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 60(%edx), %eax + movl 60(%edi), %ecx + adcl %eax, %ecx + movl 64(%edx), %esi + movl 64(%edi), %eax + adcl %esi, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 68(%edx), %ebx + movl 68(%edi), %esi + adcl %ebx, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 72(%edx), %ebx + movl 72(%edi), %ebp + adcl %ebx, %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 76(%edx), %edx + movl 76(%edi), %edi + adcl %edx, %edi + movl %edi, 36(%esp) # 4-byte Spill + sbbl %edx, %edx + andl $1, %edx + movl 104(%esp), %ebx + movl 64(%esp), %edi # 4-byte Reload + subl (%ebx), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + sbbl 4(%ebx), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + sbbl 8(%ebx), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 
56(%esp), %edi # 4-byte Reload + sbbl 12(%ebx), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 16(%ebx), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %ecx, %edi + sbbl 20(%ebx), %edi + movl %edi, (%esp) # 4-byte Spill + sbbl 24(%ebx), %eax + movl %eax, 24(%esp) # 4-byte Spill + sbbl 28(%ebx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl %ebp, %eax + movl 36(%esp), %ebp # 4-byte Reload + sbbl 32(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %edi + sbbl 36(%ebx), %edi + sbbl $0, %edx + andl $1, %edx + jne .LBB156_2 +# BB#1: + movl %edi, %ebp +.LBB156_2: + testb %dl, %dl + movl 64(%esp), %edx # 4-byte Reload + movl 60(%esp), %esi # 4-byte Reload + movl 56(%esp), %edi # 4-byte Reload + movl 52(%esp), %ebx # 4-byte Reload + jne .LBB156_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebx # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 20(%esp), %edx # 4-byte Reload +.LBB156_4: + movl 92(%esp), %eax + movl %edx, 40(%eax) + movl 68(%esp), %edx # 4-byte Reload + movl %edx, 44(%eax) + movl %ebx, 48(%eax) + movl %edi, 52(%eax) + movl %esi, 56(%eax) + movl %ecx, 60(%eax) + movl 44(%esp), %edx # 4-byte Reload + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB156_6 +# BB#5: + movl 24(%esp), %ecx # 4-byte Reload +.LBB156_6: + movl %ecx, 64(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB156_8 +# BB#7: + movl 28(%esp), %edx # 4-byte Reload +.LBB156_8: + movl %edx, 68(%eax) + jne .LBB156_10 +# BB#9: + movl 32(%esp), %ecx # 4-byte Reload +.LBB156_10: + movl %ecx, 72(%eax) + movl %ebp, 76(%eax) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end156: + .size mcl_fpDbl_add10Lbmi2, .Lfunc_end156-mcl_fpDbl_add10Lbmi2 + + .globl mcl_fpDbl_sub10Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub10Lbmi2,@function +mcl_fpDbl_sub10Lbmi2: # @mcl_fpDbl_sub10Lbmi2 +# 
BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %ebp + movl (%ebp), %edx + movl 4(%ebp), %esi + movl 88(%esp), %eax + subl (%eax), %edx + sbbl 4(%eax), %esi + movl 8(%ebp), %edi + sbbl 8(%eax), %edi + movl 80(%esp), %ecx + movl %edx, (%ecx) + movl 12(%ebp), %edx + sbbl 12(%eax), %edx + movl %esi, 4(%ecx) + movl 16(%ebp), %esi + sbbl 16(%eax), %esi + movl %edi, 8(%ecx) + movl 20(%eax), %edi + movl %edx, 12(%ecx) + movl 20(%ebp), %edx + sbbl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ecx) + movl 24(%ebp), %esi + sbbl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ecx) + movl 28(%ebp), %edx + sbbl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ecx) + movl 32(%ebp), %esi + sbbl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ecx) + movl 36(%ebp), %edx + sbbl %edi, %edx + movl 40(%eax), %edi + movl %esi, 32(%ecx) + movl 40(%ebp), %esi + sbbl %edi, %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 44(%eax), %esi + movl %edx, 36(%ecx) + movl 44(%ebp), %edx + sbbl %esi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 48(%eax), %edx + movl 48(%ebp), %esi + sbbl %edx, %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 52(%eax), %edx + movl 52(%ebp), %esi + sbbl %edx, %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 56(%eax), %edx + movl 56(%ebp), %esi + sbbl %edx, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 60(%eax), %edx + movl 60(%ebp), %esi + sbbl %edx, %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 64(%eax), %edx + movl 64(%ebp), %esi + sbbl %edx, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 68(%eax), %edx + movl 68(%ebp), %esi + sbbl %edx, %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 72(%eax), %edx + movl 72(%ebp), %esi + sbbl %edx, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 76(%eax), %eax + movl 76(%ebp), %edx + sbbl %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 92(%esp), %esi + jne .LBB157_1 +# BB#2: + movl $0, 16(%esp) # 4-byte Folded 
Spill + jmp .LBB157_3 +.LBB157_1: + movl 36(%esi), %edx + movl %edx, 16(%esp) # 4-byte Spill +.LBB157_3: + testb %al, %al + jne .LBB157_4 +# BB#5: + movl $0, 8(%esp) # 4-byte Folded Spill + movl $0, %ebx + jmp .LBB157_6 +.LBB157_4: + movl (%esi), %ebx + movl 4(%esi), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB157_6: + jne .LBB157_7 +# BB#8: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB157_9 +.LBB157_7: + movl 32(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB157_9: + jne .LBB157_10 +# BB#11: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB157_12 +.LBB157_10: + movl 28(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB157_12: + jne .LBB157_13 +# BB#14: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB157_15 +.LBB157_13: + movl 24(%esi), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB157_15: + jne .LBB157_16 +# BB#17: + movl $0, %ebp + jmp .LBB157_18 +.LBB157_16: + movl 20(%esi), %ebp +.LBB157_18: + jne .LBB157_19 +# BB#20: + movl $0, %eax + jmp .LBB157_21 +.LBB157_19: + movl 16(%esi), %eax +.LBB157_21: + jne .LBB157_22 +# BB#23: + movl $0, %edx + jmp .LBB157_24 +.LBB157_22: + movl 12(%esi), %edx +.LBB157_24: + jne .LBB157_25 +# BB#26: + xorl %esi, %esi + jmp .LBB157_27 +.LBB157_25: + movl 8(%esi), %esi +.LBB157_27: + addl 28(%esp), %ebx # 4-byte Folded Reload + movl 8(%esp), %edi # 4-byte Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %ebx, 40(%ecx) + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %edi, 44(%ecx) + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %esi, 48(%ecx) + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %edx, 52(%ecx) + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %eax, 56(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %ebp, 60(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %eax, 64(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %edx, 68(%ecx) + movl %eax, 
72(%ecx) + movl 16(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%ecx) + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end157: + .size mcl_fpDbl_sub10Lbmi2, .Lfunc_end157-mcl_fpDbl_sub10Lbmi2 + + .align 16, 0x90 + .type .LmulPv352x32,@function +.LmulPv352x32: # @mulPv352x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl %edx, %eax + movl 52(%esp), %edx + mulxl 4(%eax), %ebx, %esi + mulxl (%eax), %edi, %ebp + movl %edi, 28(%esp) # 4-byte Spill + addl %ebx, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + mulxl 8(%eax), %edi, %ebx + adcl %esi, %edi + movl %edi, 20(%esp) # 4-byte Spill + mulxl 12(%eax), %esi, %edi + adcl %ebx, %esi + movl %esi, 16(%esp) # 4-byte Spill + mulxl 16(%eax), %esi, %ebx + adcl %edi, %esi + movl %esi, 12(%esp) # 4-byte Spill + mulxl 20(%eax), %esi, %edi + adcl %ebx, %esi + movl %esi, 8(%esp) # 4-byte Spill + mulxl 24(%eax), %ebx, %esi + adcl %edi, %ebx + mulxl 28(%eax), %edi, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl %esi, %edi + mulxl 32(%eax), %esi, %ebp + movl %ebp, (%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + mulxl 36(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl %ebx, 24(%ecx) + movl %edi, 28(%ecx) + movl %esi, 32(%ecx) + movl %edx, 36(%ecx) + movl 52(%esp), %edx + mulxl 40(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ecx) + adcl $0, %edx + movl %edx, 44(%ecx) + movl %ecx, %eax + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end158: 
+ .size .LmulPv352x32, .Lfunc_end158-.LmulPv352x32 + + .globl mcl_fp_mulUnitPre11Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre11Lbmi2,@function +mcl_fp_mulUnitPre11Lbmi2: # @mcl_fp_mulUnitPre11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $92, %esp + calll .L159$pb +.L159$pb: + popl %ebx +.Ltmp20: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp20-.L159$pb), %ebx + movl 120(%esp), %eax + movl %eax, (%esp) + leal 40(%esp), %ecx + movl 116(%esp), %edx + calll .LmulPv352x32 + movl 84(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 60(%esp), %ebp + movl 56(%esp), %ebx + movl 52(%esp), %edi + movl 48(%esp), %esi + movl 40(%esp), %edx + movl 44(%esp), %ecx + movl 112(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + addl $92, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end159: + .size mcl_fp_mulUnitPre11Lbmi2, .Lfunc_end159-mcl_fp_mulUnitPre11Lbmi2 + + .globl mcl_fpDbl_mulPre11Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre11Lbmi2,@function +mcl_fpDbl_mulPre11Lbmi2: # @mcl_fpDbl_mulPre11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $620, %esp # imm = 0x26C + calll .L160$pb +.L160$pb: + popl %eax +.Ltmp21: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp21-.L160$pb), %eax 
+ movl %eax, 84(%esp) # 4-byte Spill + movl %eax, %ebx + movl 648(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 644(%esp), %edx + movl %edx, %ebp + movl %ebx, %edi + calll .LmulPv352x32 + movl 612(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 584(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 580(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 568(%esp), %eax + movl 572(%esp), %esi + movl 640(%esp), %ecx + movl %eax, (%ecx) + movl 648(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 520(%esp), %ecx + movl %ebp, %edx + movl %edi, %ebx + calll .LmulPv352x32 + addl 520(%esp), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 540(%esp), %ebx + movl 536(%esp), %edi + movl 532(%esp), %esi + movl 524(%esp), %ecx + movl 528(%esp), %edx + movl 640(%esp), %eax + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte 
Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 472(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 516(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 508(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 504(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 492(%esp), %ebp + movl 488(%esp), %edi + movl 484(%esp), %esi + movl 476(%esp), %ecx + movl 480(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%eax) + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte 
Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 444(%esp), %ebx + movl 440(%esp), %edi + movl 436(%esp), %esi + movl 428(%esp), %ecx + movl 432(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 
68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 376(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 400(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 396(%esp), %ebp + movl 392(%esp), %edi + movl 388(%esp), %esi + movl 380(%esp), %ecx + movl 384(%esp), %edx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + 
movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 328(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 360(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 348(%esp), %ebx + movl 344(%esp), %edi + movl 340(%esp), %esi + movl 332(%esp), %ecx + movl 336(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 648(%esp), 
%eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 280(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 300(%esp), %ebp + movl 296(%esp), %edi + movl 292(%esp), %esi + movl 284(%esp), %ecx + movl 288(%esp), %edx + movl 640(%esp), %eax + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + 
addl 232(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 252(%esp), %ebx + movl 248(%esp), %edi + movl 244(%esp), %esi + movl 236(%esp), %ecx + movl 240(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 184(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 220(%esp), %eax + movl %eax, 
68(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 204(%esp), %ebp + movl 200(%esp), %edi + movl 196(%esp), %esi + movl 188(%esp), %ecx + movl 192(%esp), %edx + movl 640(%esp), %eax + movl 32(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 648(%esp), %edi + movl 36(%edi), %eax + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 644(%esp), %eax + movl %eax, %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 136(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 164(%esp), %ebp + movl 160(%esp), %eax + movl 
%eax, 40(%esp) # 4-byte Spill + movl 156(%esp), %edi + movl 152(%esp), %esi + movl 148(%esp), %edx + movl 140(%esp), %ecx + movl 144(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 36(%eax) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 52(%esp), %eax # 4-byte Reload + addl 88(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 92(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 120(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 44(%esp) # 4-byte 
Spill + movl 112(%esp), %edi + movl 108(%esp), %esi + movl 104(%esp), %edx + movl 100(%esp), %ecx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 40(%eax) + movl %ebp, 44(%eax) + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %ecx, 52(%eax) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edx, 56(%eax) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %esi, 60(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx # 4-byte Folded Reload + movl %edi, 64(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + movl 68(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %edx, 72(%eax) + movl %ecx, 76(%eax) + movl 76(%esp), %ecx # 4-byte Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%eax) + movl 84(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 84(%eax) + addl $620, %esp # imm = 0x26C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end160: + .size mcl_fpDbl_mulPre11Lbmi2, .Lfunc_end160-mcl_fpDbl_mulPre11Lbmi2 + + .globl mcl_fpDbl_sqrPre11Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre11Lbmi2,@function +mcl_fpDbl_sqrPre11Lbmi2: # @mcl_fpDbl_sqrPre11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $620, %esp # imm = 0x26C + calll .L161$pb +.L161$pb: + popl %ebx +.Ltmp22: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp22-.L161$pb), %ebx + movl %ebx, 84(%esp) # 4-byte Spill + movl 644(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl %edx, %esi + movl %ebx, %edi + calll .LmulPv352x32 + movl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl 
%eax, 48(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 584(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 580(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 568(%esp), %eax + movl 572(%esp), %ebp + movl 640(%esp), %ecx + movl %eax, (%ecx) + movl %esi, %edx + movl 4(%edx), %eax + movl %eax, (%esp) + leal 520(%esp), %ecx + movl %edi, %ebx + calll .LmulPv352x32 + addl 520(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 540(%esp), %ebx + movl 536(%esp), %edi + movl 532(%esp), %esi + movl 524(%esp), %ecx + movl 528(%esp), %edx + movl 640(%esp), %eax + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%eax) + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 
4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 60(%esp), %eax # 4-byte Reload + addl 472(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 516(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 508(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 504(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 492(%esp), %ebp + movl 488(%esp), %edi + movl 484(%esp), %esi + movl 476(%esp), %ecx + movl 480(%esp), %edx + movl 640(%esp), %eax + movl 60(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 
644(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 48(%esp), %eax # 4-byte Reload + addl 424(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 448(%esp), %ebx + movl 444(%esp), %edi + movl 440(%esp), %esi + movl 436(%esp), %edx + movl 428(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 432(%esp), %ecx + movl 640(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 80(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 80(%esp), %eax # 4-byte Reload + 
addl 376(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 404(%esp), %ebx + movl 400(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 396(%esp), %edi + movl 392(%esp), %esi + movl 388(%esp), %edx + movl 380(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 384(%esp), %ecx + movl 640(%esp), %eax + movl 80(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 80(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 328(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 360(%esp), 
%eax + movl %eax, 52(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 348(%esp), %ebp + movl 344(%esp), %edi + movl 340(%esp), %esi + movl 332(%esp), %ecx + movl 336(%esp), %edx + movl 640(%esp), %eax + movl 48(%esp), %ebx # 4-byte Reload + movl %ebx, 20(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 80(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 48(%esp), %eax # 4-byte Reload + addl 280(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 304(%esp), %ebx + movl 300(%esp), %edi + movl 296(%esp), %esi + movl 
292(%esp), %edx + movl 284(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 288(%esp), %ecx + movl 640(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 80(%esp) # 4-byte Folded Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 80(%esp), %eax # 4-byte Reload + addl 232(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 260(%esp), %ebx + movl 256(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 252(%esp), %edi + movl 248(%esp), %esi + movl 244(%esp), %edx + movl 236(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 240(%esp), %ecx + movl 640(%esp), %eax + movl 80(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + 
movl 52(%esp), %ebp # 4-byte Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 184(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 220(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 204(%esp), %ebp + movl 200(%esp), %edi + movl 196(%esp), %esi + movl 188(%esp), %ecx + movl 192(%esp), %edx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 
12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 52(%esp), %eax # 4-byte Reload + addl 136(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 164(%esp), %ebp + movl 160(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 156(%esp), %edi + movl 152(%esp), %esi + movl 148(%esp), %edx + movl 140(%esp), %ecx + movl 144(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 36(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 
4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 40(%edx), %eax + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 52(%esp), %eax # 4-byte Reload + addl 88(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 92(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 120(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 112(%esp), %edi + movl 108(%esp), %esi + movl 104(%esp), %edx + movl 100(%esp), %ecx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 40(%eax) + movl %ebp, 44(%eax) + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %ecx, 52(%eax) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edx, 56(%eax) + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %esi, 60(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %edi, 64(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + movl 
72(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %edx, 72(%eax) + movl %ecx, 76(%eax) + movl 80(%esp), %ecx # 4-byte Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%eax) + movl 84(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 84(%eax) + addl $620, %esp # imm = 0x26C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end161: + .size mcl_fpDbl_sqrPre11Lbmi2, .Lfunc_end161-mcl_fpDbl_sqrPre11Lbmi2 + + .globl mcl_fp_mont11Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont11Lbmi2,@function +mcl_fp_mont11Lbmi2: # @mcl_fp_mont11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1132, %esp # imm = 0x46C + calll .L162$pb +.L162$pb: + popl %ebx +.Ltmp23: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp23-.L162$pb), %ebx + movl 1164(%esp), %eax + movl -4(%eax), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1080(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 1080(%esp), %edi + movl 1084(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %edi, %eax + imull %ebp, %eax + movl 1124(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1120(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1116(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1112(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1108(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1104(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 1100(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 1096(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1092(%esp), %esi + movl 1088(%esp), %ebp + movl %eax, (%esp) + leal 1032(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 1032(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1040(%esp), %ebp + adcl 1044(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 
4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1076(%esp), %esi + sbbl %edi, %edi + movl 1160(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl 56(%esp), %ecx # 4-byte Reload + addl 984(%esp), %ecx + adcl 988(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1024(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 1028(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + 
movl %eax, (%esp) + leal 936(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 936(%esp), %esi + adcl 940(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 964(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 980(%esp), %esi + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + addl 888(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 912(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl 
%eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 928(%esp), %esi + movl %esi, %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ebp, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 840(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + movl %esi, %eax + andl $1, %eax + addl 840(%esp), %ebp + movl 40(%esp), %ecx # 4-byte Reload + adcl 844(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 848(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 24(%esp), %ecx # 4-byte Reload + adcl 852(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 856(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 864(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 868(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 872(%esp), %ebp + movl 68(%esp), %esi # 4-byte Reload + adcl 876(%esp), %esi + adcl 880(%esp), %edi + movl 52(%esp), %ecx # 4-byte Reload + adcl 884(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 792(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 792(%esp), %ecx + movl 36(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte 
Reload + adcl 808(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 820(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + adcl 824(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 828(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 836(%esp), %esi + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 744(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 744(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 768(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 776(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 788(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1156(%esp), %eax + 
movl %eax, %edx + calll .LmulPv352x32 + movl 36(%esp), %ecx # 4-byte Reload + addl 696(%esp), %ecx + movl 24(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 716(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 724(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 728(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 648(%esp), %ebp + movl 24(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + movl 32(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 676(%esp), %edi + adcl 680(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), 
%eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + adcl 692(%esp), %esi + adcl $0, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 600(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 24(%esp), %ecx # 4-byte Reload + addl 600(%esp), %ecx + adcl 604(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 608(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 624(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 640(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %esi + movl %esi, %eax + addl 552(%esp), %edi + movl 28(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl %ebp, %edi + adcl 560(%esp), %edi + movl 44(%esp), %ecx # 4-byte Reload + adcl 564(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 568(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + 
movl 64(%esp), %esi # 4-byte Reload + adcl 572(%esp), %esi + movl 68(%esp), %ecx # 4-byte Reload + adcl 576(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 580(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 584(%esp), %ebp + movl 48(%esp), %ecx # 4-byte Reload + adcl 588(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 592(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 596(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 28(%esp), %ecx # 4-byte Reload + addl 504(%esp), %ecx + adcl 508(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 520(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 532(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 536(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 456(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + andl 
$1, %ebp + movl %ebp, %eax + addl 456(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 460(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 464(%esp), %ebp + movl 60(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 472(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 484(%esp), %edi + adcl 488(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 492(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 496(%esp), %esi + movl 24(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 32(%esp), %ecx # 4-byte Reload + addl 408(%esp), %ecx + adcl 412(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 428(%esp), %ebp + adcl 432(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 444(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + 
adcl 448(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 360(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 368(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 380(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 384(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 392(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 44(%esp), %ecx # 4-byte Reload + addl 312(%esp), %ecx + adcl 316(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 332(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + 
movl 48(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 340(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + adcl 348(%esp), %edi + movl 28(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %ebp + movl %ebp, %ecx + addl 264(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 276(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 284(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 300(%esp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + adcl 304(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 216(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + 
movl %eax, 64(%esp) # 4-byte Spill + adcl 224(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 232(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 252(%esp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + movl %esi, %ecx + andl $1, %ecx + addl 168(%esp), %ebp + movl 64(%esp), %esi # 4-byte Reload + adcl 172(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 184(%esp), %ebp + movl 48(%esp), %edi # 4-byte Reload + adcl 188(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 
32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl %esi, %ecx + addl 120(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 128(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 136(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 156(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 20(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 72(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + andl $1, %esi + addl 72(%esp), %edi + movl 48(%esp), %edi # 4-byte Reload + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl 88(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 92(%esp), 
%edx + movl %edx, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl $0, %esi + movl 1164(%esp), %ebp + subl (%ebp), %eax + movl %ecx, %edx + sbbl 4(%ebp), %edx + movl 52(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + sbbl 12(%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 28(%esp), %ebx # 4-byte Reload + sbbl 28(%ebp), %ebx + movl 32(%esp), %edi # 4-byte Reload + sbbl 32(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + sbbl 36(%ebp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 40(%ebp), %edi + movl %edi, %ebp + sbbl $0, %esi + andl $1, %esi + jne .LBB162_2 +# BB#1: + movl %ebx, 28(%esp) # 4-byte Spill +.LBB162_2: + movl %esi, %ebx + testb %bl, %bl + movl 68(%esp), %ebx # 4-byte Reload + jne .LBB162_4 +# BB#3: + movl %eax, %ebx +.LBB162_4: + movl 1152(%esp), %eax + movl %ebx, (%eax) + movl 56(%esp), %edi # 4-byte Reload + jne .LBB162_6 +# BB#5: + movl %edx, %edi +.LBB162_6: + movl %edi, 4(%eax) + movl 52(%esp), %edx # 4-byte Reload + jne .LBB162_8 +# BB#7: + movl %ecx, %edx +.LBB162_8: + movl %edx, 8(%eax) + jne .LBB162_10 +# BB#9: + movl 4(%esp), %ecx # 4-byte Reload + 
movl %ecx, 48(%esp) # 4-byte Spill +.LBB162_10: + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB162_12 +# BB#11: + movl 8(%esp), %ecx # 4-byte Reload +.LBB162_12: + movl %ecx, 16(%eax) + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB162_14 +# BB#13: + movl 12(%esp), %ecx # 4-byte Reload +.LBB162_14: + movl %ecx, 20(%eax) + movl 24(%esp), %ecx # 4-byte Reload + jne .LBB162_16 +# BB#15: + movl 16(%esp), %ecx # 4-byte Reload +.LBB162_16: + movl %ecx, 24(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 32(%esp), %ecx # 4-byte Reload + jne .LBB162_18 +# BB#17: + movl 20(%esp), %ecx # 4-byte Reload +.LBB162_18: + movl %ecx, 32(%eax) + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB162_20 +# BB#19: + movl 60(%esp), %ecx # 4-byte Reload +.LBB162_20: + movl %ecx, 36(%eax) + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB162_22 +# BB#21: + movl %ebp, %ecx +.LBB162_22: + movl %ecx, 40(%eax) + addl $1132, %esp # imm = 0x46C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end162: + .size mcl_fp_mont11Lbmi2, .Lfunc_end162-mcl_fp_mont11Lbmi2 + + .globl mcl_fp_montNF11Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF11Lbmi2,@function +mcl_fp_montNF11Lbmi2: # @mcl_fp_montNF11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1132, %esp # imm = 0x46C + calll .L163$pb +.L163$pb: + popl %ebx +.Ltmp24: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp24-.L163$pb), %ebx + movl 1164(%esp), %eax + movl -4(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1080(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 1080(%esp), %ebp + movl 1084(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 1124(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1120(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1116(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + 
movl 1112(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1108(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1104(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1100(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 1096(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1092(%esp), %esi + movl 1088(%esp), %edi + movl %eax, (%esp) + leal 1032(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 1032(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1040(%esp), %edi + adcl 1044(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 1048(%esp), %ebp + movl 28(%esp), %esi # 4-byte Reload + adcl 1052(%esp), %esi + movl 32(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 1028(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + addl 984(%esp), %ecx + adcl 988(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 996(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + adcl 1000(%esp), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 1004(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 
52(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 936(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + addl 936(%esp), %ebp + adcl 940(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl 956(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 960(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 980(%esp), %ebp + movl 1160(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 932(%esp), %eax + addl 888(%esp), %edi + movl 44(%esp), %ecx # 4-byte Reload + adcl 892(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 896(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 900(%esp), %ecx + movl %ecx, 
28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 904(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + adcl 908(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 912(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 916(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 920(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 924(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 928(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %edi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 840(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 840(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 860(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 872(%esp), %edi + movl 68(%esp), %esi # 4-byte Reload + adcl 876(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 884(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 792(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 836(%esp), %eax + movl 44(%esp), %edx # 4-byte Reload + addl 792(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 
796(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 800(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 804(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 808(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 812(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 816(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 820(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + adcl 824(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 828(%esp), %ebp + movl 48(%esp), %ecx # 4-byte Reload + adcl 832(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 744(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 744(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 764(%esp), %esi + movl 56(%esp), %edi # 4-byte Reload + adcl 768(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 780(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 784(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + 
movl 1160(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 740(%esp), %edx + movl 40(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + movl 28(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 704(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 708(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 712(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 716(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 720(%esp), %edi + movl 68(%esp), %ecx # 4-byte Reload + adcl 724(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 728(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 732(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 736(%esp), %esi + adcl $0, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %ebp + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 648(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 672(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 
684(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 688(%esp), %esi + movl %esi, %edi + movl 40(%esp), %esi # 4-byte Reload + adcl 692(%esp), %esi + movl 1160(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 600(%esp), %ecx + movl 1156(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + movl 644(%esp), %eax + movl 28(%esp), %ecx # 4-byte Reload + addl 600(%esp), %ecx + adcl 604(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 608(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 612(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 616(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 620(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 624(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 628(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 632(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 636(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + adcl 640(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 552(%esp), %esi + movl 32(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 560(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 576(%esp), %esi 
+ movl 60(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 596(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 1160(%esp), %ecx + movl %ecx, %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 548(%esp), %edx + movl 32(%esp), %eax # 4-byte Reload + addl 504(%esp), %eax + adcl 508(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 512(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 516(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 520(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 524(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 528(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 532(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 536(%esp), %edi + movl 40(%esp), %ecx # 4-byte Reload + adcl 540(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 544(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 456(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 456(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 468(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte 
Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 480(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 488(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + adcl 496(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 452(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 408(%esp), %ecx + movl 52(%esp), %ebp # 4-byte Reload + adcl 412(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 428(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 444(%esp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 448(%esp), %edi + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 360(%esp), %esi + adcl 364(%esp), %ebp + 
movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 372(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 392(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl 400(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 356(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + addl 312(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 320(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 332(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 340(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, 
%edx + movl %edx, 52(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 264(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 276(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %edi, %esi + adcl 284(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 292(%esp), %edi + movl 28(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 260(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 216(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 224(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 232(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 240(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + adcl 244(%esp), %ebp + movl 32(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + 
movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 168(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 176(%esp), %esi + movl 60(%esp), %edi # 4-byte Reload + adcl 180(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 196(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 204(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 164(%esp), %edx + movl 64(%esp), %ecx # 4-byte Reload + addl 120(%esp), %ecx + adcl 124(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 128(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 136(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 40(%esp) # 4-byte 
Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 152(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 156(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 72(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 72(%esp), %edi + movl 48(%esp), %edi # 4-byte Reload + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 84(%esp), %edi + adcl 88(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, %edx + movl 1164(%esp), %ebx + subl (%ebx), %edx + movl %ecx, %esi + sbbl 4(%ebx), %esi + movl %edi, %ecx + sbbl 8(%ebx), %ecx + movl 44(%esp), %eax # 4-byte Reload + sbbl 12(%ebx), %eax + movl 40(%esp), %ebp # 4-byte Reload + sbbl 16(%ebx), %ebp + movl %ebp, 4(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + sbbl 20(%ebx), %ebp + movl %ebp, 8(%esp) # 
4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + sbbl 24(%ebx), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + sbbl 28(%ebx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + sbbl 32(%ebx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + sbbl 36(%ebx), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + sbbl 40(%ebx), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl %ebp, %ebx + sarl $31, %ebx + testl %ebx, %ebx + movl 68(%esp), %ebx # 4-byte Reload + js .LBB163_2 +# BB#1: + movl %edx, %ebx +.LBB163_2: + movl 1152(%esp), %edx + movl %ebx, (%edx) + movl 60(%esp), %ebp # 4-byte Reload + js .LBB163_4 +# BB#3: + movl %esi, %ebp +.LBB163_4: + movl %ebp, 4(%edx) + js .LBB163_6 +# BB#5: + movl %ecx, %edi +.LBB163_6: + movl %edi, 8(%edx) + movl 44(%esp), %ecx # 4-byte Reload + js .LBB163_8 +# BB#7: + movl %eax, %ecx +.LBB163_8: + movl %ecx, 12(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB163_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB163_10: + movl %eax, 16(%edx) + movl 28(%esp), %eax # 4-byte Reload + js .LBB163_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB163_12: + movl %eax, 20(%edx) + movl 32(%esp), %eax # 4-byte Reload + js .LBB163_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB163_14: + movl %eax, 24(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB163_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB163_16: + movl %eax, 28(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB163_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB163_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB163_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB163_20: + movl %eax, 36(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB163_22 +# BB#21: + movl 48(%esp), %eax # 4-byte Reload +.LBB163_22: + movl %eax, 40(%edx) + addl $1132, %esp # imm = 0x46C + popl %esi + popl 
%edi + popl %ebx + popl %ebp + retl +.Lfunc_end163: + .size mcl_fp_montNF11Lbmi2, .Lfunc_end163-mcl_fp_montNF11Lbmi2 + + .globl mcl_fp_montRed11Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed11Lbmi2,@function +mcl_fp_montRed11Lbmi2: # @mcl_fp_montRed11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $668, %esp # imm = 0x29C + calll .L164$pb +.L164$pb: + popl %eax +.Ltmp25: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp25-.L164$pb), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 696(%esp), %edx + movl -4(%edx), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 692(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + movl 4(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + imull %esi, %ebx + movl 84(%ecx), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 48(%ecx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 40(%ecx), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 32(%ecx), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 28(%ecx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 24(%ecx), %ebp + movl 20(%ecx), %edi + movl 16(%ecx), %esi + movl 12(%ecx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 8(%ecx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), 
%ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 616(%esp), %ecx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 60(%esp), %eax # 4-byte Reload + addl 616(%esp), %eax + movl 64(%esp), %ecx # 4-byte Reload + adcl 620(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 632(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 636(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + adcl 640(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + sbbl %edi, 
%edi + movl %ecx, %eax + movl %ecx, %esi + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 568(%esp), %esi + movl 56(%esp), %edx # 4-byte Reload + adcl 572(%esp), %edx + movl 48(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 600(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 520(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 520(%esp), %ebp + movl 48(%esp), %ecx # 
4-byte Reload + adcl 524(%esp), %ecx + movl 52(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 548(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 120(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 472(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 
88(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 124(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 104(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 424(%esp), %ebp + movl 60(%esp), %ecx # 4-byte Reload + adcl 428(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + 
adcl 456(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 464(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 104(%esp) # 4-byte Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ecx, %esi + movl %esi, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 376(%esp), %esi + movl 64(%esp), %ecx # 4-byte Reload + adcl 380(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %esi # 4-byte Reload + adcl 404(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 412(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + movl 
96(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 328(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 344(%esp), %edi + movl 132(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 352(%esp), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 356(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 96(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 696(%esp), %eax + movl %eax, %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 280(%esp), %ebp + movl 88(%esp), %ebp # 4-byte Reload + adcl 284(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 100(%esp) # 4-byte 
Spill + movl 132(%esp), %edi # 4-byte Reload + adcl 296(%esp), %edi + movl 128(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl 304(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 232(%esp), %ebp + movl 84(%esp), %ebp # 4-byte Reload + adcl 236(%esp), %ebp + movl 100(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 244(%esp), %edi + movl %edi, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 
272(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 276(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl 56(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 184(%esp), %ebp + movl 100(%esp), %ecx # 4-byte Reload + adcl 188(%esp), %ecx + movl 132(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, %ebp + movl 68(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 136(%esp), %esi + movl 132(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl %eax, %edi + movl 128(%esp), %edx # 4-byte 
Reload + adcl 144(%esp), %edx + movl %edx, 128(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 148(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 152(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 80(%esp), %ebx # 4-byte Reload + adcl 180(%esp), %ebx + movl %ebx, 80(%esp) # 4-byte Spill + adcl $0, %ebp + subl 12(%esp), %edi # 4-byte Folded Reload + sbbl 4(%esp), %edx # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + sbbl 16(%esp), %esi # 4-byte Folded Reload + movl 124(%esp), %eax # 4-byte Reload + sbbl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + sbbl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl %ebx, %eax + movl %ebp, %ebx + sbbl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill 
+ sbbl $0, %ebx + andl $1, %ebx + jne .LBB164_2 +# BB#1: + movl %esi, 112(%esp) # 4-byte Spill +.LBB164_2: + testb %bl, %bl + movl 132(%esp), %esi # 4-byte Reload + jne .LBB164_4 +# BB#3: + movl %edi, %esi +.LBB164_4: + movl 688(%esp), %edi + movl %esi, (%edi) + movl 104(%esp), %esi # 4-byte Reload + jne .LBB164_6 +# BB#5: + movl %edx, 128(%esp) # 4-byte Spill +.LBB164_6: + movl 128(%esp), %edx # 4-byte Reload + movl %edx, 4(%edi) + movl 116(%esp), %edx # 4-byte Reload + jne .LBB164_8 +# BB#7: + movl %ecx, %edx +.LBB164_8: + movl %edx, 8(%edi) + movl 112(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%edi) + movl 92(%esp), %edx # 4-byte Reload + movl 124(%esp), %ecx # 4-byte Reload + jne .LBB164_10 +# BB#9: + movl 64(%esp), %ecx # 4-byte Reload +.LBB164_10: + movl %ecx, 16(%edi) + movl 96(%esp), %ecx # 4-byte Reload + movl 120(%esp), %eax # 4-byte Reload + jne .LBB164_12 +# BB#11: + movl 68(%esp), %eax # 4-byte Reload +.LBB164_12: + movl %eax, 20(%edi) + movl 80(%esp), %eax # 4-byte Reload + movl 108(%esp), %ebp # 4-byte Reload + jne .LBB164_14 +# BB#13: + movl 72(%esp), %ebp # 4-byte Reload +.LBB164_14: + movl %ebp, 24(%edi) + jne .LBB164_16 +# BB#15: + movl 76(%esp), %esi # 4-byte Reload +.LBB164_16: + movl %esi, 28(%edi) + jne .LBB164_18 +# BB#17: + movl 84(%esp), %edx # 4-byte Reload +.LBB164_18: + movl %edx, 32(%edi) + jne .LBB164_20 +# BB#19: + movl 88(%esp), %ecx # 4-byte Reload +.LBB164_20: + movl %ecx, 36(%edi) + jne .LBB164_22 +# BB#21: + movl 100(%esp), %eax # 4-byte Reload +.LBB164_22: + movl %eax, 40(%edi) + addl $668, %esp # imm = 0x29C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end164: + .size mcl_fp_montRed11Lbmi2, .Lfunc_end164-mcl_fp_montRed11Lbmi2 + + .globl mcl_fp_addPre11Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre11Lbmi2,@function +mcl_fp_addPre11Lbmi2: # @mcl_fp_addPre11Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), 
%edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ebx) + movl 36(%ecx), %edx + adcl %edi, %edx + movl %esi, 32(%ebx) + movl %edx, 36(%ebx) + movl 40(%eax), %eax + movl 40(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 40(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end165: + .size mcl_fp_addPre11Lbmi2, .Lfunc_end165-mcl_fp_addPre11Lbmi2 + + .globl mcl_fp_subPre11Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre11Lbmi2,@function +mcl_fp_subPre11Lbmi2: # @mcl_fp_subPre11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ebp) + movl 36(%ecx), %esi + sbbl %ebx, %esi + movl %edi, 32(%ebp) + movl %esi, 36(%ebp) + movl 40(%edx), %edx + movl 
40(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 40(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end166: + .size mcl_fp_subPre11Lbmi2, .Lfunc_end166-mcl_fp_subPre11Lbmi2 + + .globl mcl_fp_shr1_11Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_11Lbmi2,@function +mcl_fp_shr1_11Lbmi2: # @mcl_fp_shr1_11Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 24(%esi) + movl 32(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 28(%esi) + movl 36(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 32(%esi) + movl 40(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 36(%esi) + shrl %eax + movl %eax, 40(%esi) + popl %esi + retl +.Lfunc_end167: + .size mcl_fp_shr1_11Lbmi2, .Lfunc_end167-mcl_fp_shr1_11Lbmi2 + + .globl mcl_fp_add11Lbmi2 + .align 16, 0x90 + .type mcl_fp_add11Lbmi2,@function +mcl_fp_add11Lbmi2: # @mcl_fp_add11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 60(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 56(%esp), %esi + addl (%esi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl 4(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 12(%esi), %eax + movl 16(%esi), %ecx + adcl 12(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + adcl 16(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 20(%esi), %eax + adcl 20(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 24(%esi), %eax 
+ adcl 24(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 28(%esi), %ebx + adcl 28(%edi), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 32(%esi), %ecx + adcl 32(%edi), %ecx + movl 36(%esi), %eax + adcl 36(%edi), %eax + movl 40(%esi), %edx + adcl 40(%edi), %edx + movl 52(%esp), %esi + movl %ebp, (%esi) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 4(%esi) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 8(%esi) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%esi) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%esi) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%esi) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%esi) + movl %ebx, 28(%esi) + movl %ecx, 32(%esi) + movl %eax, 36(%esi) + movl %edx, 40(%esi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 64(%esp), %ebp + movl 4(%esp), %edi # 4-byte Reload + subl (%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + sbbl 4(%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + sbbl 8(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 20(%esp), %edi # 4-byte Reload + sbbl 12(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 16(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 12(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 8(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl (%esp), %edi # 4-byte Reload + sbbl 28(%ebp), %edi + movl %edi, (%esp) # 4-byte Spill + sbbl 32(%ebp), %ecx + sbbl 36(%ebp), %eax + sbbl 40(%ebp), %edx + movl %edx, %edi + sbbl $0, %ebx + testb $1, %bl + jne .LBB168_2 +# BB#1: # %nocarry + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, (%esi) + movl 28(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%esi) + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%esi) + movl 20(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%esi) + movl 16(%esp), %ebx # 4-byte 
# ---------------------------------------------------------------------------
# (continuation) tail of mcl_fp_add11Lbmi2 — the function begins before this
# chunk; store-back of the reduced result and epilogue only. Left as-is.
# ---------------------------------------------------------------------------
	movl	%ebx, 16(%esi)
	movl	12(%esp), %ebx          # 4-byte Reload
	movl	%ebx, 20(%esi)
	movl	8(%esp), %ebx           # 4-byte Reload
	movl	%ebx, 24(%esi)
	movl	(%esp), %edx            # 4-byte Reload
	movl	%edx, 28(%esi)
	movl	%ecx, 32(%esi)
	movl	%eax, 36(%esi)
	movl	%edi, 40(%esi)
.LBB168_2:                              # %carry
	addl	$32, %esp
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	retl
.Lfunc_end168:
	.size	mcl_fp_add11Lbmi2, .Lfunc_end168-mcl_fp_add11Lbmi2

#-----------------------------------------------------------------------------
# mcl_fp_addNF11Lbmi2 — compiler-generated (LLVM), i386 cdecl, stack args.
# After the 4 pushes + subl $80: 100(%esp)=dst, 104(%esp)=x, 108(%esp)=y,
# 112(%esp)=p (all pointers to 11 x 32-bit little-endian limbs).
# What the code visibly does: full 11-limb add x+y with an adcl carry chain,
# then an 11-limb sbbl chain subtracting p from the sum, then selects per-limb
# between sum and (sum - p) via js on the sign (sarl $31) of the top borrow
# word. NOTE(review): "NF" presumably means the no-final-carry/branchless-
# reduction variant of the Fp add — inferred from mcl naming, confirm upstream.
# Clobbers: eax,ecx,edx,esi,edi (ebx/ebp/esi/edi saved). 80-byte spill frame.
#-----------------------------------------------------------------------------
	.globl	mcl_fp_addNF11Lbmi2
	.align	16, 0x90
	.type	mcl_fp_addNF11Lbmi2,@function
mcl_fp_addNF11Lbmi2:                    # @mcl_fp_addNF11Lbmi2
# BB#0:
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	subl	$80, %esp
	movl	108(%esp), %edx
	movl	(%edx), %eax
	movl	4(%edx), %ecx
	movl	104(%esp), %esi
	addl	(%esi), %eax            # limb 0: start of the add carry chain
	movl	%eax, 52(%esp)          # 4-byte Spill
	adcl	4(%esi), %ecx
	movl	%ecx, 60(%esp)          # 4-byte Spill
	movl	40(%edx), %ebx
	movl	36(%edx), %eax
	movl	%eax, 68(%esp)          # 4-byte Spill
	movl	32(%edx), %eax
	movl	%eax, 72(%esp)          # 4-byte Spill
	movl	28(%edx), %eax
	movl	%eax, 76(%esp)          # 4-byte Spill
	movl	24(%edx), %eax
	movl	%eax, 64(%esp)          # 4-byte Spill
	movl	20(%edx), %ebp
	movl	16(%edx), %edi
	movl	12(%edx), %eax
	movl	8(%edx), %ecx
	adcl	8(%esi), %ecx
	adcl	12(%esi), %eax
	movl	%eax, 40(%esp)          # 4-byte Spill
	adcl	16(%esi), %edi
	movl	%edi, 48(%esp)          # 4-byte Spill
	adcl	20(%esi), %ebp
	movl	%ebp, 44(%esp)          # 4-byte Spill
	movl	64(%esp), %edx          # 4-byte Reload
	adcl	24(%esi), %edx
	movl	%edx, 64(%esp)          # 4-byte Spill
	movl	76(%esp), %edx          # 4-byte Reload
	adcl	28(%esi), %edx
	movl	%edx, 76(%esp)          # 4-byte Spill
	movl	72(%esp), %edx          # 4-byte Reload
	adcl	32(%esi), %edx
	movl	%edx, 72(%esp)          # 4-byte Spill
	movl	68(%esp), %edx          # 4-byte Reload
	adcl	36(%esi), %edx
	movl	%edx, 68(%esp)          # 4-byte Spill
	movl	%ecx, %edx
	adcl	40(%esi), %ebx          # limb 10: end of the add chain
	movl	%ebx, 56(%esp)          # 4-byte Spill
	movl	112(%esp), %ebx         # ebx = p; now compute (sum - p)
	movl	52(%esp), %esi          # 4-byte Reload
	subl	(%ebx), %esi
	movl	60(%esp), %ecx          # 4-byte Reload
	sbbl	4(%ebx), %ecx
	movl	%ecx, (%esp)            # 4-byte Spill
	movl	%edx, %ecx
	sbbl	8(%ebx), %ecx
	movl	%ecx, 4(%esp)           # 4-byte Spill
	sbbl	12(%ebx), %eax
	movl	%eax, 8(%esp)           # 4-byte Spill
	sbbl	16(%ebx), %edi
	movl	%edi, 12(%esp)          # 4-byte Spill
	sbbl	20(%ebx), %ebp
	movl	%ebp, 16(%esp)          # 4-byte Spill
	movl	64(%esp), %eax          # 4-byte Reload
	movl	%eax, %ecx
	sbbl	24(%ebx), %ecx
	movl	%ecx, 20(%esp)          # 4-byte Spill
	movl	76(%esp), %ecx          # 4-byte Reload
	sbbl	28(%ebx), %ecx
	movl	%ecx, 24(%esp)          # 4-byte Spill
	movl	72(%esp), %ecx          # 4-byte Reload
	sbbl	32(%ebx), %ecx
	movl	%ecx, 28(%esp)          # 4-byte Spill
	movl	68(%esp), %edi          # 4-byte Reload
	movl	%edi, %ecx
	movl	%edi, %ebp
	sbbl	36(%ebx), %ecx
	movl	%ecx, 32(%esp)          # 4-byte Spill
	movl	56(%esp), %ecx          # 4-byte Reload
	movl	%ecx, %edi
	sbbl	40(%ebx), %edi          # top borrow word decides the selection
	movl	%edi, 36(%esp)          # 4-byte Spill
	movl	%edi, %ebx
	movl	52(%esp), %edi          # 4-byte Reload
	sarl	$31, %ebx               # sign of (sum - p): <0 => keep sum
	testl	%ebx, %ebx
	js	.LBB169_2
# BB#1:
	movl	%esi, %edi
.LBB169_2:
	movl	100(%esp), %esi         # esi = dst; per-limb select + store
	movl	%edi, (%esi)
	movl	60(%esp), %edi          # 4-byte Reload
	js	.LBB169_4
# BB#3:
	movl	(%esp), %edi            # 4-byte Reload
.LBB169_4:
	movl	%edi, 4(%esi)
	movl	%eax, %edi
	js	.LBB169_6
# BB#5:
	movl	4(%esp), %edx           # 4-byte Reload
.LBB169_6:
	movl	%edx, 8(%esi)
	movl	%ebp, %ecx
	movl	72(%esp), %edx          # 4-byte Reload
	movl	40(%esp), %eax          # 4-byte Reload
	js	.LBB169_8
# BB#7:
	movl	8(%esp), %eax           # 4-byte Reload
.LBB169_8:
	movl	%eax, 12(%esi)
	movl	76(%esp), %eax          # 4-byte Reload
	movl	44(%esp), %ebp          # 4-byte Reload
	js	.LBB169_10
# BB#9:
	movl	12(%esp), %ebx          # 4-byte Reload
	movl	%ebx, 48(%esp)          # 4-byte Spill
.LBB169_10:
	movl	48(%esp), %ebx          # 4-byte Reload
	movl	%ebx, 16(%esi)
	js	.LBB169_12
# BB#11:
	movl	16(%esp), %ebp          # 4-byte Reload
.LBB169_12:
	movl	%ebp, 20(%esi)
	js	.LBB169_14
# BB#13:
	movl	20(%esp), %edi          # 4-byte Reload
.LBB169_14:
	movl	%edi, 24(%esi)
	js	.LBB169_16
# BB#15:
	movl	24(%esp), %eax          # 4-byte Reload
.LBB169_16:
	movl	%eax, 28(%esi)
	js	.LBB169_18
# BB#17:
	movl	28(%esp), %edx          # 4-byte Reload
.LBB169_18:
	movl	%edx, 32(%esi)
	js	.LBB169_20
# BB#19:
	movl	32(%esp), %ecx          # 4-byte Reload
.LBB169_20:
	movl	%ecx, 36(%esi)
	movl	56(%esp), %eax          # 4-byte Reload
	js	.LBB169_22
# BB#21:
	movl	36(%esp), %eax          # 4-byte Reload
.LBB169_22:
	movl	%eax, 40(%esi)
	addl	$80, %esp
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	retl
.Lfunc_end169:
	.size	mcl_fp_addNF11Lbmi2, .Lfunc_end169-mcl_fp_addNF11Lbmi2

#-----------------------------------------------------------------------------
# mcl_fp_sub11Lbmi2 — i386 cdecl. After pushes + subl $40: 60(%esp)=dst,
# 64(%esp)=x, 68(%esp)=y, 72(%esp)=p (11 x 32-bit limbs each).
# Visibly: 11-limb x-y with an sbbl borrow chain, result stored to dst; the
# final borrow is materialized (sbbl $0 into a zeroed reg) and, if set, the
# modulus p is added back in the %carry block (continues past this span).
#-----------------------------------------------------------------------------
	.globl	mcl_fp_sub11Lbmi2
	.align	16, 0x90
	.type	mcl_fp_sub11Lbmi2,@function
mcl_fp_sub11Lbmi2:                      # @mcl_fp_sub11Lbmi2
# BB#0:
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	subl	$40, %esp
	movl	64(%esp), %ebp
	movl	(%ebp), %ecx
	movl	4(%ebp), %eax
	movl	68(%esp), %edi
	subl	(%edi), %ecx            # limb 0: start of the borrow chain
	movl	%ecx, 16(%esp)          # 4-byte Spill
	sbbl	4(%edi), %eax
	movl	%eax, 28(%esp)          # 4-byte Spill
	movl	8(%ebp), %eax
	sbbl	8(%edi), %eax
	movl	%eax, 32(%esp)          # 4-byte Spill
	movl	12(%ebp), %ebx
	sbbl	12(%edi), %ebx
	movl	16(%ebp), %eax
	sbbl	16(%edi), %eax
	movl	%eax, 36(%esp)          # 4-byte Spill
	movl	20(%ebp), %eax
	sbbl	20(%edi), %eax
	movl	%eax, 24(%esp)          # 4-byte Spill
	movl	24(%ebp), %eax
	sbbl	24(%edi), %eax
	movl	%eax, 20(%esp)          # 4-byte Spill
	movl	28(%ebp), %edx
	sbbl	28(%edi), %edx
	movl	%edx, 4(%esp)           # 4-byte Spill
	movl	32(%ebp), %ecx
	sbbl	32(%edi), %ecx
	movl	%ecx, (%esp)            # 4-byte Spill
	movl	36(%ebp), %eax
	sbbl	36(%edi), %eax
	movl	%eax, 12(%esp)          # 4-byte Spill
	movl	40(%ebp), %eax
	sbbl	40(%edi), %eax
	movl	%eax, 8(%esp)           # 4-byte Spill
	movl	%ebx, %ebp
	movl	16(%esp), %esi          # 4-byte Reload
	movl	$0, %ebx
	sbbl	$0, %ebx                # capture the final borrow bit in bl
	testb	$1, %bl
	movl	60(%esp), %ebx          # ebx = dst; store raw difference
	movl	%esi, (%ebx)
	movl	28(%esp), %edi          # 4-byte Reload
	movl	%edi, 4(%ebx)
	movl	32(%esp), %edi          # 4-byte Reload
	movl	%edi, 8(%ebx)
	movl	%ebp, 12(%ebx)
	movl	36(%esp), %edi          # 4-byte Reload
	movl	%edi, 16(%ebx)
	movl	24(%esp), %edi          # 4-byte Reload
	movl	%edi, 20(%ebx)
# (continuation of mcl_fp_sub11Lbmi2: finish store-back, then conditional
#  add-back of the modulus when the subtraction above borrowed)
	movl	20(%esp), %edi          # 4-byte Reload
	movl	%edi, 24(%ebx)
	movl	%edx, 28(%ebx)
	movl	%ecx, 32(%ebx)
	movl	12(%esp), %ecx          # 4-byte Reload
	movl	%ecx, 36(%ebx)
	movl	%ecx, %edi
	movl	%eax, 40(%ebx)
	je	.LBB170_2               # no borrow => result already in range
# BB#1:                                 # %carry
	movl	72(%esp), %eax          # eax = p; add it back limb by limb
	addl	(%eax), %esi
	movl	%esi, (%ebx)
	movl	28(%esp), %edx          # 4-byte Reload
	movl	%eax, %esi
	adcl	4(%esi), %edx
	movl	%edx, 4(%ebx)
	movl	32(%esp), %ecx          # 4-byte Reload
	adcl	8(%esi), %ecx
	movl	12(%esi), %eax
	adcl	%ebp, %eax
	movl	%ecx, 8(%ebx)
	movl	16(%esi), %ecx
	adcl	36(%esp), %ecx          # 4-byte Folded Reload
	movl	%eax, 12(%ebx)
	movl	20(%esi), %eax
	adcl	24(%esp), %eax          # 4-byte Folded Reload
	movl	%ecx, 16(%ebx)
	movl	24(%esi), %ecx
	adcl	20(%esp), %ecx          # 4-byte Folded Reload
	movl	%eax, 20(%ebx)
	movl	28(%esi), %eax
	adcl	4(%esp), %eax           # 4-byte Folded Reload
	movl	%ecx, 24(%ebx)
	movl	32(%esi), %ecx
	adcl	(%esp), %ecx            # 4-byte Folded Reload
	movl	%eax, 28(%ebx)
	movl	%ecx, 32(%ebx)
	movl	36(%esi), %eax
	adcl	%edi, %eax
	movl	%eax, 36(%ebx)
	movl	40(%esi), %eax
	adcl	8(%esp), %eax           # 4-byte Folded Reload
	movl	%eax, 40(%ebx)
.LBB170_2:                              # %nocarry
	addl	$40, %esp
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	retl
.Lfunc_end170:
	.size	mcl_fp_sub11Lbmi2, .Lfunc_end170-mcl_fp_sub11Lbmi2

#-----------------------------------------------------------------------------
# mcl_fp_subNF11Lbmi2 — i386 cdecl, BMI2 (uses rorxl). After pushes +
# subl $64: 84(%esp)=dst, 88(%esp)=x, 92(%esp)=y, 96(%esp)=p (11 limbs each).
# Visibly branch-free: computes x-y with an sbbl chain, broadcasts the sign
# of the top word (sarl $31 / shldl / rorxl) into a mask, ANDs the mask with
# each limb of p, and adds the masked p back — i.e. result += p only when the
# subtraction went negative, without a conditional jump.
#-----------------------------------------------------------------------------
	.globl	mcl_fp_subNF11Lbmi2
	.align	16, 0x90
	.type	mcl_fp_subNF11Lbmi2,@function
mcl_fp_subNF11Lbmi2:                    # @mcl_fp_subNF11Lbmi2
# BB#0:
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	subl	$64, %esp
	movl	88(%esp), %eax
	movl	(%eax), %edx
	movl	4(%eax), %ecx
	movl	92(%esp), %edi
	subl	(%edi), %edx            # limb 0: start of the borrow chain
	movl	%edx, 40(%esp)          # 4-byte Spill
	sbbl	4(%edi), %ecx
	movl	%ecx, 44(%esp)          # 4-byte Spill
	movl	40(%eax), %ecx
	movl	%ecx, 52(%esp)          # 4-byte Spill
	movl	36(%eax), %ecx
	movl	%ecx, 60(%esp)          # 4-byte Spill
	movl	32(%eax), %ecx
	movl	%ecx, 56(%esp)          # 4-byte Spill
	movl	28(%eax), %ebx
	movl	24(%eax), %ebp
	movl	20(%eax), %esi
	movl	16(%eax), %edx
	movl	12(%eax), %ecx
	movl	8(%eax), %eax
	sbbl	8(%edi), %eax
	movl	%eax, 16(%esp)          # 4-byte Spill
	sbbl	12(%edi), %ecx
	movl	%ecx, 20(%esp)          # 4-byte Spill
	sbbl	16(%edi), %edx
	movl	%edx, 24(%esp)          # 4-byte Spill
	sbbl	20(%edi), %esi
	movl	%esi, 28(%esp)          # 4-byte Spill
	sbbl	24(%edi), %ebp
	movl	%ebp, 32(%esp)          # 4-byte Spill
	sbbl	28(%edi), %ebx
	movl	%ebx, 36(%esp)          # 4-byte Spill
	movl	56(%esp), %eax          # 4-byte Reload
	sbbl	32(%edi), %eax
	movl	%eax, 56(%esp)          # 4-byte Spill
	movl	60(%esp), %eax          # 4-byte Reload
	sbbl	36(%edi), %eax
	movl	%eax, 60(%esp)          # 4-byte Spill
	movl	52(%esp), %eax          # 4-byte Reload
	sbbl	40(%edi), %eax          # top limb: sign decides the mask
	movl	%eax, 52(%esp)          # 4-byte Spill
	movl	%eax, %ecx
	sarl	$31, %ecx               # ecx = 0 or 0xffffffff (sign mask)
	movl	%ecx, %edx
	shldl	$1, %eax, %edx
	movl	96(%esp), %ebx          # ebx = p; mask each limb of p
	movl	4(%ebx), %eax
	andl	%edx, %eax
	movl	%eax, 48(%esp)          # 4-byte Spill
	andl	(%ebx), %edx
	movl	40(%ebx), %eax
	andl	%ecx, %eax
	movl	%eax, 12(%esp)          # 4-byte Spill
	movl	36(%ebx), %eax
	andl	%ecx, %eax
	movl	%eax, 8(%esp)           # 4-byte Spill
	movl	32(%ebx), %eax
	andl	%ecx, %eax
	movl	%eax, 4(%esp)           # 4-byte Spill
	movl	28(%ebx), %eax
	andl	%ecx, %eax
	movl	%eax, (%esp)            # 4-byte Spill
	movl	24(%ebx), %ebp
	andl	%ecx, %ebp
	rorxl	$31, %ecx, %eax         # BMI2: rotate mask without touching flags
	andl	20(%ebx), %ecx
	movl	16(%ebx), %edi
	andl	%eax, %edi
	movl	12(%ebx), %esi
	andl	%eax, %esi
	andl	8(%ebx), %eax
	addl	40(%esp), %edx          # 4-byte Folded Reload; add masked p back
	movl	48(%esp), %ebx          # 4-byte Reload
	adcl	44(%esp), %ebx          # 4-byte Folded Reload
	movl	%ebx, 48(%esp)          # 4-byte Spill
	movl	84(%esp), %ebx          # ebx = dst
	movl	%edx, (%ebx)
	adcl	16(%esp), %eax          # 4-byte Folded Reload
	movl	48(%esp), %edx          # 4-byte Reload
	movl	%edx, 4(%ebx)
	adcl	20(%esp), %esi          # 4-byte Folded Reload
	movl	%eax, 8(%ebx)
	adcl	24(%esp), %edi          # 4-byte Folded Reload
	movl	%esi, 12(%ebx)
	adcl	28(%esp), %ecx          # 4-byte Folded Reload
	movl	%edi, 16(%ebx)
	adcl	32(%esp), %ebp          # 4-byte Folded Reload
	movl	%ecx, 20(%ebx)
	movl	(%esp), %eax            # 4-byte Reload
	adcl	36(%esp), %eax          # 4-byte Folded Reload
	movl	%ebp, 24(%ebx)
	movl	4(%esp), %ecx           # 4-byte Reload
	adcl	56(%esp), %ecx          # 4-byte Folded Reload
	movl	%eax, 28(%ebx)
	movl	8(%esp), %eax           # 4-byte Reload
	adcl	60(%esp), %eax          # 4-byte Folded Reload
	movl	%ecx, 32(%ebx)
	movl	%eax, 36(%ebx)
	movl	12(%esp), %eax          # 4-byte Reload
	adcl	52(%esp), %eax          # 4-byte Folded Reload
	movl	%eax, 40(%ebx)
	addl	$64, %esp
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	retl
.Lfunc_end171:
	.size	mcl_fp_subNF11Lbmi2, .Lfunc_end171-mcl_fp_subNF11Lbmi2

#-----------------------------------------------------------------------------
# mcl_fpDbl_add11Lbmi2 — i386 cdecl. After pushes + subl $80: 100(%esp)=dst,
# 104(%esp)=x, 108(%esp)=y, 112(%esp)=p. Operands are double-width (22 x
# 32-bit limbs); p is 11 limbs.
# Visibly: 22-limb add with one long adcl chain; the low 11 limbs are stored
# directly, then p is subtracted from the high 11 limbs and the final borrow
# (combined with the add's carry) selects, limb by limb, between the raw high
# half and the reduced high half.
#-----------------------------------------------------------------------------
	.globl	mcl_fpDbl_add11Lbmi2
	.align	16, 0x90
	.type	mcl_fpDbl_add11Lbmi2,@function
mcl_fpDbl_add11Lbmi2:                   # @mcl_fpDbl_add11Lbmi2
# BB#0:
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	subl	$80, %esp
	movl	108(%esp), %ecx
	movl	104(%esp), %edi
	movl	12(%edi), %esi
	movl	16(%edi), %edx
	movl	8(%ecx), %ebx
	movl	(%ecx), %ebp
	addl	(%edi), %ebp            # limb 0: start of the 22-limb carry chain
	movl	100(%esp), %eax
	movl	%ebp, (%eax)
	movl	4(%ecx), %ebp
	adcl	4(%edi), %ebp
	adcl	8(%edi), %ebx
	adcl	12(%ecx), %esi
	adcl	16(%ecx), %edx
	movl	%ebp, 4(%eax)
	movl	52(%ecx), %ebp
	movl	%ebx, 8(%eax)
	movl	20(%ecx), %ebx
	movl	%esi, 12(%eax)
	movl	20(%edi), %esi
	adcl	%ebx, %esi
	movl	24(%ecx), %ebx
	movl	%edx, 16(%eax)
	movl	24(%edi), %edx
	adcl	%ebx, %edx
	movl	28(%ecx), %ebx
	movl	%esi, 20(%eax)
	movl	28(%edi), %esi
	adcl	%ebx, %esi
	movl	32(%ecx), %ebx
	movl	%edx, 24(%eax)
	movl	32(%edi), %edx
	adcl	%ebx, %edx
	movl	36(%ecx), %ebx
	movl	%esi, 28(%eax)
	movl	36(%edi), %esi
	adcl	%ebx, %esi
	movl	40(%ecx), %ebx
	movl	%edx, 32(%eax)
	movl	40(%edi), %edx
	adcl	%ebx, %edx
	movl	44(%ecx), %ebx
	movl	%esi, 36(%eax)
	movl	44(%edi), %esi
	adcl	%ebx, %esi
	movl	%esi, 68(%esp)          # 4-byte Spill; high half kept in spills
	movl	48(%ecx), %esi
	movl	%edx, 40(%eax)
	movl	48(%edi), %eax
	adcl	%esi, %eax
	movl	%eax, 72(%esp)          # 4-byte Spill
	movl	52(%edi), %eax
	adcl	%ebp, %eax
	movl	%eax, 76(%esp)          # 4-byte Spill
	movl	56(%ecx), %edx
	movl	56(%edi), %eax
	adcl	%edx, %eax
	movl	%eax, 56(%esp)          # 4-byte Spill
	movl	60(%ecx), %edx
	movl	60(%edi), %eax
	adcl	%edx, %eax
	movl	%eax, 60(%esp)          # 4-byte Spill
	movl	64(%ecx), %edx
	movl	64(%edi), %eax
	adcl	%edx, %eax
	movl	%eax, 64(%esp)          # 4-byte Spill
	movl	68(%ecx), %eax
	movl	68(%edi), %edx
	adcl	%eax, %edx
	movl	72(%ecx), %esi
	movl	72(%edi), %eax
	adcl	%esi, %eax
	movl	%eax, 44(%esp)          # 4-byte Spill
	movl	76(%ecx), %ebx
	movl	76(%edi), %esi
	adcl	%ebx, %esi
	movl	%esi, 48(%esp)          # 4-byte Spill
	movl	80(%ecx), %ebp
	movl	80(%edi), %ebx
	adcl	%ebp, %ebx
	movl	%ebx, 52(%esp)          # 4-byte Spill
	movl	84(%ecx), %ecx
	movl	84(%edi), %edi
	adcl	%ecx, %edi
	movl	%edi, 40(%esp)          # 4-byte Spill
	sbbl	%ecx, %ecx              # materialize final carry as 0/-1
	andl	$1, %ecx
	movl	112(%esp), %ebp         # ebp = p; high half minus p
	movl	68(%esp), %edi          # 4-byte Reload
	subl	(%ebp), %edi
	movl	%edi, 24(%esp)          # 4-byte Spill
	movl	72(%esp), %edi          # 4-byte Reload
	sbbl	4(%ebp), %edi
	movl	%edi, 20(%esp)          # 4-byte Spill
	movl	76(%esp), %edi          # 4-byte Reload
	sbbl	8(%ebp), %edi
	movl	%edi, 16(%esp)          # 4-byte Spill
	movl	56(%esp), %edi          # 4-byte Reload
	sbbl	12(%ebp), %edi
	movl	%edi, 12(%esp)          # 4-byte Spill
	movl	60(%esp), %edi          # 4-byte Reload
	sbbl	16(%ebp), %edi
	movl	%edi, 8(%esp)           # 4-byte Spill
	movl	64(%esp), %edi          # 4-byte Reload
	sbbl	20(%ebp), %edi
	movl	%edi, 4(%esp)           # 4-byte Spill
	movl	%edx, %edi
	sbbl	24(%ebp), %edi
	movl	%edi, (%esp)            # 4-byte Spill
	sbbl	28(%ebp), %eax
	movl	%eax, 28(%esp)          # 4-byte Spill
	sbbl	32(%ebp), %esi
	movl	%esi, 32(%esp)          # 4-byte Spill
	movl	%ebx, %eax
	movl	40(%esp), %ebx          # 4-byte Reload
	sbbl	36(%ebp), %eax
	movl	%eax, 36(%esp)          # 4-byte Spill
	movl	%ebx, %edi
	sbbl	40(%ebp), %edi
	sbbl	$0, %ecx                # fold borrow into the saved carry bit
	andl	$1, %ecx
	jne	.LBB172_2
# BB#1:
	movl	%edi, %ebx
.LBB172_2:
	testb	%cl, %cl
	movl	68(%esp), %ecx          # 4-byte Reload
	movl	64(%esp), %esi          # 4-byte Reload
	movl	60(%esp), %edi          # 4-byte Reload
	movl	56(%esp), %ebp          # 4-byte Reload
	jne	.LBB172_4
# BB#3:
	movl	(%esp), %edx            # 4-byte Reload
	movl	4(%esp), %esi           # 4-byte Reload
	movl	8(%esp), %edi           # 4-byte Reload
	movl	12(%esp), %ebp          # 4-byte Reload
	movl	16(%esp), %eax          # 4-byte Reload
	movl	%eax, 76(%esp)          # 4-byte Spill
	movl	20(%esp), %eax          # 4-byte Reload
	movl	%eax, 72(%esp)          # 4-byte Spill
	movl	24(%esp), %ecx          # 4-byte Reload
.LBB172_4:
	movl	100(%esp), %eax         # store the selected high half to dst
	movl	%ecx, 44(%eax)
	movl	72(%esp), %ecx          # 4-byte Reload
	movl	%ecx, 48(%eax)
	movl	76(%esp), %ecx          # 4-byte Reload
	movl	%ecx, 52(%eax)
	movl	%ebp, 56(%eax)
	movl	%edi, 60(%eax)
	movl	%esi, 64(%eax)
	movl	%edx, 68(%eax)
	movl	52(%esp), %ecx          # 4-byte Reload
	movl	44(%esp), %edx          # 4-byte Reload
	jne	.LBB172_6
# BB#5:
	movl	28(%esp), %edx          # 4-byte Reload
.LBB172_6:
	movl	%edx, 72(%eax)
	movl	48(%esp), %edx          # 4-byte Reload
	jne	.LBB172_8
# BB#7:
	movl	32(%esp), %edx          # 4-byte Reload
.LBB172_8:
	movl	%edx, 76(%eax)
	jne	.LBB172_10
# BB#9:
	movl	36(%esp), %ecx          # 4-byte Reload
.LBB172_10:
	movl	%ecx, 80(%eax)
	movl	%ebx, 84(%eax)
	addl	$80, %esp
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	retl
.Lfunc_end172:
	.size	mcl_fpDbl_add11Lbmi2, .Lfunc_end172-mcl_fpDbl_add11Lbmi2

#-----------------------------------------------------------------------------
# mcl_fpDbl_sub11Lbmi2 — i386 cdecl. After pushes + subl $72: 92(%esp)=dst,
# 96(%esp)=x, 100(%esp)=y, 104(%esp)=p. 22-limb x-y with one sbbl chain; the
# low half is stored directly, and if the chain borrowed, each limb of p
# (or 0) is added to the high half (continues past this span).
#-----------------------------------------------------------------------------
	.globl	mcl_fpDbl_sub11Lbmi2
	.align	16, 0x90
	.type	mcl_fpDbl_sub11Lbmi2,@function
mcl_fpDbl_sub11Lbmi2:                   # @mcl_fpDbl_sub11Lbmi2
# BB#0:
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	subl	$72, %esp
	movl	96(%esp), %edx
	movl	(%edx), %eax
	movl	4(%edx), %esi
	movl	100(%esp), %ebp
	subl	(%ebp), %eax            # limb 0: start of the 22-limb borrow chain
	sbbl	4(%ebp), %esi
	movl	8(%edx), %edi
	sbbl	8(%ebp), %edi
	movl	92(%esp), %ecx
	movl	%eax, (%ecx)
	movl	12(%edx), %eax
	sbbl	12(%ebp), %eax
	movl	%esi, 4(%ecx)
	movl	16(%edx), %esi
	sbbl	16(%ebp), %esi
	movl	%edi, 8(%ecx)
	movl	20(%ebp), %edi
	movl	%eax, 12(%ecx)
	movl	20(%edx), %eax
	sbbl	%edi, %eax
	movl	24(%ebp), %edi
	movl	%esi, 16(%ecx)
	movl	24(%edx), %esi
	sbbl	%edi, %esi
	movl	28(%ebp), %edi
	movl	%eax, 20(%ecx)
	movl	28(%edx), %eax
	sbbl	%edi, %eax
	movl	32(%ebp), %edi
	movl	%esi, 24(%ecx)
	movl	32(%edx), %esi
# (continuation of mcl_fpDbl_sub11Lbmi2: finish the 22-limb borrow chain,
#  then conditionally add p, or 0, into the high half)
	sbbl	%edi, %esi
	movl	36(%ebp), %edi
	movl	%eax, 28(%ecx)
	movl	36(%edx), %eax
	sbbl	%edi, %eax
	movl	40(%ebp), %edi
	movl	%esi, 32(%ecx)
	movl	40(%edx), %esi
	sbbl	%edi, %esi
	movl	44(%ebp), %edi
	movl	%eax, 36(%ecx)
	movl	44(%edx), %eax
	sbbl	%edi, %eax
	movl	%eax, 36(%esp)          # 4-byte Spill; high half kept in spills
	movl	48(%ebp), %eax
	movl	%esi, 40(%ecx)
	movl	48(%edx), %esi
	sbbl	%eax, %esi
	movl	%esi, 24(%esp)          # 4-byte Spill
	movl	52(%ebp), %eax
	movl	52(%edx), %esi
	sbbl	%eax, %esi
	movl	%esi, 32(%esp)          # 4-byte Spill
	movl	56(%ebp), %eax
	movl	56(%edx), %esi
	sbbl	%eax, %esi
	movl	%esi, 40(%esp)          # 4-byte Spill
	movl	60(%ebp), %eax
	movl	60(%edx), %esi
	sbbl	%eax, %esi
	movl	%esi, 44(%esp)          # 4-byte Spill
	movl	64(%ebp), %eax
	movl	64(%edx), %esi
	sbbl	%eax, %esi
	movl	%esi, 48(%esp)          # 4-byte Spill
	movl	68(%ebp), %eax
	movl	68(%edx), %esi
	sbbl	%eax, %esi
	movl	%esi, 52(%esp)          # 4-byte Spill
	movl	72(%ebp), %eax
	movl	72(%edx), %esi
	sbbl	%eax, %esi
	movl	%esi, 56(%esp)          # 4-byte Spill
	movl	76(%ebp), %eax
	movl	76(%edx), %esi
	sbbl	%eax, %esi
	movl	%esi, 60(%esp)          # 4-byte Spill
	movl	80(%ebp), %eax
	movl	80(%edx), %esi
	sbbl	%eax, %esi
	movl	%esi, 64(%esp)          # 4-byte Spill
	movl	84(%ebp), %eax
	movl	84(%edx), %edx
	sbbl	%eax, %edx
	movl	%edx, 68(%esp)          # 4-byte Spill
	movl	$0, %eax
	sbbl	$0, %eax                # eax = final borrow (0/1)
	andl	$1, %eax
	movl	104(%esp), %ebp         # ebp = p; pick p[i] or 0 per the borrow
	jne	.LBB173_1
# BB#2:
	movl	$0, 28(%esp)            # 4-byte Folded Spill
	jmp	.LBB173_3
.LBB173_1:
	movl	40(%ebp), %edx
	movl	%edx, 28(%esp)          # 4-byte Spill
.LBB173_3:
	testb	%al, %al
	jne	.LBB173_4
# BB#5:
	movl	$0, 16(%esp)            # 4-byte Folded Spill
	movl	$0, 8(%esp)             # 4-byte Folded Spill
	jmp	.LBB173_6
.LBB173_4:
	movl	(%ebp), %eax
	movl	%eax, 8(%esp)           # 4-byte Spill
	movl	4(%ebp), %eax
	movl	%eax, 16(%esp)          # 4-byte Spill
.LBB173_6:
	jne	.LBB173_7
# BB#8:
	movl	$0, 20(%esp)            # 4-byte Folded Spill
	jmp	.LBB173_9
.LBB173_7:
	movl	36(%ebp), %eax
	movl	%eax, 20(%esp)          # 4-byte Spill
.LBB173_9:
	jne	.LBB173_10
# BB#11:
	movl	$0, 12(%esp)            # 4-byte Folded Spill
	jmp	.LBB173_12
.LBB173_10:
	movl	32(%ebp), %eax
	movl	%eax, 12(%esp)          # 4-byte Spill
.LBB173_12:
	jne	.LBB173_13
# BB#14:
	movl	$0, 4(%esp)             # 4-byte Folded Spill
	jmp	.LBB173_15
.LBB173_13:
	movl	28(%ebp), %eax
	movl	%eax, 4(%esp)           # 4-byte Spill
.LBB173_15:
	jne	.LBB173_16
# BB#17:
	movl	$0, (%esp)              # 4-byte Folded Spill
	jmp	.LBB173_18
.LBB173_16:
	movl	24(%ebp), %eax
	movl	%eax, (%esp)            # 4-byte Spill
.LBB173_18:
	jne	.LBB173_19
# BB#20:
	movl	$0, %edx
	jmp	.LBB173_21
.LBB173_19:
	movl	20(%ebp), %edx
.LBB173_21:
	jne	.LBB173_22
# BB#23:
	movl	$0, %edi
	jmp	.LBB173_24
.LBB173_22:
	movl	16(%ebp), %edi
.LBB173_24:
	jne	.LBB173_25
# BB#26:
	movl	$0, %ebx
	jmp	.LBB173_27
.LBB173_25:
	movl	12(%ebp), %ebx
.LBB173_27:
	jne	.LBB173_28
# BB#29:
	xorl	%ebp, %ebp
	jmp	.LBB173_30
.LBB173_28:
	movl	8(%ebp), %ebp
.LBB173_30:
	movl	8(%esp), %esi           # 4-byte Reload; add (p or 0) to high half
	addl	36(%esp), %esi          # 4-byte Folded Reload
	movl	16(%esp), %eax          # 4-byte Reload
	adcl	24(%esp), %eax          # 4-byte Folded Reload
	movl	%esi, 44(%ecx)
	adcl	32(%esp), %ebp          # 4-byte Folded Reload
	movl	%eax, 48(%ecx)
	adcl	40(%esp), %ebx          # 4-byte Folded Reload
	movl	%ebp, 52(%ecx)
	adcl	44(%esp), %edi          # 4-byte Folded Reload
	movl	%ebx, 56(%ecx)
	adcl	48(%esp), %edx          # 4-byte Folded Reload
	movl	%edi, 60(%ecx)
	movl	(%esp), %esi            # 4-byte Reload
	adcl	52(%esp), %esi          # 4-byte Folded Reload
	movl	%edx, 64(%ecx)
	movl	4(%esp), %eax           # 4-byte Reload
	adcl	56(%esp), %eax          # 4-byte Folded Reload
	movl	%esi, 68(%ecx)
	movl	12(%esp), %edx          # 4-byte Reload
	adcl	60(%esp), %edx          # 4-byte Folded Reload
	movl	%eax, 72(%ecx)
	movl	20(%esp), %eax          # 4-byte Reload
	adcl	64(%esp), %eax          # 4-byte Folded Reload
	movl	%edx, 76(%ecx)
	movl	%eax, 80(%ecx)
	movl	28(%esp), %eax          # 4-byte Reload
	adcl	68(%esp), %eax          # 4-byte Folded Reload
	movl	%eax, 84(%ecx)
	addl	$72, %esp
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	retl
.Lfunc_end173:
	.size	mcl_fpDbl_sub11Lbmi2, .Lfunc_end173-mcl_fpDbl_sub11Lbmi2

#-----------------------------------------------------------------------------
# .LmulPv384x32 — file-local helper (no .globl), custom register convention:
# on entry %ecx = 13-word destination, %edx = 12-word source, 56(%esp) =
# 32-bit multiplier; returns %eax = %ecx. Multiplies the 12-limb (384-bit)
# source by the scalar using BMI2 mulxl (flag-preserving 32x32->64 multiply)
# with an adcl chain folding the high halves, producing 13 limbs.
#-----------------------------------------------------------------------------
	.align	16, 0x90
	.type	.LmulPv384x32,@function
.LmulPv384x32:                          # @mulPv384x32
# BB#0:
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	subl	$36, %esp
	movl	%edx, %eax              # eax = src; edx becomes the multiplier
	movl	56(%esp), %edx
	mulxl	4(%eax), %ebx, %edi
	mulxl	(%eax), %esi, %ebp
	movl	%esi, 32(%esp)          # 4-byte Spill
	addl	%ebx, %ebp
	movl	%ebp, 28(%esp)          # 4-byte Spill
	mulxl	8(%eax), %ebx, %esi
	adcl	%edi, %ebx
	movl	%ebx, 24(%esp)          # 4-byte Spill
	mulxl	12(%eax), %edi, %ebx
	adcl	%esi, %edi
	movl	%edi, 20(%esp)          # 4-byte Spill
	mulxl	16(%eax), %esi, %edi
	adcl	%ebx, %esi
	movl	%esi, 16(%esp)          # 4-byte Spill
	mulxl	20(%eax), %esi, %ebx
	adcl	%edi, %esi
	movl	%esi, 12(%esp)          # 4-byte Spill
	mulxl	24(%eax), %esi, %edi
	adcl	%ebx, %esi
	movl	%esi, 8(%esp)           # 4-byte Spill
	mulxl	28(%eax), %ebx, %esi
	adcl	%edi, %ebx
	mulxl	32(%eax), %edi, %ebp
	movl	%ebp, 4(%esp)           # 4-byte Spill
	adcl	%esi, %edi
	mulxl	36(%eax), %esi, %ebp
	movl	%ebp, (%esp)            # 4-byte Spill
	adcl	4(%esp), %esi           # 4-byte Folded Reload
	mulxl	40(%eax), %edx, %ebp
	movl	%ebp, 4(%esp)           # 4-byte Spill
	adcl	(%esp), %edx            # 4-byte Folded Reload
	movl	32(%esp), %ebp          # 4-byte Reload; store limbs 0..10
	movl	%ebp, (%ecx)
	movl	28(%esp), %ebp          # 4-byte Reload
	movl	%ebp, 4(%ecx)
	movl	24(%esp), %ebp          # 4-byte Reload
	movl	%ebp, 8(%ecx)
	movl	20(%esp), %ebp          # 4-byte Reload
	movl	%ebp, 12(%ecx)
	movl	16(%esp), %ebp          # 4-byte Reload
	movl	%ebp, 16(%ecx)
	movl	12(%esp), %ebp          # 4-byte Reload
	movl	%ebp, 20(%ecx)
	movl	8(%esp), %ebp           # 4-byte Reload
	movl	%ebp, 24(%ecx)
	movl	%ebx, 28(%ecx)
	movl	%edi, 32(%ecx)
	movl	%esi, 36(%ecx)
	movl	%edx, 40(%ecx)
	movl	56(%esp), %edx          # reload multiplier for the last limb
	mulxl	44(%eax), %eax, %edx
	adcl	4(%esp), %eax           # 4-byte Folded Reload
	movl	%eax, 44(%ecx)
	adcl	$0, %edx
	movl	%edx, 48(%ecx)          # limb 12 = final carry-out
	movl	%ecx, %eax
	addl	$36, %esp
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	retl
.Lfunc_end174:
	.size	.LmulPv384x32, .Lfunc_end174-.LmulPv384x32

#-----------------------------------------------------------------------------
# mcl_fp_mulUnitPre12Lbmi2 — i386 cdecl. After pushes + subl $92:
# 112(%esp)=dst (13 limbs), 116(%esp)=src (12 limbs), 120(%esp)=scalar.
# Sets up the PIC GOT base in %ebx (call/pop idiom), invokes .LmulPv384x32
# into a 52-byte stack buffer, then copies the 13 result words to dst.
#-----------------------------------------------------------------------------
	.globl	mcl_fp_mulUnitPre12Lbmi2
	.align	16, 0x90
	.type	mcl_fp_mulUnitPre12Lbmi2,@function
mcl_fp_mulUnitPre12Lbmi2:               # @mcl_fp_mulUnitPre12Lbmi2
# BB#0:
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	subl	$92, %esp
	calll	.L175$pb
.L175$pb:
	popl	%ebx                    # ebx = current EIP (PIC base idiom)
.Ltmp26:
	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp26-.L175$pb), %ebx
	movl	120(%esp), %eax
	movl	%eax, (%esp)            # scalar arg for .LmulPv384x32
	leal	40(%esp), %ecx          # ecx = stack result buffer
	movl	116(%esp), %edx         # edx = src
	calll	.LmulPv384x32
	movl	88(%esp), %eax
	movl	%eax, 36(%esp)          # 4-byte Spill
	movl	84(%esp), %eax
	movl	%eax, 32(%esp)          # 4-byte Spill
	movl	80(%esp), %eax
	movl	%eax, 28(%esp)          # 4-byte Spill
	movl	76(%esp), %eax
	movl	%eax, 24(%esp)          # 4-byte Spill
	movl	72(%esp), %eax
	movl	%eax, 20(%esp)          # 4-byte Spill
	movl	68(%esp), %eax
	movl	%eax, 16(%esp)          # 4-byte Spill
	movl	64(%esp), %eax
	movl	%eax, 12(%esp)          # 4-byte Spill
	movl	60(%esp), %ebp
	movl	56(%esp), %ebx
	movl	52(%esp), %edi
	movl	48(%esp), %esi
	movl	40(%esp), %edx
	movl	44(%esp), %ecx
	movl	112(%esp), %eax         # eax = dst; copy 13 words out
	movl	%edx, (%eax)
	movl	%ecx, 4(%eax)
	movl	%esi, 8(%eax)
	movl	%edi, 12(%eax)
	movl	%ebx, 16(%eax)
	movl	%ebp, 20(%eax)
	movl	12(%esp), %ecx          # 4-byte Reload
	movl	%ecx, 24(%eax)
	movl	16(%esp), %ecx          # 4-byte Reload
	movl	%ecx, 28(%eax)
	movl	20(%esp), %ecx          # 4-byte Reload
	movl	%ecx, 32(%eax)
	movl	24(%esp), %ecx          # 4-byte Reload
	movl	%ecx, 36(%eax)
	movl	28(%esp), %ecx          # 4-byte Reload
	movl	%ecx, 40(%eax)
	movl	32(%esp), %ecx          # 4-byte Reload
	movl	%ecx, 44(%eax)
	movl	36(%esp), %ecx          # 4-byte Reload
	movl	%ecx, 48(%eax)
	addl	$92, %esp
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	retl
.Lfunc_end175:
	.size	mcl_fp_mulUnitPre12Lbmi2, .Lfunc_end175-mcl_fp_mulUnitPre12Lbmi2

#-----------------------------------------------------------------------------
# mcl_fpDbl_mulPre12Lbmi2 — i386 cdecl with frame pointer: 8(%ebp)=dst
# (24 limbs), 12(%ebp)=x, 16(%ebp)=y (12 limbs each).
# Visibly a one-level split 12 = 6+6: calls mcl_fpDbl_mulPre6Lbmi2 on the low
# halves, on the high halves, and on the 6-limb sums of halves, then combines
# with add/sub carry chains — Karatsuba-style middle-term correction
# (NOTE(review): inferred from the three 6-limb multiplies + correction;
# confirm against mcl's mulPre generator). Carry flags are preserved across
# instructions via the seto/lahf ... sahf save/restore idiom.
#-----------------------------------------------------------------------------
	.globl	mcl_fpDbl_mulPre12Lbmi2
	.align	16, 0x90
	.type	mcl_fpDbl_mulPre12Lbmi2,@function
mcl_fpDbl_mulPre12Lbmi2:                # @mcl_fpDbl_mulPre12Lbmi2
# BB#0:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	subl	$220, %esp
	calll	.L176$pb
.L176$pb:
	popl	%ebx                    # PIC base
.Ltmp27:
	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp27-.L176$pb), %ebx
	movl	%ebx, -164(%ebp)        # 4-byte Spill
	movl	16(%ebp), %esi
	movl	%esi, 8(%esp)
	movl	12(%ebp), %edi
	movl	%edi, 4(%esp)
	movl	8(%ebp), %eax
	movl	%eax, (%esp)
	calll	mcl_fpDbl_mulPre6Lbmi2@PLT      # dst[0..11] = xL * yL
	leal	24(%esi), %eax
	movl	%eax, 8(%esp)
	leal	24(%edi), %eax
	movl	%eax, 4(%esp)
	movl	8(%ebp), %eax
	leal	48(%eax), %eax
	movl	%eax, (%esp)
	calll	mcl_fpDbl_mulPre6Lbmi2@PLT      # dst[12..23] = xH * yH
	movl	40(%edi), %ebx                  # xL + xH (6 limbs, keep carry)
	movl	36(%edi), %eax
	movl	32(%edi), %edx
	movl	(%edi), %esi
	movl	4(%edi), %ecx
	addl	24(%edi), %esi
	adcl	28(%edi), %ecx
	movl	%ecx, -172(%ebp)        # 4-byte Spill
	adcl	8(%edi), %edx
	movl	%edx, -188(%ebp)        # 4-byte Spill
	adcl	12(%edi), %eax
	movl	%eax, -168(%ebp)        # 4-byte Spill
	adcl	16(%edi), %ebx
	movl	%ebx, -180(%ebp)        # 4-byte Spill
	seto	%al                     # save carry state across the next stretch
	lahf
	movl	%eax, %eax
	movl	%eax, -112(%ebp)        # 4-byte Spill
	movl	16(%ebp), %edi
	movl	(%edi), %eax            # yL + yH (6 limbs)
	addl	24(%edi), %eax
	movl	%eax, -136(%ebp)        # 4-byte Spill
	movl	4(%edi), %eax
	adcl	28(%edi), %eax
	movl	%eax, -140(%ebp)        # 4-byte Spill
	movl	32(%edi), %eax
	adcl	8(%edi), %eax
	movl	%eax, -144(%ebp)        # 4-byte Spill
	movl	36(%edi), %eax
	adcl	12(%edi), %eax
	movl	%eax, -148(%ebp)        # 4-byte Spill
	movl	40(%edi), %ecx
	adcl	16(%edi), %ecx
	movl	44(%edi), %eax
	adcl	20(%edi), %eax
	pushl	%eax
	seto	%al
	lahf
	movl	%eax, %edi
	popl	%eax
	movl	%edi, -184(%ebp)        # 4-byte Spill
	movl	%ebx, %edi
	movl	%edx, -156(%ebp)        # 4-byte Spill
	movl	%esi, -160(%ebp)        # 4-byte Spill
	movl	%esi, %edx
	jb	.LBB176_2
# BB#1:
	xorl	%edi, %edi
	movl	$0, -156(%ebp)          # 4-byte Folded Spill
	movl	$0, -160(%ebp)          # 4-byte Folded Spill
.LBB176_2:
	movl	%edi, -176(%ebp)        # 4-byte Spill
	movl	12(%ebp), %esi
	movl	44(%esi), %edi
	movl	-112(%ebp), %ebx        # 4-byte Reload
	pushl	%eax
	movl	%ebx, %eax
	addb	$127, %al               # restore saved carry (lahf/sahf pair)
	sahf
	popl	%eax
	adcl	20(%esi), %edi
	movl	%edi, -132(%ebp)        # 4-byte Spill
	movl	%eax, -124(%ebp)        # 4-byte Spill
	movl	%ecx, -112(%ebp)        # 4-byte Spill
	movl	-148(%ebp), %esi        # 4-byte Reload
	movl	%esi, -116(%ebp)        # 4-byte Spill
	movl	-144(%ebp), %esi        # 4-byte Reload
	movl	%esi, -120(%ebp)        # 4-byte Spill
	movl	-140(%ebp), %esi        # 4-byte Reload
	movl	%esi, -128(%ebp)        # 4-byte Spill
	movl	-136(%ebp), %esi        # 4-byte Reload
	movl	%esi, -152(%ebp)        # 4-byte Spill
	jb	.LBB176_4
# BB#3:
	movl	$0, -124(%ebp)          # 4-byte Folded Spill
	movl	$0, -112(%ebp)          # 4-byte Folded Spill
	movl	$0, -116(%ebp)          # 4-byte Folded Spill
	movl	$0, -120(%ebp)          # 4-byte Folded Spill
	movl	$0, -128(%ebp)          # 4-byte Folded Spill
	movl	$0, -152(%ebp)          # 4-byte Folded Spill
.LBB176_4:
	movl	%edx, -84(%ebp)         # stage (xL+xH) at -84.. and (yL+yH)
	movl	-172(%ebp), %esi        # 4-byte Reload;  at -108.. for the call
	movl	%esi, -80(%ebp)
	movl	-188(%ebp), %edx        # 4-byte Reload
	movl	%edx, -76(%ebp)
	movl	-168(%ebp), %edi        # 4-byte Reload
	movl	%edi, -72(%ebp)
	movl	-180(%ebp), %edx        # 4-byte Reload
	movl	%edx, -68(%ebp)
	movl	-136(%ebp), %edx        # 4-byte Reload
	movl	%edx, -108(%ebp)
	movl	-140(%ebp), %edx        # 4-byte Reload
	movl	%edx, -104(%ebp)
	movl	-144(%ebp), %edx        # 4-byte Reload
	movl	%edx, -100(%ebp)
	movl	-148(%ebp), %edx        # 4-byte Reload
	movl	%edx, -96(%ebp)
	movl	%ecx, -92(%ebp)
	movl	%eax, -88(%ebp)
	movl	%edi, %ebx
	sbbl	%edx, %edx
	movl	-132(%ebp), %eax        # 4-byte Reload
	movl	%eax, -64(%ebp)
	movl	-184(%ebp), %ecx        # 4-byte Reload
	pushl	%eax
	movl	%ecx, %eax
	addb	$127, %al
	sahf
	popl	%eax
	jb	.LBB176_6
# BB#5:
	movl	$0, %eax
	movl	$0, %ebx
	movl	$0, %esi
.LBB176_6:
	movl	%eax, -132(%ebp)        # 4-byte Spill
	sbbl	%eax, %eax
	leal	-108(%ebp), %ecx
	movl	%ecx, 8(%esp)
	leal	-84(%ebp), %ecx
	movl	%ecx, 4(%esp)
	leal	-60(%ebp), %ecx
	movl	%ecx, (%esp)
	andl	%eax, %edx
	movl	-152(%ebp), %edi        # 4-byte Reload
	addl	-160(%ebp), %edi        # 4-byte Folded Reload
	adcl	%esi, -128(%ebp)        # 4-byte Folded Spill
	movl	-156(%ebp), %eax        # 4-byte Reload
	adcl	%eax, -120(%ebp)        # 4-byte Folded Spill
	adcl	%ebx, -116(%ebp)        # 4-byte Folded Spill
	movl	-176(%ebp), %eax        # 4-byte Reload
	adcl	%eax, -112(%ebp)        # 4-byte Folded Spill
	movl	-132(%ebp), %eax        # 4-byte Reload
	adcl	%eax, -124(%ebp)        # 4-byte Folded Spill
	sbbl	%esi, %esi
	andl	$1, %esi
	andl	$1, %edx
	movl	%edx, -132(%ebp)        # 4-byte Spill
	movl	-164(%ebp), %ebx        # 4-byte Reload; restore PIC base
	calll	mcl_fpDbl_mulPre6Lbmi2@PLT      # (xL+xH)*(yL+yH) into -60(%ebp)
	addl	-36(%ebp), %edi         # fold cross-term carries
	movl	-128(%ebp), %eax        # 4-byte Reload
	adcl	-32(%ebp), %eax
	movl	%eax, -128(%ebp)        # 4-byte Spill
	movl	-120(%ebp), %eax        # 4-byte Reload
	adcl	-28(%ebp), %eax
	movl	%eax, -120(%ebp)        # 4-byte Spill
	movl	-116(%ebp), %eax        # 4-byte Reload
	adcl	-24(%ebp), %eax
	movl	%eax, -116(%ebp)        # 4-byte Spill
	movl	-112(%ebp), %eax        # 4-byte Reload
	adcl	-20(%ebp), %eax
	movl	%eax, -112(%ebp)        # 4-byte Spill
	movl	-124(%ebp), %eax        # 4-byte Reload
	adcl	-16(%ebp), %eax
	movl	%eax, -124(%ebp)        # 4-byte Spill
	adcl	%esi, -132(%ebp)        # 4-byte Folded Spill
	movl	-60(%ebp), %ecx         # subtract low product (dst[0..11])
	movl	8(%ebp), %eax
	subl	(%eax), %ecx
	movl	%ecx, -144(%ebp)        # 4-byte Spill
	movl	-56(%ebp), %esi
	sbbl	4(%eax), %esi
	movl	-52(%ebp), %ecx
	sbbl	8(%eax), %ecx
	movl	%ecx, -136(%ebp)        # 4-byte Spill
	movl	-48(%ebp), %edx
	sbbl	12(%eax), %edx
	movl	-44(%ebp), %ebx
	sbbl	16(%eax), %ebx
	movl	-40(%ebp), %ecx
	sbbl	20(%eax), %ecx
	movl	%ecx, -140(%ebp)        # 4-byte Spill
	movl	24(%eax), %ecx
	movl	%ecx, -148(%ebp)        # 4-byte Spill
	sbbl	%ecx, %edi
	movl	28(%eax), %ecx
	movl	%ecx, -152(%ebp)        # 4-byte Spill
	sbbl	%ecx, -128(%ebp)        # 4-byte Folded Spill
	movl	32(%eax), %ecx
	movl	%ecx, -156(%ebp)        # 4-byte Spill
	sbbl	%ecx, -120(%ebp)        # 4-byte Folded Spill
	movl	36(%eax), %ecx
	movl	%ecx, -160(%ebp)        # 4-byte Spill
	sbbl	%ecx, -116(%ebp)        # 4-byte Folded Spill
	movl	40(%eax), %ecx
	movl	%ecx, -164(%ebp)        # 4-byte Spill
	sbbl	%ecx, -112(%ebp)        # 4-byte Folded Spill
	movl	44(%eax), %ecx
	movl	%ecx, -168(%ebp)        # 4-byte Spill
	sbbl	%ecx, -124(%ebp)        # 4-byte Folded Spill
	sbbl	$0, -132(%ebp)          # 4-byte Folded Spill
	movl	48(%eax), %ecx          # subtract high product (dst[12..23])
	movl	%ecx, -192(%ebp)        # 4-byte Spill
	subl	%ecx, -144(%ebp)        # 4-byte Folded Spill
	movl	52(%eax), %ecx
	movl	%ecx, -196(%ebp)        # 4-byte Spill
	sbbl	%ecx, %esi
	movl	56(%eax), %ecx
	movl	%ecx, -200(%ebp)        # 4-byte Spill
	sbbl	%ecx, -136(%ebp)        # 4-byte Folded Spill
	movl	60(%eax), %ecx
	movl	%ecx, -204(%ebp)        # 4-byte Spill
	sbbl	%ecx, %edx
	movl	64(%eax), %ecx
	movl	%ecx, -208(%ebp)        # 4-byte Spill
	sbbl	%ecx, %ebx
	movl	68(%eax), %ecx
	movl	%ecx, -212(%ebp)        # 4-byte Spill
	sbbl	%ecx, -140(%ebp)        # 4-byte Folded Spill
	movl	72(%eax), %ecx
	movl	%ecx, -216(%ebp)        # 4-byte Spill
	sbbl	%ecx, %edi
	movl	76(%eax), %ecx
	movl	%ecx, -172(%ebp)        # 4-byte Spill
	sbbl	%ecx, -128(%ebp)        # 4-byte Folded Spill
	movl	80(%eax), %ecx
	movl	%ecx, -176(%ebp)        # 4-byte Spill
	sbbl	%ecx, -120(%ebp)        # 4-byte Folded Spill
	movl	84(%eax), %ecx
	movl	%ecx, -180(%ebp)        # 4-byte Spill
	sbbl	%ecx, -116(%ebp)        # 4-byte Folded Spill
	movl	88(%eax), %ecx
	movl	%ecx, -184(%ebp)        # 4-byte Spill
	sbbl	%ecx, -112(%ebp)        # 4-byte Folded Spill
	movl	92(%eax), %ecx
	movl	%ecx, -188(%ebp)        # 4-byte Spill
	sbbl	%ecx, -124(%ebp)        # 4-byte Folded Spill
	sbbl	$0, -132(%ebp)          # 4-byte Folded Spill
	movl	-144(%ebp), %ecx        # 4-byte Reload; add middle term into
	addl	-148(%ebp), %ecx        # 4-byte Folded Reload; dst[6..23]
	adcl	-152(%ebp), %esi        # 4-byte Folded Reload
	movl	%ecx, 24(%eax)
	movl	-136(%ebp), %ecx        # 4-byte Reload
	adcl	-156(%ebp), %ecx        # 4-byte Folded Reload
	movl	%esi, 28(%eax)
	adcl	-160(%ebp), %edx        # 4-byte Folded Reload
	movl	%ecx, 32(%eax)
	adcl	-164(%ebp), %ebx        # 4-byte Folded Reload
	movl	%edx, 36(%eax)
	movl	-140(%ebp), %ecx        # 4-byte Reload
	adcl	-168(%ebp), %ecx        # 4-byte Folded Reload
	movl	%ebx, 40(%eax)
	adcl	-192(%ebp), %edi        # 4-byte Folded Reload
	movl	%ecx, 44(%eax)
	movl	-128(%ebp), %ecx        # 4-byte Reload
	adcl	-196(%ebp), %ecx        # 4-byte Folded Reload
	movl	%edi, 48(%eax)
	movl	-120(%ebp), %edx        # 4-byte Reload
	adcl	-200(%ebp), %edx        # 4-byte Folded Reload
	movl	%ecx, 52(%eax)
	movl	-116(%ebp), %ecx        # 4-byte Reload
	adcl	-204(%ebp), %ecx        # 4-byte Folded Reload
	movl	%edx, 56(%eax)
	movl	-112(%ebp), %edx        # 4-byte Reload
	adcl	-208(%ebp), %edx        # 4-byte Folded Reload
	movl	%ecx, 60(%eax)
	movl	-124(%ebp), %ecx        # 4-byte Reload
	adcl	-212(%ebp), %ecx        # 4-byte Folded Reload
	movl	%edx, 64(%eax)
	movl	-132(%ebp), %edx        # 4-byte Reload
	adcl	-216(%ebp), %edx        # 4-byte Folded Reload
	movl	%ecx, 68(%eax)
	movl	%edx, 72(%eax)
	movl	-172(%ebp), %ecx        # 4-byte Reload; propagate carry upward
	adcl	$0, %ecx
	movl	%ecx, 76(%eax)
	movl	-176(%ebp), %ecx        # 4-byte Reload
	adcl	$0, %ecx
	movl	%ecx, 80(%eax)
	movl	-180(%ebp), %ecx        # 4-byte Reload
	adcl	$0, %ecx
	movl	%ecx, 84(%eax)
	movl	-184(%ebp), %ecx        # 4-byte Reload
	adcl	$0, %ecx
	movl	%ecx, 88(%eax)
	movl	-188(%ebp), %ecx        # 4-byte Reload
	adcl	$0, %ecx
	movl	%ecx, 92(%eax)
	addl	$220, %esp
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	retl
.Lfunc_end176:
	.size	mcl_fpDbl_mulPre12Lbmi2, .Lfunc_end176-mcl_fpDbl_mulPre12Lbmi2

#-----------------------------------------------------------------------------
# mcl_fpDbl_sqrPre12Lbmi2 — squaring analogue of mulPre12 (same 6+6 split,
# both halves from the single operand). TRUNCATED: this chunk ends mid-body;
# the remainder of the function is outside the visible span. Left as-is.
#-----------------------------------------------------------------------------
	.globl	mcl_fpDbl_sqrPre12Lbmi2
	.align	16, 0x90
	.type	mcl_fpDbl_sqrPre12Lbmi2,@function
mcl_fpDbl_sqrPre12Lbmi2:                # @mcl_fpDbl_sqrPre12Lbmi2
# BB#0:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	subl	$220, %esp
	calll	.L177$pb
.L177$pb:
	popl	%ebx
.Ltmp28:
	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp28-.L177$pb), %ebx
	movl	%ebx, -152(%ebp)        # 4-byte Spill
	movl	12(%ebp), %edi
	movl	%edi, 8(%esp)
	movl	%edi, 4(%esp)
	movl	8(%ebp), %esi
	movl	%esi, (%esp)
	calll	mcl_fpDbl_mulPre6Lbmi2@PLT
	leal	24(%edi), %eax
	movl	%eax, 8(%esp)
	movl	%eax, 4(%esp)
	leal	48(%esi), %eax
	movl	%eax, (%esp)
	calll	mcl_fpDbl_mulPre6Lbmi2@PLT
	movl	44(%edi), %eax
	movl	%eax, -136(%ebp)        # 4-byte Spill
	movl	40(%edi), %edx
	movl	36(%edi), %eax
	movl	(%edi), %ebx
	movl	4(%edi), %esi
	addl	24(%edi), %ebx
	adcl	28(%edi), %esi
	movl	32(%edi), %ecx
	adcl	8(%edi), %ecx
	movl	%ecx, -144(%ebp)        # 4-byte Spill
	adcl	12(%edi), %eax
	movl	%eax, -140(%ebp)        # 4-byte Spill
	adcl	16(%edi), %edx
	movl	%edx, %ecx
	movl	-136(%ebp), %eax        # 4-byte Reload
	adcl	20(%edi), %eax
	movl	%eax, -136(%ebp)        # 4-byte Spill
	seto	%al
	lahf
	movl	%eax, %edx
	movl	%edx, -156(%ebp)        # 4-byte Spill
	pushl	%eax
	seto	%al
	lahf
	movl	%eax, %edx
	popl	%eax
	movl	%edx, -124(%ebp)        # 4-byte Spill
	seto	%al
	lahf
	movl	%eax, %eax
	movl	%eax, -120(%ebp)        # 4-byte Spill
	seto	%al
	lahf
	movl	%eax, %edx
	sbbl	%edi, %edi
	movl	%edi, -148(%ebp)        # 4-byte Spill
	movl	%ebx, %edi
	addl	%edi, %edi
	movl	%edi, -112(%ebp)        # 4-byte Spill
	movl	%esi, %edi
	movl	%esi, %eax
	adcl	%edi, %edi
	movl	%edi, -132(%ebp)        # 4-byte Spill
	pushl	%eax
	movl	%edx, %eax
	addb	$127, %al
	sahf
	popl	%eax
	jb	.LBB177_2
# BB#1:
	movl	$0, -132(%ebp)          # 4-byte Folded Spill
	movl	$0, -112(%ebp)          # 4-byte Folded Spill
.LBB177_2:
	movl	-144(%ebp), %esi        # 4-byte Reload
	addl	%esi, %esi
	movl	-140(%ebp), %edx        # 4-byte Reload
	adcl	%edx, %edx
	movl	%edx, -116(%ebp)        # 4-byte Spill
	movl	-120(%ebp), %edx        # 4-byte Reload
	pushl	%eax
	movl	%edx, %eax
	addb	$127, %al
	sahf
	popl	%eax
	jb	.LBB177_3
# BB#4:
	movl	$0, -116(%ebp)          # 4-byte Folded Spill
	movl	$0, -120(%ebp)          # 4-byte Folded Spill
	jmp	.LBB177_5
.LBB177_3:
	movl	%eax, %edx
	shrl	$31, %edx
	orl	%esi, %edx
	movl	%edx, -120(%ebp)        # 4-byte Spill
.LBB177_5:
	movl	-136(%ebp), %edx        # 4-byte Reload
	movl	%ecx, %esi
	addl	%esi, %esi
	adcl	%edx, %edx
	movl	-124(%ebp), %edi        # 4-byte Reload
	pushl	%eax
	movl	%edi, %eax
	addb	$127, %al
	sahf
	popl	%eax
	jb	.LBB177_6
# BB#7:
	xorl	%edx, %edx
	movl	$0, -128(%ebp)          # 4-byte Folded Spill
	movl	-140(%ebp), %edi        # 4-byte Reload
	jmp	.LBB177_8
.LBB177_6:
	movl	%ecx, -124(%ebp)        # 4-byte Spill
	movl	-140(%ebp), %edi        # 4-byte Reload
	movl	%edi, %ecx
	shrl	$31, %ecx
	orl	%esi, %ecx
	movl	%ecx, -128(%ebp)        # 4-byte Spill
	movl	-124(%ebp), %ecx        # 4-byte Reload
.LBB177_8:
	movl	%edx, -124(%ebp)        # 4-byte Spill
	movl	%ebx, -84(%ebp)
	movl	%eax, -80(%ebp)
	movl	-144(%ebp), %esi        # 4-byte Reload
	movl	%esi, -76(%ebp)
	movl	%edi, -72(%ebp)
	movl	%ecx, -68(%ebp)
	movl	-136(%ebp), %edx        # 4-byte Reload
	movl	%edx, -64(%ebp)
	movl	%ebx, -108(%ebp)
	movl	%eax, -104(%ebp)
	movl	%esi, -100(%ebp)
	movl	%edi, -96(%ebp)
	movl	%ecx, -92(%ebp)
	movl	%edx, -88(%ebp)
	movl	-156(%ebp), %eax        # 4-byte Reload
	movl	%eax, %eax
	addb	$127, %al
	sahf
	jb	.LBB177_9
# BB#10:
	movl	$0, -136(%ebp)          # 4-byte Folded Spill
	jmp	.LBB177_11
.LBB177_9:
	shrl	$31, %edx
	movl	%edx, -136(%ebp)        # 4-byte Spill
.LBB177_11:
	leal	-108(%ebp), %eax
	movl	%eax, 8(%esp)
	leal	-84(%ebp), %eax
	movl	%eax, 4(%esp)
	leal	-60(%ebp), %eax
	movl	%eax, (%esp)
	movl	-148(%ebp), %esi        # 4-byte Reload
	andl	$1, %esi
	movl	-152(%ebp), %ebx        # 4-byte Reload
	calll	mcl_fpDbl_mulPre6Lbmi2@PLT
	movl	-112(%ebp), %eax        # 4-byte Reload
	addl	-36(%ebp), %eax
	movl	%eax, -112(%ebp)        # 4-byte Spill
	movl	-132(%ebp), %edi        # 4-byte Reload
	adcl	-32(%ebp), %edi
	movl	-120(%ebp), %eax        # 4-byte Reload
	adcl	-28(%ebp), %eax
	movl	%eax, -120(%ebp)        # 4-byte Spill
	movl	-116(%ebp), %eax        # 4-byte Reload
	adcl	-24(%ebp), %eax
	movl	%eax, -116(%ebp)        # 4-byte Spill
	movl	-128(%ebp), %eax        # 4-byte Reload
	adcl	-20(%ebp), %eax
	movl	%eax, -128(%ebp)        # 4-byte Spill
	movl	-124(%ebp), %eax        # 4-byte Reload
	adcl	-16(%ebp), %eax
	movl	%eax, -124(%ebp)        # 4-byte Spill
	adcl	-136(%ebp), %esi        # 4-byte Folded Reload
	movl	-60(%ebp), %edx
	movl	8(%ebp), %eax
	subl	(%eax), %edx
	movl	-56(%ebp), %ebx
	sbbl	4(%eax), %ebx
	movl	-52(%ebp), %ecx
	sbbl	8(%eax), %ecx
	movl	%ecx, -136(%ebp)        # 4-byte Spill
	movl	-48(%ebp), %ecx
	sbbl	12(%eax), %ecx
	movl	%ecx, -144(%ebp)        # 4-byte Spill
	movl	-44(%ebp), %ecx
	sbbl	16(%eax), %ecx
	movl	%ecx, -172(%ebp)        # 4-byte Spill
	movl	-40(%ebp), %ecx
	sbbl	20(%eax), %ecx
	movl	%ecx, -140(%ebp)        # 4-byte Spill
	movl	24(%eax), %ecx
	movl	%ecx, -148(%ebp)        # 4-byte Spill
	sbbl	%ecx, -112(%ebp)        # 4-byte Folded Spill
	movl	28(%eax), %ecx
	movl	%ecx, -152(%ebp)        # 4-byte Spill
	sbbl	%ecx, %edi
	movl	%edi, -132(%ebp)        # 4-byte Spill
	movl	32(%eax), %ecx
	movl	%ecx, -156(%ebp)        # 4-byte Spill
	sbbl	%ecx, -120(%ebp)        # 4-byte Folded Spill
	movl	36(%eax), %ecx
	movl	%ecx, -160(%ebp)        # 4-byte Spill
	sbbl	%ecx, -116(%ebp)        # 4-byte Folded Spill
	movl	40(%eax), %ecx
	movl	%ecx, -164(%ebp)        # 4-byte Spill
	sbbl	%ecx, -128(%ebp)        # 4-byte Folded Spill
	movl	44(%eax), %ecx
	movl	%ecx, -168(%ebp)        # 4-byte Spill
	sbbl	%ecx, -124(%ebp)        # 4-byte Folded Spill
	sbbl	$0, %esi
	movl	48(%eax), %ecx
	movl	%ecx, -192(%ebp)        # 4-byte Spill
	subl	%ecx, %edx
	movl	52(%eax), %ecx
	movl	%ecx, -196(%ebp)        # 4-byte Spill
	sbbl	%ecx, %ebx
	movl	56(%eax), %ecx
	movl	%ecx, -200(%ebp)        # 4-byte Spill
	sbbl	%ecx, -136(%ebp)        # 4-byte Folded Spill
	movl	60(%eax), %ecx
	movl	%ecx, -204(%ebp)        # 4-byte Spill
	sbbl	%ecx, -144(%ebp)        # 4-byte Folded Spill
	movl	64(%eax), %ecx
	movl	%ecx, -208(%ebp)        # 4-byte Spill
	movl	-172(%ebp), %edi        # 4-byte Reload
	sbbl	%ecx, %edi
	movl	68(%eax), %ecx
	movl	%ecx, -212(%ebp)        # 4-byte Spill
	sbbl	%ecx, -140(%ebp)        # 4-byte Folded Spill
	movl	72(%eax), %ecx
	movl	%ecx, -216(%ebp)        # 4-byte Spill
	sbbl	%ecx, -112(%ebp)        # 4-byte Folded Spill
	movl	76(%eax), %ecx
	movl	%ecx, -172(%ebp)        # 4-byte Spill
	sbbl	%ecx, -132(%ebp)        # 4-byte Folded Spill
	movl	80(%eax), %ecx
	movl	%ecx, -176(%ebp)        # 4-byte Spill
	sbbl	%ecx, -120(%ebp)        # 4-byte Folded Spill
	movl	84(%eax), %ecx
	movl	%ecx, -180(%ebp)        # 4-byte Spill
	sbbl	%ecx, -116(%ebp)        # 4-byte Folded Spill
	movl	88(%eax), %ecx
	movl	%ecx, -184(%ebp)        # 4-byte Spill
	sbbl	%ecx, -128(%ebp)        # 4-byte Folded Spill
	movl	92(%eax), %ecx
	movl	%ecx, -188(%ebp)        # 4-byte Spill
	sbbl	%ecx, -124(%ebp)        # 4-byte Folded Spill
	sbbl	$0, %esi
	addl	-148(%ebp), %edx        # 4-byte Folded Reload
	adcl	-152(%ebp), %ebx        # 4-byte Folded Reload
	movl	%edx, 24(%eax)
	movl
-136(%ebp), %ecx # 4-byte Reload + adcl -156(%ebp), %ecx # 4-byte Folded Reload + movl %ebx, 28(%eax) + movl -144(%ebp), %edx # 4-byte Reload + adcl -160(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 32(%eax) + adcl -164(%ebp), %edi # 4-byte Folded Reload + movl %edx, 36(%eax) + movl -140(%ebp), %edx # 4-byte Reload + adcl -168(%ebp), %edx # 4-byte Folded Reload + movl %edi, 40(%eax) + movl -112(%ebp), %ecx # 4-byte Reload + adcl -192(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 44(%eax) + movl -132(%ebp), %edi # 4-byte Reload + adcl -196(%ebp), %edi # 4-byte Folded Reload + movl %ecx, 48(%eax) + movl -120(%ebp), %edx # 4-byte Reload + adcl -200(%ebp), %edx # 4-byte Folded Reload + movl %edi, 52(%eax) + movl -116(%ebp), %ecx # 4-byte Reload + adcl -204(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 56(%eax) + movl -128(%ebp), %edx # 4-byte Reload + adcl -208(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 60(%eax) + movl -124(%ebp), %ecx # 4-byte Reload + adcl -212(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 64(%eax) + adcl -216(%ebp), %esi # 4-byte Folded Reload + movl %ecx, 68(%eax) + movl %esi, 72(%eax) + movl -172(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 76(%eax) + movl -176(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 80(%eax) + movl -180(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 84(%eax) + movl -184(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 88(%eax) + movl -188(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 92(%eax) + addl $220, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end177: + .size mcl_fpDbl_sqrPre12Lbmi2, .Lfunc_end177-mcl_fpDbl_sqrPre12Lbmi2 + + .globl mcl_fp_mont12Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont12Lbmi2,@function +mcl_fp_mont12Lbmi2: # @mcl_fp_mont12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1436, %esp # imm = 0x59C + calll .L178$pb +.L178$pb: + popl %ebx +.Ltmp29: + addl 
$_GLOBAL_OFFSET_TABLE_+(.Ltmp29-.L178$pb), %ebx + movl 1468(%esp), %eax + movl -4(%eax), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1384(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 1384(%esp), %ebp + movl 1388(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 1432(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 1428(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1424(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1420(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1416(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1412(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1408(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1404(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1400(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1396(%esp), %edi + movl 1392(%esp), %esi + movl %eax, (%esp) + leal 1328(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + addl 1328(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1336(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 1340(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 88(%esp) # 
4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1372(%esp), %esi + movl 92(%esp), %ebp # 4-byte Reload + adcl 1376(%esp), %ebp + sbbl %edi, %edi + movl 1464(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1272(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + andl $1, %edi + movl 84(%esp), %ecx # 4-byte Reload + addl 1272(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1312(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 1316(%esp), %ebp + adcl 1320(%esp), %edi + sbbl %eax, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 84(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1216(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1224(%esp), %esi + movl 56(%esp), %ecx # 4-byte Reload + adcl 1228(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + 
movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1244(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1248(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1252(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1256(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 1260(%esp), %ebp + adcl 1264(%esp), %edi + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1160(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1160(%esp), %ecx + adcl 1164(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1200(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + adcl 1204(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1208(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %esi 
+ movl %esi, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1104(%esp), %ecx + movl 1468(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %ecx + addl 1104(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1140(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1148(%esp), %edi + movl 84(%esp), %ebp # 4-byte Reload + adcl 1152(%esp), %ebp + adcl $0, %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1048(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 1048(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 60(%esp) 
# 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1080(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1088(%esp), %edi + adcl 1092(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %ebp + movl %ebp, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %esi + movl %esi, %eax + addl 992(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 996(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1000(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 1004(%esp), %ebp + movl 52(%esp), %ecx # 4-byte Reload + adcl 1008(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1012(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1016(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1020(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1024(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1028(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1032(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1036(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1040(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl %eax, %edi + adcl $0, %edi + movl 1464(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 936(%esp), 
%ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 936(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 944(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 948(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 960(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 980(%esp), %esi + adcl 984(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 880(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %edi + movl %edi, %ecx + movl 56(%esp), %eax # 4-byte Reload + addl 880(%esp), %eax + movl 48(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 892(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 
908(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 912(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 924(%esp), %esi + movl %esi, %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 824(%esp), %ecx + movl 44(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 840(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 852(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 864(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %edi + movl %edi, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 768(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %ecx + addl 
768(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 780(%esp), %ebp + adcl 784(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 800(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 808(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 1460(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + movl 44(%esp), %eax # 4-byte Reload + addl 712(%esp), %eax + movl 52(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 720(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 724(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 728(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 732(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 736(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 740(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 744(%esp), %ecx + movl %ecx, 
84(%esp) # 4-byte Spill + adcl 748(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 752(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 756(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 760(%esp), %edi + sbbl %ebp, %ebp + movl %eax, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 656(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %eax + addl 656(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 660(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 664(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 668(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 672(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 676(%esp), %ebp + movl 92(%esp), %ecx # 4-byte Reload + adcl 680(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 684(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 688(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 696(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 704(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl %eax, %edi + adcl $0, %edi + movl 1464(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 600(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 52(%esp), %ecx # 4-byte Reload + addl 600(%esp), %ecx + adcl 604(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 
76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 616(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 620(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 636(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 648(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 44(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 544(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 548(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 552(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 560(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 564(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 568(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 572(%esp), %edi + movl 72(%esp), %ecx # 4-byte Reload + adcl 576(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 580(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 584(%esp), %esi + movl 
48(%esp), %ecx # 4-byte Reload + adcl 588(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 592(%esp), %ebp + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 488(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 512(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 524(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 532(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 536(%esp), %ebp + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %edi + movl %edi, %ecx + addl 432(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 440(%esp), %edi + movl 80(%esp), %esi # 4-byte Reload + adcl 444(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 
68(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 480(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 376(%esp), %ecx + adcl 380(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 392(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 416(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 60(%esp) 
# 4-byte Spill + sbbl %eax, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 320(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 76(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 320(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + adcl 336(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 344(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 360(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 88(%esp), %ecx # 4-byte Reload + addl 264(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 272(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 284(%esp), %edi + movl %edi, 72(%esp) # 
4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 288(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 296(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 88(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 208(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 224(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 232(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 236(%esp), %edi + adcl 240(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 248(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1464(%esp), %eax + 
movl 44(%eax), %eax + movl %eax, (%esp) + leal 152(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 152(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 164(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 176(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 188(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 196(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 40(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 96(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %esi + addl 96(%esp), %edi + movl 84(%esp), %ebx # 4-byte Reload + movl 92(%esp), %eax # 4-byte Reload + movl 72(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %edx, %edi + adcl 108(%esp), %ebx + adcl 112(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 48(%esp), 
%edx # 4-byte Reload + adcl 124(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 128(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 132(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 136(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 140(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl %edx, %ebp + adcl $0, %esi + movl 1468(%esp), %edx + subl (%edx), %eax + sbbl 4(%edx), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl %ebx, %edi + sbbl 8(%edx), %edi + movl %edi, 20(%esp) # 4-byte Spill + sbbl 12(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + sbbl 16(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + sbbl 20(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + sbbl 24(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + sbbl 28(%edx), %ecx + movl 44(%esp), %edi # 4-byte Reload + sbbl 32(%edx), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 36(%edx), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + sbbl 40(%edx), %edi + movl %edi, 84(%esp) # 4-byte Spill + sbbl 44(%edx), %ebp + movl %ebp, %edx + sbbl $0, %esi + andl $1, %esi + jne .LBB178_2 +# BB#1: + movl %ecx, 52(%esp) # 4-byte Spill +.LBB178_2: + movl %esi, %ecx + testb %cl, %cl + movl 92(%esp), %ecx # 4-byte Reload + jne .LBB178_4 +# BB#3: + movl %eax, %ecx +.LBB178_4: + movl 1456(%esp), %eax + movl %ecx, (%eax) + movl 68(%esp), %edi # 4-byte Reload + jne .LBB178_6 +# BB#5: + movl 16(%esp), %edi # 4-byte Reload +.LBB178_6: + movl %edi, 4(%eax) + movl 64(%esp), %ebp # 4-byte Reload + jne .LBB178_8 +# BB#7: + movl 20(%esp), %ebx # 4-byte Reload 
+.LBB178_8: + movl %ebx, 8(%eax) + jne .LBB178_10 +# BB#9: + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 72(%esp) # 4-byte Spill +.LBB178_10: + movl 72(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + jne .LBB178_12 +# BB#11: + movl 28(%esp), %ebp # 4-byte Reload +.LBB178_12: + movl %ebp, 16(%eax) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB178_14 +# BB#13: + movl 32(%esp), %ecx # 4-byte Reload +.LBB178_14: + movl %ecx, 20(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB178_16 +# BB#15: + movl 36(%esp), %ecx # 4-byte Reload +.LBB178_16: + movl %ecx, 24(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB178_18 +# BB#17: + movl 40(%esp), %ecx # 4-byte Reload +.LBB178_18: + movl %ecx, 32(%eax) + movl 60(%esp), %ecx # 4-byte Reload + jne .LBB178_20 +# BB#19: + movl 80(%esp), %ecx # 4-byte Reload +.LBB178_20: + movl %ecx, 36(%eax) + movl 76(%esp), %ecx # 4-byte Reload + jne .LBB178_22 +# BB#21: + movl 84(%esp), %ecx # 4-byte Reload +.LBB178_22: + movl %ecx, 40(%eax) + movl 88(%esp), %ecx # 4-byte Reload + jne .LBB178_24 +# BB#23: + movl %edx, %ecx +.LBB178_24: + movl %ecx, 44(%eax) + addl $1436, %esp # imm = 0x59C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end178: + .size mcl_fp_mont12Lbmi2, .Lfunc_end178-mcl_fp_mont12Lbmi2 + + .globl mcl_fp_montNF12Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF12Lbmi2,@function +mcl_fp_montNF12Lbmi2: # @mcl_fp_montNF12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1420, %esp # imm = 0x58C + calll .L179$pb +.L179$pb: + popl %ebx +.Ltmp30: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp30-.L179$pb), %ebx + movl 1452(%esp), %eax + movl -4(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1368(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1368(%esp), %ebp + movl 1372(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %ebp, %eax + 
imull %esi, %eax + movl 1416(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1412(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1408(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1404(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1400(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1396(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1392(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1388(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1384(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1380(%esp), %edi + movl 1376(%esp), %esi + movl %eax, (%esp) + leal 1312(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 1312(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1320(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 1324(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 1344(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1356(%esp), %esi + movl 76(%esp), %ebp # 4-byte Reload + adcl 1360(%esp), %ebp + movl 1448(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1256(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1304(%esp), %eax + movl 56(%esp), %edx # 4-byte Reload + addl 1256(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + adcl 
1260(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1264(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1268(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1272(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 1276(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1280(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 1284(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1288(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1296(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 1300(%esp), %ebp + adcl $0, %eax + movl %eax, %edi + movl %edx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1200(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 1200(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 1208(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax 
# 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1244(%esp), %ebp + adcl 1248(%esp), %edi + movl 1448(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1192(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + addl 1144(%esp), %edx + adcl 1148(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1152(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1156(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 1160(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 1164(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1168(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1172(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1176(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1180(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 1184(%esp), %ebp + adcl 1188(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1088(%esp), %ecx + movl 1452(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + addl 1088(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl %esi, %edi + adcl 1104(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 
1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1124(%esp), %esi + adcl 1128(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 1136(%esp), %ebp + movl 1448(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1032(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1080(%esp), %eax + movl 52(%esp), %edx # 4-byte Reload + addl 1032(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 1036(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1040(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 1044(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1056(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1060(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1064(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1072(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1076(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %edi + movl %edx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 976(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 976(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload 
+ adcl 980(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 1004(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1012(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1024(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 968(%esp), %eax + movl 40(%esp), %edx # 4-byte Reload + addl 920(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + adcl 924(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 928(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 932(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 936(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 940(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 944(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 952(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 956(%esp), %edi + movl 
56(%esp), %ecx # 4-byte Reload + adcl 960(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 964(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 864(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 864(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 884(%esp), %ebp + adcl 888(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 900(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 912(%esp), %edi + movl 1448(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 808(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 856(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 808(%esp), %ecx + movl 32(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 824(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + 
adcl 828(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 832(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 852(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 1452(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + addl 752(%esp), %esi + movl 32(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 760(%esp), %edi + movl 44(%esp), %esi # 4-byte Reload + adcl 764(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 776(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 792(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1448(%esp), %ecx + movl %ecx, %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1444(%esp), %eax + 
movl %eax, %edx + calll .LmulPv384x32 + movl 744(%esp), %ecx + movl 32(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + adcl 700(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 704(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 708(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 712(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 716(%esp), %esi + movl 76(%esp), %edx # 4-byte Reload + adcl 720(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 724(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 728(%esp), %edi + adcl 732(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 736(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 740(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl %eax, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 640(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 648(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 660(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 668(%esp), %esi + adcl 672(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 676(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + 
movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 584(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 632(%esp), %edx + movl 48(%esp), %ecx # 4-byte Reload + addl 584(%esp), %ecx + adcl 588(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 596(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 608(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 616(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 620(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 528(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 528(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 540(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 548(%esp), 
%eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 564(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 568(%esp), %edi + movl 32(%esp), %esi # 4-byte Reload + adcl 572(%esp), %esi + movl 48(%esp), %ebp # 4-byte Reload + adcl 576(%esp), %ebp + movl 1448(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 520(%esp), %edx + movl 44(%esp), %ecx # 4-byte Reload + addl 472(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 508(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + adcl 512(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl 516(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 
416(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 440(%esp), %ebp + movl 52(%esp), %esi # 4-byte Reload + adcl 444(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 408(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + addl 360(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 372(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 380(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + 
movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 304(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 312(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 320(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 328(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 296(%esp), %edx + movl 64(%esp), %ecx # 4-byte Reload + addl 248(%esp), %ecx + adcl 252(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 260(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 56(%esp) # 4-byte 
Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 272(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 192(%esp), %esi + adcl 196(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 200(%esp), %edi + movl 68(%esp), %esi # 4-byte Reload + adcl 204(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 216(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 224(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 1444(%esp), %edx + calll 
.LmulPv384x32 + movl 184(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + addl 136(%esp), %ecx + adcl 140(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + adcl 144(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 160(%esp), %edi + adcl 164(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 168(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 80(%esp), %ecx + movl 1452(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + addl 80(%esp), %esi + movl 56(%esp), %esi # 4-byte Reload + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 92(%esp), %esi + movl 52(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 104(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 112(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 44(%esp) # 
4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 124(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 128(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, %edx + movl 1452(%esp), %ebp + subl (%ebp), %edx + movl %ecx, %eax + sbbl 4(%ebp), %eax + movl %esi, %ebx + sbbl 8(%ebp), %ebx + movl 52(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl 40(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + sbbl 28(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + sbbl 32(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + sbbl 36(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 40(%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 44(%ebp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl %edi, %ebp + sarl $31, %ebp + testl %ebp, %ebp + movl 76(%esp), %ebp # 4-byte Reload + js .LBB179_2 +# BB#1: + movl %edx, %ebp +.LBB179_2: + movl 1440(%esp), %edx + movl %ebp, (%edx) + movl 68(%esp), %edi # 4-byte Reload + js .LBB179_4 +# BB#3: + movl %eax, %edi +.LBB179_4: + movl %edi, 4(%edx) + js .LBB179_6 +# BB#5: + movl %ebx, %esi +.LBB179_6: + movl %esi, 8(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB179_8 +# BB#7: + movl %ecx, %eax +.LBB179_8: + movl %eax, 12(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB179_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB179_10: + movl %eax, 16(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB179_12 +# BB#11: + movl 8(%esp), %eax # 
4-byte Reload +.LBB179_12: + movl %eax, 20(%edx) + movl 32(%esp), %eax # 4-byte Reload + js .LBB179_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB179_14: + movl %eax, 24(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB179_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB179_16: + movl %eax, 28(%edx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB179_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB179_18: + movl %eax, 32(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB179_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB179_20: + movl %eax, 36(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB179_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB179_22: + movl %eax, 40(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB179_24 +# BB#23: + movl 56(%esp), %eax # 4-byte Reload +.LBB179_24: + movl %eax, 44(%edx) + addl $1420, %esp # imm = 0x58C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end179: + .size mcl_fp_montNF12Lbmi2, .Lfunc_end179-mcl_fp_montNF12Lbmi2 + + .globl mcl_fp_montRed12Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed12Lbmi2,@function +mcl_fp_montRed12Lbmi2: # @mcl_fp_montRed12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $828, %esp # imm = 0x33C + calll .L180$pb +.L180$pb: + popl %eax +.Ltmp31: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp31-.L180$pb), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 856(%esp), %edx + movl -4(%edx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 852(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 88(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 100(%esp) # 4-byte Spill + imull %esi, %ebx + movl 92(%ecx), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 88(%ecx), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 80(%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 76(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 72(%ecx), %eax + movl %eax, 124(%esp) 
# 4-byte Spill + movl 68(%ecx), %edi + movl %edi, 140(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 148(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 48(%ecx), %edi + movl %edi, 152(%esp) # 4-byte Spill + movl 44(%ecx), %edi + movl %edi, 132(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 32(%ecx), %edi + movl 28(%ecx), %esi + movl 24(%ecx), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 12(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 8(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 776(%esp), %ecx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + movl 88(%esp), %eax # 4-byte Reload + addl 776(%esp), %eax + movl 100(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), 
%eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 796(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 804(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl 808(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + movl 136(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + sbbl %ebp, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 720(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %ecx + addl 720(%esp), %esi + movl 76(%esp), %ebp # 4-byte Reload + adcl 724(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), 
%eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + adcl 752(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl %ebp, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 664(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 668(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 692(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 696(%esp), %ebp + movl 
132(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl 712(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 144(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 608(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 608(%esp), %esi + movl 68(%esp), %esi # 4-byte Reload + adcl 612(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 636(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 652(%esp), 
%eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 108(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 552(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 
4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 120(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %esi + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 496(%esp), %edi + movl 80(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %ebp # 4-byte Reload + adcl 528(%esp), %ebp + movl 136(%esp), %edi # 4-byte Reload + adcl 532(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 104(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 
84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 440(%esp), %esi + movl 88(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl 468(%esp), %ebp + movl %ebp, 156(%esp) # 4-byte Spill + adcl 472(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + movl 148(%esp), %esi # 4-byte Reload + adcl 476(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 856(%esp), %eax + movl %eax, %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 384(%esp), %edi + movl 100(%esp), %ecx # 4-byte Reload + adcl 388(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + 
movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl 400(%esp), %ebp + movl 152(%esp), %edi # 4-byte Reload + adcl 404(%esp), %edi + movl 156(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl 416(%esp), %esi + movl %esi, 148(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 140(%esp), %esi # 4-byte Reload + adcl 424(%esp), %esi + movl 124(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + movl 100(%esp), %eax # 4-byte Reload + addl 328(%esp), %eax + movl 96(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl 112(%esp), %ecx # 4-byte Reload + adcl 336(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl 340(%esp), %ebp + movl %ebp, 132(%esp) # 4-byte Spill + adcl 344(%esp), %edi + movl %edi, 152(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 348(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 136(%esp), %ecx # 4-byte Reload + adcl 352(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 356(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 360(%esp), %ebp + adcl 364(%esp), %esi + movl %esi, 140(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + 
adcl 368(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 376(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 76(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %eax, %esi + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 272(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl 132(%esp), %ecx # 4-byte Reload + adcl 280(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 288(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 136(%esp), %ecx # 4-byte Reload + adcl 292(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 296(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 300(%esp), %esi + movl 140(%esp), %ecx # 4-byte Reload + adcl 304(%esp), %ecx + movl %ecx, 140(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 308(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 312(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 316(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, %ebp + movl %eax, %edi + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 
856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 216(%esp), %edi + movl 132(%esp), %ecx # 4-byte Reload + adcl 220(%esp), %ecx + movl 152(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl 240(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl 248(%esp), %esi + movl 144(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl 84(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 160(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 160(%esp), %edi + movl 152(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl %eax, %edi + movl 156(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %edx # 4-byte Reload + adcl 172(%esp), %edx + movl %edx, 136(%esp) # 4-byte Spill + movl 148(%esp), %ebx # 4-byte Reload + adcl 176(%esp), %ebx + movl %ebx, 148(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 180(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 
140(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl 188(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, %ebp + subl 24(%esp), %edi # 4-byte Folded Reload + movl 156(%esp), %esi # 4-byte Reload + sbbl 16(%esp), %esi # 4-byte Folded Reload + sbbl 20(%esp), %edx # 4-byte Folded Reload + sbbl 28(%esp), %ebx # 4-byte Folded Reload + sbbl 32(%esp), %ecx # 4-byte Folded Reload + movl 140(%esp), %eax # 4-byte Reload + sbbl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + sbbl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + sbbl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + sbbl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 132(%esp) # 4-byte Spill + sbbl $0, %ebp + andl $1, %ebp + jne .LBB180_2 +# BB#1: + movl %ebx, 148(%esp) # 4-byte Spill +.LBB180_2: + movl %ebp, %ebx + testb %bl, %bl + movl 152(%esp), %ebx # 4-byte Reload + jne .LBB180_4 +# BB#3: + movl 
%edi, %ebx +.LBB180_4: + movl 848(%esp), %edi + movl %ebx, (%edi) + movl 144(%esp), %ebx # 4-byte Reload + jne .LBB180_6 +# BB#5: + movl %esi, 156(%esp) # 4-byte Spill +.LBB180_6: + movl 156(%esp), %esi # 4-byte Reload + movl %esi, 4(%edi) + movl 136(%esp), %esi # 4-byte Reload + jne .LBB180_8 +# BB#7: + movl %edx, %esi +.LBB180_8: + movl %esi, 8(%edi) + movl 148(%esp), %edx # 4-byte Reload + movl %edx, 12(%edi) + movl 128(%esp), %esi # 4-byte Reload + movl 116(%esp), %edx # 4-byte Reload + jne .LBB180_10 +# BB#9: + movl %ecx, %edx +.LBB180_10: + movl %edx, 16(%edi) + movl 120(%esp), %edx # 4-byte Reload + movl 140(%esp), %ecx # 4-byte Reload + jne .LBB180_12 +# BB#11: + movl 84(%esp), %ecx # 4-byte Reload +.LBB180_12: + movl %ecx, 20(%edi) + movl 108(%esp), %ecx # 4-byte Reload + movl 124(%esp), %eax # 4-byte Reload + jne .LBB180_14 +# BB#13: + movl 88(%esp), %eax # 4-byte Reload +.LBB180_14: + movl %eax, 24(%edi) + movl 104(%esp), %eax # 4-byte Reload + jne .LBB180_16 +# BB#15: + movl 92(%esp), %ebx # 4-byte Reload +.LBB180_16: + movl %ebx, 28(%edi) + jne .LBB180_18 +# BB#17: + movl 96(%esp), %esi # 4-byte Reload +.LBB180_18: + movl %esi, 32(%edi) + jne .LBB180_20 +# BB#19: + movl 100(%esp), %edx # 4-byte Reload +.LBB180_20: + movl %edx, 36(%edi) + jne .LBB180_22 +# BB#21: + movl 112(%esp), %ecx # 4-byte Reload +.LBB180_22: + movl %ecx, 40(%edi) + jne .LBB180_24 +# BB#23: + movl 132(%esp), %eax # 4-byte Reload +.LBB180_24: + movl %eax, 44(%edi) + addl $828, %esp # imm = 0x33C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end180: + .size mcl_fp_montRed12Lbmi2, .Lfunc_end180-mcl_fp_montRed12Lbmi2 + + .globl mcl_fp_addPre12Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre12Lbmi2,@function +mcl_fp_addPre12Lbmi2: # @mcl_fp_addPre12Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 
16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ebx) + movl 36(%ecx), %edx + adcl %edi, %edx + movl 40(%eax), %edi + movl %esi, 32(%ebx) + movl 40(%ecx), %esi + adcl %edi, %esi + movl %edx, 36(%ebx) + movl %esi, 40(%ebx) + movl 44(%eax), %eax + movl 44(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 44(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end181: + .size mcl_fp_addPre12Lbmi2, .Lfunc_end181-mcl_fp_addPre12Lbmi2 + + .globl mcl_fp_subPre12Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre12Lbmi2,@function +mcl_fp_subPre12Lbmi2: # @mcl_fp_subPre12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ebp) + movl 36(%ecx), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ebp) + movl 40(%ecx), %edi + 
sbbl %ebx, %edi + movl %esi, 36(%ebp) + movl %edi, 40(%ebp) + movl 44(%edx), %edx + movl 44(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 44(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end182: + .size mcl_fp_subPre12Lbmi2, .Lfunc_end182-mcl_fp_subPre12Lbmi2 + + .globl mcl_fp_shr1_12Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_12Lbmi2,@function +mcl_fp_shr1_12Lbmi2: # @mcl_fp_shr1_12Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 40(%ecx) + shrl %eax + movl %eax, 44(%ecx) + popl %esi + retl +.Lfunc_end183: + .size mcl_fp_shr1_12Lbmi2, .Lfunc_end183-mcl_fp_shr1_12Lbmi2 + + .globl mcl_fp_add12Lbmi2 + .align 16, 0x90 + .type mcl_fp_add12Lbmi2,@function +mcl_fp_add12Lbmi2: # @mcl_fp_add12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 64(%esp), %ebx + movl (%ebx), %edx + movl 4(%ebx), %ecx + movl 60(%esp), %eax + addl (%eax), %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 4(%eax), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 8(%ebx), %ecx + adcl 8(%eax), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 12(%eax), %edx + movl 16(%eax), %ecx + adcl 12(%ebx), %edx + movl %edx, 24(%esp) # 4-byte Spill + adcl 16(%ebx), %ecx + 
movl %ecx, 20(%esp) # 4-byte Spill + movl 20(%eax), %ecx + adcl 20(%ebx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 24(%eax), %ecx + adcl 24(%ebx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 28(%eax), %ecx + adcl 28(%ebx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 32(%eax), %ebp + adcl 32(%ebx), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl 36(%eax), %edi + adcl 36(%ebx), %edi + movl 40(%eax), %esi + adcl 40(%ebx), %esi + movl 44(%eax), %edx + adcl 44(%ebx), %edx + movl 56(%esp), %ebx + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ebx) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%ebx) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%ebx) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%ebx) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%ebx) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%ebx) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%ebx) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%ebx) + movl %ebp, 32(%ebx) + movl %edi, 36(%ebx) + movl %esi, 40(%ebx) + movl %edx, 44(%ebx) + sbbl %ecx, %ecx + andl $1, %ecx + movl 68(%esp), %ebp + subl (%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + sbbl 4(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + sbbl 8(%ebp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + sbbl 12(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + sbbl 16(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 20(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + sbbl 24(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 8(%esp), %eax # 4-byte Reload + sbbl 28(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl (%esp), %eax # 4-byte Reload + sbbl 32(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 36(%ebp), %edi + sbbl 40(%ebp), %esi + 
sbbl 44(%ebp), %edx + sbbl $0, %ecx + testb $1, %cl + jne .LBB184_2 +# BB#1: # %nocarry + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ebx) + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 4(%ebx) + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 8(%ebx) + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebx) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 16(%ebx) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 20(%ebx) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 24(%ebx) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 28(%ebx) + movl (%esp), %eax # 4-byte Reload + movl %eax, 32(%ebx) + movl %edi, 36(%ebx) + movl %esi, 40(%ebx) + movl %edx, 44(%ebx) +.LBB184_2: # %carry + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end184: + .size mcl_fp_add12Lbmi2, .Lfunc_end184-mcl_fp_add12Lbmi2 + + .globl mcl_fp_addNF12Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF12Lbmi2,@function +mcl_fp_addNF12Lbmi2: # @mcl_fp_addNF12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 116(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + movl 112(%esp), %edx + addl (%edx), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 4(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 44(%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 40(%esi), %ebp + movl 36(%esi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 32(%esi), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 28(%esi), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 24(%esi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 20(%esi), %ebx + movl 16(%esi), %edi + movl 12(%esi), %ecx + movl 8(%esi), %eax + adcl 8(%edx), %eax + adcl 12(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 16(%edx), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 20(%edx), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 24(%edx), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte 
Reload + adcl 28(%edx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 32(%edx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 36(%edx), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl %eax, %esi + adcl 40(%edx), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 44(%edx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 120(%esp), %ebp + movl 60(%esp), %edx # 4-byte Reload + subl (%ebp), %edx + movl 64(%esp), %eax # 4-byte Reload + sbbl 4(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill + movl %esi, %eax + sbbl 8(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 12(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 16(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + sbbl 20(%ebp), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 24(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 36(%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, %edi + sbbl 40(%ebp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + sbbl 44(%ebp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl %edi, %ebp + movl 60(%esp), %edi # 4-byte Reload + sarl $31, %ebp + testl %ebp, %ebp + js .LBB185_2 +# BB#1: + movl %edx, %edi +.LBB185_2: + movl 108(%esp), %edx + movl %edi, (%edx) + movl 64(%esp), %edi # 4-byte Reload + js .LBB185_4 +# BB#3: + movl (%esp), %edi # 4-byte Reload +.LBB185_4: + movl %edi, 4(%edx) + movl %eax, %ebp + js .LBB185_6 +# BB#5: + movl 4(%esp), %esi # 4-byte Reload +.LBB185_6: + movl %esi, 8(%edx) + movl %ecx, %esi + movl 52(%esp), %eax # 4-byte Reload + movl 48(%esp), %ecx # 4-byte 
Reload + js .LBB185_8 +# BB#7: + movl 8(%esp), %ecx # 4-byte Reload +.LBB185_8: + movl %ecx, 12(%edx) + movl 76(%esp), %ebx # 4-byte Reload + movl 84(%esp), %edi # 4-byte Reload + js .LBB185_10 +# BB#9: + movl 12(%esp), %eax # 4-byte Reload +.LBB185_10: + movl %eax, 16(%edx) + movl 80(%esp), %ecx # 4-byte Reload + js .LBB185_12 +# BB#11: + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 56(%esp) # 4-byte Spill +.LBB185_12: + movl 56(%esp), %eax # 4-byte Reload + movl %eax, 20(%edx) + js .LBB185_14 +# BB#13: + movl 20(%esp), %ebp # 4-byte Reload +.LBB185_14: + movl %ebp, 24(%edx) + js .LBB185_16 +# BB#15: + movl 24(%esp), %edi # 4-byte Reload +.LBB185_16: + movl %edi, 28(%edx) + js .LBB185_18 +# BB#17: + movl 28(%esp), %ebx # 4-byte Reload +.LBB185_18: + movl %ebx, 32(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB185_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB185_20: + movl %eax, 36(%edx) + js .LBB185_22 +# BB#21: + movl 36(%esp), %esi # 4-byte Reload +.LBB185_22: + movl %esi, 40(%edx) + js .LBB185_24 +# BB#23: + movl 40(%esp), %ecx # 4-byte Reload +.LBB185_24: + movl %ecx, 44(%edx) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end185: + .size mcl_fp_addNF12Lbmi2, .Lfunc_end185-mcl_fp_addNF12Lbmi2 + + .globl mcl_fp_sub12Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub12Lbmi2,@function +mcl_fp_sub12Lbmi2: # @mcl_fp_sub12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 64(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + xorl %ebx, %ebx + movl 68(%esp), %edi + subl (%edi), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 20(%esp) # 
4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 28(%esi), %edx + sbbl 28(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 32(%esi), %ecx + sbbl 32(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 40(%esi), %ebp + sbbl 40(%edi), %ebp + movl 44(%esi), %esi + sbbl 44(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 60(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl %edx, 28(%ebx) + movl %ecx, 32(%ebx) + movl %eax, 36(%ebx) + movl %ebp, 40(%ebx) + movl %esi, 44(%ebx) + je .LBB186_2 +# BB#1: # %carry + movl %esi, %edi + movl 72(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 24(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 32(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl %eax, 36(%ebx) + movl 40(%esi), %eax + adcl %ebp, %eax + movl %eax, 40(%ebx) + movl 
44(%esi), %eax + adcl %edi, %eax + movl %eax, 44(%ebx) +.LBB186_2: # %nocarry + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end186: + .size mcl_fp_sub12Lbmi2, .Lfunc_end186-mcl_fp_sub12Lbmi2 + + .globl mcl_fp_subNF12Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF12Lbmi2,@function +mcl_fp_subNF12Lbmi2: # @mcl_fp_subNF12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 96(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %eax + movl 100(%esp), %edi + subl (%edi), %edx + movl %edx, 48(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 32(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 36(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + sarl $31, %eax + movl %eax, %edx + addl %edx, %edx + movl %eax, %edi + adcl %edi, %edi + movl %eax, %ebp + adcl %ebp, %ebp + movl %eax, %esi + adcl %esi, %esi + shrl $31, %ecx + orl %edx, %ecx + movl 104(%esp), %edx + andl 12(%edx), 
%esi + movl %esi, 8(%esp) # 4-byte Spill + andl 8(%edx), %ebp + andl 4(%edx), %edi + andl (%edx), %ecx + movl 44(%edx), %esi + andl %eax, %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 40(%edx), %esi + andl %eax, %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 36(%edx), %esi + andl %eax, %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 32(%edx), %esi + andl %eax, %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 28(%edx), %esi + andl %eax, %esi + movl %esi, (%esp) # 4-byte Spill + movl 24(%edx), %ebx + andl %eax, %ebx + movl 20(%edx), %esi + andl %eax, %esi + andl 16(%edx), %eax + addl 48(%esp), %ecx # 4-byte Folded Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl 92(%esp), %edx + movl %ecx, (%edx) + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %edi, 4(%edx) + movl 8(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ebp, 8(%edx) + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 12(%edx) + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %eax, 16(%edx) + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %esi, 20(%edx) + movl (%esp), %ecx # 4-byte Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 24(%edx) + movl 4(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %ecx, 28(%edx) + movl 12(%esp), %ecx # 4-byte Reload + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %eax, 32(%edx) + movl 16(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %ecx, 36(%edx) + movl %eax, 40(%edx) + movl 20(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%edx) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end187: + .size mcl_fp_subNF12Lbmi2, .Lfunc_end187-mcl_fp_subNF12Lbmi2 + + .globl mcl_fpDbl_add12Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add12Lbmi2,@function +mcl_fpDbl_add12Lbmi2: # @mcl_fpDbl_add12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + 
movl 116(%esp), %ecx + movl 112(%esp), %edi + movl 12(%edi), %esi + movl 16(%edi), %edx + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%edi), %ebp + movl 108(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%edi), %ebp + adcl 8(%edi), %ebx + adcl 12(%ecx), %esi + adcl 16(%ecx), %edx + movl %ebp, 4(%eax) + movl 56(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %esi, 12(%eax) + movl 20(%edi), %esi + adcl %ebx, %esi + movl 24(%ecx), %ebx + movl %edx, 16(%eax) + movl 24(%edi), %edx + adcl %ebx, %edx + movl 28(%ecx), %ebx + movl %esi, 20(%eax) + movl 28(%edi), %esi + adcl %ebx, %esi + movl 32(%ecx), %ebx + movl %edx, 24(%eax) + movl 32(%edi), %edx + adcl %ebx, %edx + movl 36(%ecx), %ebx + movl %esi, 28(%eax) + movl 36(%edi), %esi + adcl %ebx, %esi + movl 40(%ecx), %ebx + movl %edx, 32(%eax) + movl 40(%edi), %edx + adcl %ebx, %edx + movl 44(%ecx), %ebx + movl %esi, 36(%eax) + movl 44(%edi), %esi + adcl %ebx, %esi + movl 48(%ecx), %ebx + movl %edx, 40(%eax) + movl 48(%edi), %edx + adcl %ebx, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 52(%ecx), %ebx + movl %esi, 44(%eax) + movl 52(%edi), %eax + adcl %ebx, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 56(%edi), %eax + adcl %ebp, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%ecx), %eax + movl 60(%edi), %edx + adcl %eax, %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl 64(%edi), %edx + adcl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl 68(%edi), %edx + adcl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 72(%ecx), %eax + movl 72(%edi), %edx + adcl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 76(%ecx), %eax + movl 76(%edi), %edx + adcl %eax, %edx + movl 80(%ecx), %esi + movl 80(%edi), %eax + adcl %esi, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 84(%ecx), %ebx + movl 84(%edi), %esi + adcl %ebx, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 88(%ecx), %ebp + movl 88(%edi), %ebx + adcl %ebp, %ebx + movl 
%ebx, 56(%esp) # 4-byte Spill + movl 92(%ecx), %ecx + movl 92(%edi), %edi + adcl %ecx, %edi + movl %edi, 44(%esp) # 4-byte Spill + sbbl %ecx, %ecx + andl $1, %ecx + movl 120(%esp), %ebp + movl 72(%esp), %edi # 4-byte Reload + subl (%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + sbbl 4(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + sbbl 8(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + sbbl 12(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %edx, %edi + sbbl 28(%ebp), %edi + movl %edi, (%esp) # 4-byte Spill + sbbl 32(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill + sbbl 36(%ebp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl %ebx, %eax + movl 44(%esp), %ebx # 4-byte Reload + sbbl 40(%ebp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %edi + sbbl 44(%ebp), %edi + sbbl $0, %ecx + andl $1, %ecx + jne .LBB188_2 +# BB#1: + movl %edi, %ebx +.LBB188_2: + testb %cl, %cl + movl 72(%esp), %ecx # 4-byte Reload + movl 68(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB188_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebp # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload +.LBB188_4: + movl 108(%esp), %eax + movl %ecx, 48(%eax) + movl 76(%esp), %ecx # 4-byte Reload + 
movl %ecx, 52(%eax) + movl 80(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 84(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl %ebp, 64(%eax) + movl %edi, 68(%eax) + movl %esi, 72(%eax) + movl %edx, 76(%eax) + movl 56(%esp), %ecx # 4-byte Reload + movl 48(%esp), %edx # 4-byte Reload + jne .LBB188_6 +# BB#5: + movl 32(%esp), %edx # 4-byte Reload +.LBB188_6: + movl %edx, 80(%eax) + movl 52(%esp), %edx # 4-byte Reload + jne .LBB188_8 +# BB#7: + movl 36(%esp), %edx # 4-byte Reload +.LBB188_8: + movl %edx, 84(%eax) + jne .LBB188_10 +# BB#9: + movl 40(%esp), %ecx # 4-byte Reload +.LBB188_10: + movl %ecx, 88(%eax) + movl %ebx, 92(%eax) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end188: + .size mcl_fpDbl_add12Lbmi2, .Lfunc_end188-mcl_fpDbl_add12Lbmi2 + + .globl mcl_fpDbl_sub12Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub12Lbmi2,@function +mcl_fpDbl_sub12Lbmi2: # @mcl_fpDbl_sub12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $76, %esp + movl 100(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %edx + movl 104(%esp), %ebx + subl (%ebx), %eax + sbbl 4(%ebx), %edx + movl 8(%esi), %edi + sbbl 8(%ebx), %edi + movl 96(%esp), %ecx + movl %eax, (%ecx) + movl 12(%esi), %eax + sbbl 12(%ebx), %eax + movl %edx, 4(%ecx) + movl 16(%esi), %edx + sbbl 16(%ebx), %edx + movl %edi, 8(%ecx) + movl 20(%ebx), %edi + movl %eax, 12(%ecx) + movl 20(%esi), %eax + sbbl %edi, %eax + movl 24(%ebx), %edi + movl %edx, 16(%ecx) + movl 24(%esi), %edx + sbbl %edi, %edx + movl 28(%ebx), %edi + movl %eax, 20(%ecx) + movl 28(%esi), %eax + sbbl %edi, %eax + movl 32(%ebx), %edi + movl %edx, 24(%ecx) + movl 32(%esi), %edx + sbbl %edi, %edx + movl 36(%ebx), %edi + movl %eax, 28(%ecx) + movl 36(%esi), %eax + sbbl %edi, %eax + movl 40(%ebx), %edi + movl %edx, 32(%ecx) + movl 40(%esi), %edx + sbbl %edi, %edx + movl 44(%ebx), %edi + movl %eax, 36(%ecx) + movl 44(%esi), %eax + sbbl %edi, %eax + movl 48(%ebx), %edi + movl %edx, 40(%ecx) + 
movl 48(%esi), %edx + sbbl %edi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 52(%ebx), %edx + movl %eax, 44(%ecx) + movl 52(%esi), %eax + sbbl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 56(%ebx), %eax + movl 56(%esi), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 60(%ebx), %eax + movl 60(%esi), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 64(%ebx), %eax + movl 64(%esi), %edx + sbbl %eax, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 68(%ebx), %eax + movl 68(%esi), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 72(%ebx), %eax + movl 72(%esi), %edx + sbbl %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 76(%ebx), %eax + movl 76(%esi), %edx + sbbl %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 80(%ebx), %eax + movl 80(%esi), %edx + sbbl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 84(%ebx), %eax + movl 84(%esi), %edx + sbbl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 88(%ebx), %eax + movl 88(%esi), %edx + sbbl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 92(%ebx), %eax + movl 92(%esi), %edx + sbbl %eax, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 108(%esp), %ebp + jne .LBB189_1 +# BB#2: + movl $0, 36(%esp) # 4-byte Folded Spill + jmp .LBB189_3 +.LBB189_1: + movl 44(%ebp), %edx + movl %edx, 36(%esp) # 4-byte Spill +.LBB189_3: + testb %al, %al + jne .LBB189_4 +# BB#5: + movl $0, 12(%esp) # 4-byte Folded Spill + movl $0, %esi + jmp .LBB189_6 +.LBB189_4: + movl (%ebp), %esi + movl 4(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB189_6: + jne .LBB189_7 +# BB#8: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB189_9 +.LBB189_7: + movl 40(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB189_9: + jne .LBB189_10 +# BB#11: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB189_12 +.LBB189_10: + movl 36(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB189_12: + jne .LBB189_13 +# 
BB#14: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB189_15 +.LBB189_13: + movl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB189_15: + jne .LBB189_16 +# BB#17: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB189_18 +.LBB189_16: + movl 28(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB189_18: + jne .LBB189_19 +# BB#20: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB189_21 +.LBB189_19: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB189_21: + jne .LBB189_22 +# BB#23: + movl $0, %ebx + jmp .LBB189_24 +.LBB189_22: + movl 20(%ebp), %ebx +.LBB189_24: + jne .LBB189_25 +# BB#26: + movl $0, %eax + jmp .LBB189_27 +.LBB189_25: + movl 16(%ebp), %eax +.LBB189_27: + jne .LBB189_28 +# BB#29: + movl %ebp, %edx + movl $0, %ebp + jmp .LBB189_30 +.LBB189_28: + movl %ebp, %edx + movl 12(%edx), %ebp +.LBB189_30: + jne .LBB189_31 +# BB#32: + xorl %edx, %edx + jmp .LBB189_33 +.LBB189_31: + movl 8(%edx), %edx +.LBB189_33: + addl 32(%esp), %esi # 4-byte Folded Reload + movl 12(%esp), %edi # 4-byte Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %esi, 48(%ecx) + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edi, 52(%ecx) + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %edx, 56(%ecx) + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %ebp, 60(%ecx) + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %eax, 64(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %ebx, 68(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %eax, 72(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %edx, 76(%ecx) + movl 16(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %eax, 80(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %edx, 84(%ecx) + movl %eax, 88(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + 
movl %eax, 92(%ecx) + addl $76, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end189: + .size mcl_fpDbl_sub12Lbmi2, .Lfunc_end189-mcl_fpDbl_sub12Lbmi2 + + .align 16, 0x90 + .type .LmulPv416x32,@function +.LmulPv416x32: # @mulPv416x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl %edx, %eax + movl 64(%esp), %ebx + movl %ebx, %edx + mulxl 4(%eax), %esi, %ebp + movl %ebx, %edx + mulxl (%eax), %edi, %edx + movl %edi, 40(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 8(%eax), %edx, %esi + adcl %ebp, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 12(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 16(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 20(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 24(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 28(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 32(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 36(%eax), %edi, %ebp + adcl %esi, %edi + movl %ebx, %edx + mulxl 40(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebp, %esi + movl %ebx, %edx + mulxl 44(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 36(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 32(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%ecx) + movl 12(%esp), %ebp # 
4-byte Reload + movl %ebp, 28(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%ecx) + movl %edi, 36(%ecx) + movl %esi, 40(%ecx) + movl %edx, 44(%ecx) + movl %ebx, %edx + mulxl 48(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%ecx) + adcl $0, %edx + movl %edx, 52(%ecx) + movl %ecx, %eax + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end190: + .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32 + + .globl mcl_fp_mulUnitPre13Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre13Lbmi2,@function +mcl_fp_mulUnitPre13Lbmi2: # @mcl_fp_mulUnitPre13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + calll .L191$pb +.L191$pb: + popl %ebx +.Ltmp32: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp32-.L191$pb), %ebx + movl 136(%esp), %eax + movl %eax, (%esp) + leal 48(%esp), %ecx + movl 132(%esp), %edx + calll .LmulPv416x32 + movl 100(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 68(%esp), %ebp + movl 64(%esp), %ebx + movl 60(%esp), %edi + movl 56(%esp), %esi + movl 48(%esp), %edx + movl 52(%esp), %ecx + movl 128(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 
4-byte Reload + movl %ecx, 44(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end191: + .size mcl_fp_mulUnitPre13Lbmi2, .Lfunc_end191-mcl_fp_mulUnitPre13Lbmi2 + + .globl mcl_fpDbl_mulPre13Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre13Lbmi2,@function +mcl_fpDbl_mulPre13Lbmi2: # @mcl_fpDbl_mulPre13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $844, %esp # imm = 0x34C + calll .L192$pb +.L192$pb: + popl %edi +.Ltmp33: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp33-.L192$pb), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 872(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 868(%esp), %edx + movl %edx, %esi + movl %edi, %ebx + calll .LmulPv416x32 + movl 836(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 804(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 784(%esp), %eax + movl 788(%esp), %ebp + movl 864(%esp), %ecx + movl %eax, (%ecx) + movl 872(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 728(%esp), %ecx + movl %esi, %edx + movl %edi, %ebx + calll .LmulPv416x32 + addl 728(%esp), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 780(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 776(%esp), %eax + movl %eax, 96(%esp) # 4-byte 
Spill + movl 772(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 748(%esp), %edi + movl 744(%esp), %esi + movl 740(%esp), %edx + movl 732(%esp), %eax + movl 736(%esp), %ecx + movl 864(%esp), %ebp + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte 
Reload + calll .LmulPv416x32 + movl 60(%esp), %eax # 4-byte Reload + addl 672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 724(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 720(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 716(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 712(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 708(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 696(%esp), %ebx + movl 692(%esp), %edi + movl 688(%esp), %esi + movl 684(%esp), %edx + movl 676(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl 864(%esp), %eax + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 
100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 616(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 660(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 656(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 652(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 648(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 644(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 640(%esp), %ebx + movl 636(%esp), %edi + movl 632(%esp), %esi + movl 628(%esp), %edx + movl 620(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 624(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 
100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 560(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 584(%esp), %ebx + movl 580(%esp), %edi + movl 576(%esp), %esi + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 864(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte 
Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 504(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 528(%esp), %ebx + movl 524(%esp), %edi + movl 520(%esp), %esi + movl 516(%esp), %edx + movl 508(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 512(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 
100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 448(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 448(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 472(%esp), %ebp + movl 468(%esp), %edi + movl 464(%esp), %esi + movl 460(%esp), %edx + movl 452(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 456(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%eax) + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + 
movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 392(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 392(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 444(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 436(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %ebx + movl 412(%esp), %edi + movl 408(%esp), %esi + movl 404(%esp), %edx + movl 396(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 400(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte 
Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 336(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 336(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 360(%esp), %ebp + movl 356(%esp), %edi + movl 352(%esp), %esi + movl 348(%esp), %edx + movl 340(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %ecx + movl 864(%esp), %eax + movl 60(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 
4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 280(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 304(%esp), %ebx + movl 300(%esp), %edi + movl 296(%esp), %esi + movl 292(%esp), %edx + movl 284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 288(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), 
%ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 224(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 248(%esp), %ebx + movl 244(%esp), %edi + movl 240(%esp), %esi + movl 236(%esp), %edx + movl 228(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 232(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebp # 
4-byte Reload + movl %ebp, 40(%eax) + movl 60(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %edi + movl 44(%edi), %eax + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 868(%esp), %eax + movl %eax, %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 60(%esp), %esi # 4-byte Reload + addl 168(%esp), %esi + movl 220(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 204(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 200(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 196(%esp), %ebp + movl 192(%esp), %ebx + movl 
188(%esp), %edi + movl 184(%esp), %edx + movl 180(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 176(%esp), %ecx + movl 864(%esp), %eax + movl %esi, 44(%eax) + movl 68(%esp), %esi # 4-byte Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 104(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 112(%esp), %esi + movl %esi, %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 120(%esp), %edi + movl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 92(%esp) # 4-byte 
Spill + movl 152(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 148(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 140(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 136(%esp), %ebx + movl 132(%esp), %esi + movl 128(%esp), %edx + movl 124(%esp), %ecx + movl 864(%esp), %eax + movl %ebp, 48(%eax) + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %edi, 56(%eax) + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %ecx, 60(%eax) + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %edx, 64(%eax) + adcl 104(%esp), %ebx # 4-byte Folded Reload + movl %esi, 68(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 72(%eax) + movl 60(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + movl 76(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx # 4-byte Folded Reload + movl %edx, 80(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %ecx, 84(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) + movl %ecx, 92(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 100(%eax) + addl $844, %esp # imm = 0x34C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end192: + .size mcl_fpDbl_mulPre13Lbmi2, .Lfunc_end192-mcl_fpDbl_mulPre13Lbmi2 + + .globl mcl_fpDbl_sqrPre13Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre13Lbmi2,@function +mcl_fpDbl_sqrPre13Lbmi2: # @mcl_fpDbl_sqrPre13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $844, %esp # imm = 0x34C + calll .L193$pb +.L193$pb: + popl %ebx +.Ltmp34: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp34-.L193$pb), %ebx + movl %ebx, 108(%esp) # 4-byte 
Spill + movl 868(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl %edx, %edi + movl %ebx, %esi + calll .LmulPv416x32 + movl 836(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 804(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 784(%esp), %eax + movl 788(%esp), %ebp + movl 864(%esp), %ecx + movl %eax, (%ecx) + movl %edi, %edx + movl 4(%edx), %eax + movl %eax, (%esp) + leal 728(%esp), %ecx + movl %esi, %ebx + calll .LmulPv416x32 + addl 728(%esp), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 780(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 776(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 772(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 748(%esp), %edi + movl 744(%esp), %esi + movl 740(%esp), %edx + movl 732(%esp), %eax + movl 736(%esp), %ecx + movl 864(%esp), %ebp + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 
32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 60(%esp), %eax # 4-byte Reload + addl 672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 724(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 720(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 716(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 712(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 708(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 696(%esp), %ebx + movl 692(%esp), %edi + movl 688(%esp), %esi + movl 684(%esp), %edx + movl 676(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl 864(%esp), %eax + movl 
60(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 616(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 660(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 656(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 652(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 648(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 644(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 640(%esp), 
%ebx + movl 636(%esp), %edi + movl 632(%esp), %esi + movl 628(%esp), %edx + movl 620(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 624(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 560(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl 
%eax, 40(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 584(%esp), %ebx + movl 580(%esp), %edi + movl 576(%esp), %esi + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 864(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 504(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 
72(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 528(%esp), %ebx + movl 524(%esp), %edi + movl 520(%esp), %esi + movl 516(%esp), %edx + movl 508(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 512(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 448(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 448(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 
4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 472(%esp), %ebp + movl 468(%esp), %edi + movl 464(%esp), %esi + movl 460(%esp), %edx + movl 452(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 456(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%eax) + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 392(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 392(%esp), 
%eax + movl %eax, 104(%esp) # 4-byte Spill + movl 444(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 436(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %ebx + movl 412(%esp), %edi + movl 408(%esp), %esi + movl 404(%esp), %edx + movl 396(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 400(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 32(%edx), %eax + movl %eax, 
(%esp) + leal 336(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 336(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 360(%esp), %ebp + movl 356(%esp), %edi + movl 352(%esp), %esi + movl 348(%esp), %edx + movl 340(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %ecx + movl 864(%esp), %eax + movl 60(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte 
Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 280(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 304(%esp), %ebx + movl 300(%esp), %edi + movl 296(%esp), %esi + movl 292(%esp), %edx + movl 284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 288(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + 
movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 40(%edx), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 224(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 248(%esp), %ebx + movl 244(%esp), %edi + movl 240(%esp), %esi + movl 236(%esp), %edx + movl 228(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 232(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 60(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax 
# 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 44(%edx), %eax + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 60(%esp), %esi # 4-byte Reload + addl 168(%esp), %esi + movl 220(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 204(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 200(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 196(%esp), %ebp + movl 192(%esp), %ebx + movl 188(%esp), %edi + movl 184(%esp), %edx + movl 180(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 176(%esp), %ecx + movl 864(%esp), %eax + movl %esi, 44(%eax) + movl 68(%esp), %esi # 4-byte Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 104(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte 
Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 48(%edx), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 112(%esp), %esi + movl %esi, %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 120(%esp), %edi + movl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 152(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 148(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 140(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 136(%esp), %ebx + movl 132(%esp), %esi + movl 128(%esp), %edx + movl 124(%esp), %ecx + movl 864(%esp), %eax + movl %ebp, 48(%eax) + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %edi, 56(%eax) + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %ecx, 60(%eax) + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %edx, 64(%eax) + adcl 104(%esp), %ebx # 4-byte Folded Reload + movl %esi, 68(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 72(%eax) + movl 60(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + movl 76(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx # 4-byte Folded Reload + movl %edx, 
80(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %ecx, 84(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) + movl %ecx, 92(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 100(%eax) + addl $844, %esp # imm = 0x34C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end193: + .size mcl_fpDbl_sqrPre13Lbmi2, .Lfunc_end193-mcl_fpDbl_sqrPre13Lbmi2 + + .globl mcl_fp_mont13Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont13Lbmi2,@function +mcl_fp_mont13Lbmi2: # @mcl_fp_mont13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1548, %esp # imm = 0x60C + calll .L194$pb +.L194$pb: + popl %ebx +.Ltmp35: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp35-.L194$pb), %ebx + movl 1580(%esp), %eax + movl -4(%eax), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1488(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1488(%esp), %esi + movl 1492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %esi, %eax + imull %edi, %eax + movl 1540(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1536(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1532(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1528(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1524(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1520(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1516(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1512(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1508(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1504(%esp), %edi + movl 1500(%esp), %ebp + movl 1496(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1432(%esp), %ecx + movl 1580(%esp), %edx + calll 
.LmulPv416x32 + addl 1432(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1444(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + adcl 1448(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1472(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1484(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 1576(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1376(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + andl $1, %esi + movl 76(%esp), %ecx # 4-byte Reload + addl 1376(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1388(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill 
+ movl 44(%esp), %edi # 4-byte Reload + adcl 1404(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1412(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 1420(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1428(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1320(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %esi + movl %esi, %eax + movl 76(%esp), %ecx # 4-byte Reload + addl 1320(%esp), %ecx + movl 84(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1328(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1336(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 1340(%esp), %esi + movl 36(%esp), %ecx # 4-byte Reload + adcl 1344(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 1348(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1352(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1356(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1360(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1364(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1368(%esp), %ebp + movl 64(%esp), %edi # 4-byte Reload + adcl 1372(%esp), %edi + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 
8(%eax), %eax + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 1264(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 1280(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1300(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1308(%esp), %ebp + adcl 1312(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 1580(%esp), %eax + movl %eax, %edx + calll .LmulPv416x32 + movl 84(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1208(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 1212(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1216(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 1224(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 
36(%esp), %ecx # 4-byte Reload + adcl 1228(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 1244(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1248(%esp), %edi + adcl 1252(%esp), %ebp + movl %ebp, %esi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1256(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1260(%esp), %ebp + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1152(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 1152(%esp), %ecx + movl 40(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1188(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1192(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1200(%esp), 
%ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1096(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %ebp + movl %ebp, %eax + addl 1096(%esp), %esi + movl 40(%esp), %ecx # 4-byte Reload + adcl 1100(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1104(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 1108(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1112(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1116(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1120(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1124(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 1128(%esp), %edi + movl 72(%esp), %esi # 4-byte Reload + adcl 1132(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1136(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1140(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1144(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1040(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 1040(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 32(%esp) 
# 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1064(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 1068(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 1072(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1084(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %ebp + movl %ebp, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %edi + addl 984(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 996(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + 
movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1028(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %edi + movl 1576(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 928(%esp), %ecx + movl 32(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 936(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 944(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 980(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 872(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %edi + addl 872(%esp), %ebp + 
movl 32(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 884(%esp), %ebp + adcl 888(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 904(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 816(%esp), %ecx + movl 1572(%esp), %eax + movl %eax, %edx + calll .LmulPv416x32 + movl 32(%esp), %ecx # 4-byte Reload + addl 816(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 824(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 72(%esp) 
# 4-byte Spill + adcl 844(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 848(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 856(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %esi + movl %esi, %eax + movl 32(%esp), %ecx # 4-byte Reload + addl 760(%esp), %ecx + movl 36(%esp), %ecx # 4-byte Reload + adcl 764(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 768(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 772(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 776(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 784(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 792(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 796(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 800(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 804(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 808(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 812(%esp), %edi + adcl $0, %eax + 
movl %eax, 32(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 36(%esp), %eax # 4-byte Reload + addl 704(%esp), %eax + movl 44(%esp), %ecx # 4-byte Reload + adcl 708(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 712(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 720(%esp), %ebp + adcl 724(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 728(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 732(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 736(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 740(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 744(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 748(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 752(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 756(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %eax, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl %edi, %eax + andl $1, %eax + addl 648(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + adcl 652(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 656(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 660(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 664(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 
4-byte Reload + adcl 668(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 672(%esp), %edi + movl 64(%esp), %esi # 4-byte Reload + adcl 676(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 680(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 684(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 688(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 696(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 44(%esp), %ecx # 4-byte Reload + addl 592(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 600(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 612(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + adcl 616(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 620(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload 
+ adcl 640(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 536(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl 44(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 536(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 544(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 560(%esp), %esi + adcl 564(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 572(%esp), %edi + movl 48(%esp), %ebp # 4-byte Reload + adcl 576(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 480(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 480(%esp), %ecx + movl 52(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 
72(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 500(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 512(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + adcl 516(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl %edi, %ecx + andl $1, %ecx + addl 424(%esp), %esi + movl 52(%esp), %ebp # 4-byte Reload + adcl 428(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 444(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 
4-byte Reload + adcl 464(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + addl 368(%esp), %ebp + adcl 372(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 376(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 392(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %ebp, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl 52(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 312(%esp), %ebp + movl 
68(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 320(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 328(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 336(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 340(%esp), %edi + movl 48(%esp), %esi # 4-byte Reload + adcl 344(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 256(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 268(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 280(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + adcl 284(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 288(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload 
+ adcl 292(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %ebp + movl %ebp, %ecx + addl 200(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 208(%esp), %ebp + movl 64(%esp), %esi # 4-byte Reload + adcl 212(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 232(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 236(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 1572(%esp), %edx + calll 
.LmulPv416x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 144(%esp), %ecx + adcl 148(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 152(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 156(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 176(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %edi + addl 88(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + movl 84(%esp), %esi # 4-byte Reload + adcl 92(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 100(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 104(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebx # 4-byte Reload + adcl 112(%esp), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 40(%esp), %ebx # 4-byte 
Reload + adcl 116(%esp), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 32(%esp), %ebx # 4-byte Reload + adcl 120(%esp), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + adcl 124(%esp), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ebx # 4-byte Reload + adcl 128(%esp), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload + adcl 132(%esp), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl 136(%esp), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ebx # 4-byte Reload + adcl 140(%esp), %ebx + movl %ebx, 68(%esp) # 4-byte Spill + adcl $0, %edi + movl 1580(%esp), %ebx + subl (%ebx), %eax + sbbl 4(%ebx), %ecx + sbbl 8(%ebx), %ebp + sbbl 12(%ebx), %esi + movl %esi, 4(%esp) # 4-byte Spill + sbbl 16(%ebx), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + sbbl 20(%ebx), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + sbbl 24(%ebx), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + sbbl 28(%ebx), %edx + movl 36(%esp), %esi # 4-byte Reload + sbbl 32(%ebx), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 36(%ebx), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 40(%ebx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + sbbl 44(%ebx), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + sbbl 48(%ebx), %esi + movl %esi, %ebx + sbbl $0, %edi + andl $1, %edi + jne .LBB194_2 +# BB#1: + movl %edx, 32(%esp) # 4-byte Spill +.LBB194_2: + movl %edi, %edx + testb %dl, %dl + movl 80(%esp), %edx # 4-byte Reload + jne .LBB194_4 +# BB#3: + movl %eax, %edx +.LBB194_4: + movl 1568(%esp), %eax + movl %edx, (%eax) + movl 64(%esp), %esi # 4-byte Reload + jne .LBB194_6 +# BB#5: + movl %ecx, %esi +.LBB194_6: + movl %esi, 4(%eax) + jne 
.LBB194_8 +# BB#7: + movl %ebp, 76(%esp) # 4-byte Spill +.LBB194_8: + movl 76(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB194_10 +# BB#9: + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, 84(%esp) # 4-byte Spill +.LBB194_10: + movl 84(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + jne .LBB194_12 +# BB#11: + movl 8(%esp), %ebp # 4-byte Reload +.LBB194_12: + movl %ebp, 16(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB194_14 +# BB#13: + movl 12(%esp), %ecx # 4-byte Reload +.LBB194_14: + movl %ecx, 20(%eax) + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB194_16 +# BB#15: + movl 16(%esp), %ecx # 4-byte Reload +.LBB194_16: + movl %ecx, 24(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB194_18 +# BB#17: + movl 20(%esp), %ecx # 4-byte Reload +.LBB194_18: + movl %ecx, 32(%eax) + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB194_20 +# BB#19: + movl 24(%esp), %ecx # 4-byte Reload +.LBB194_20: + movl %ecx, 36(%eax) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB194_22 +# BB#21: + movl 28(%esp), %ecx # 4-byte Reload +.LBB194_22: + movl %ecx, 40(%eax) + movl 52(%esp), %ecx # 4-byte Reload + jne .LBB194_24 +# BB#23: + movl 72(%esp), %ecx # 4-byte Reload +.LBB194_24: + movl %ecx, 44(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB194_26 +# BB#25: + movl %ebx, %ecx +.LBB194_26: + movl %ecx, 48(%eax) + addl $1548, %esp # imm = 0x60C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end194: + .size mcl_fp_mont13Lbmi2, .Lfunc_end194-mcl_fp_mont13Lbmi2 + + .globl mcl_fp_montNF13Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF13Lbmi2,@function +mcl_fp_montNF13Lbmi2: # @mcl_fp_montNF13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1548, %esp # imm = 0x60C + calll .L195$pb +.L195$pb: + popl %ebx +.Ltmp36: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp36-.L195$pb), %ebx + movl 1580(%esp), %eax + movl -4(%eax), %esi + movl 
%esi, 32(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1488(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1488(%esp), %edi + movl 1492(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 1540(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1536(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1532(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1528(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1524(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1520(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1516(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1512(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1508(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1504(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1500(%esp), %esi + movl 1496(%esp), %ebp + movl %eax, (%esp) + leal 1432(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 1432(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1440(%esp), %ebp + adcl 1444(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1448(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1472(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + 
movl 76(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 1484(%esp), %edi + movl 1576(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1376(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1428(%esp), %ecx + movl 80(%esp), %edx # 4-byte Reload + addl 1376(%esp), %edx + adcl 1380(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1412(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1424(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1320(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 1320(%esp), %esi + adcl 1324(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 
36(%esp), %esi # 4-byte Reload + adcl 1340(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1360(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1316(%esp), %eax + addl 1264(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 1268(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1272(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1276(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 1280(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 1284(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 1288(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 1300(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1312(%esp), %ecx + 
movl %ecx, 80(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ebp, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 1208(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 1228(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1244(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1248(%esp), %esi + movl 84(%esp), %edi # 4-byte Reload + adcl 1252(%esp), %edi + movl 80(%esp), %ebp # 4-byte Reload + adcl 1256(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1260(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1152(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1204(%esp), %eax + movl 64(%esp), %edx # 4-byte Reload + addl 1152(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 1156(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1160(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1164(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1168(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 
52(%esp), %ecx # 4-byte Reload + adcl 1172(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1176(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1180(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1184(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 1188(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl 1192(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + adcl 1196(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 1200(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1096(%esp), %ecx + movl 1580(%esp), %eax + movl %eax, %edx + calll .LmulPv416x32 + addl 1096(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 1116(%esp), %esi + movl 56(%esp), %edi # 4-byte Reload + adcl 1120(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1140(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp 
# 4-byte Reload + adcl 1148(%esp), %ebp + movl 1576(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1040(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1092(%esp), %eax + movl 40(%esp), %edx # 4-byte Reload + addl 1040(%esp), %edx + movl 48(%esp), %ecx # 4-byte Reload + adcl 1044(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 1056(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 1060(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1064(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1072(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1084(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1088(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl %eax, %esi + adcl $0, %esi + movl %edx, %edi + movl %edi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 984(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 996(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 
56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 1008(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1036(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 980(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + addl 928(%esp), %ecx + movl 36(%esp), %edx # 4-byte Reload + adcl 932(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 936(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 940(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 944(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 948(%esp), %ebp + movl 72(%esp), %edx # 4-byte Reload + adcl 952(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 956(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 960(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 964(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 968(%esp), %esi + movl 64(%esp), %edx # 4-byte Reload + adcl 972(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 976(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl $0, 
%eax + movl %eax, 48(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 872(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 872(%esp), %edi + movl 36(%esp), %edi # 4-byte Reload + adcl 876(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 892(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 912(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 816(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 868(%esp), %edx + addl 816(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 832(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 836(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + 
adcl 840(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 860(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %edi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 760(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 780(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 784(%esp), %esi + movl 84(%esp), %edi # 4-byte Reload + adcl 788(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 804(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 
704(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 756(%esp), %eax + movl 44(%esp), %ecx # 4-byte Reload + addl 704(%esp), %ecx + movl 52(%esp), %edx # 4-byte Reload + adcl 708(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 712(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 716(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 720(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + adcl 724(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl 728(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 732(%esp), %esi + movl 68(%esp), %edx # 4-byte Reload + adcl 736(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 740(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 744(%esp), %ebp + movl 48(%esp), %edx # 4-byte Reload + adcl 748(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 752(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 648(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 676(%esp), %esi + movl 
%esi, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 688(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 696(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 644(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + addl 592(%esp), %ecx + movl 56(%esp), %esi # 4-byte Reload + adcl 596(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 624(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 636(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 536(%esp), 
%ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 536(%esp), %edi + adcl 540(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 556(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 564(%esp), %esi + adcl 568(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 572(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 480(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 532(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 480(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 496(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 504(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 508(%esp), %edi + adcl 512(%esp), %ebp + movl %ebp, 40(%esp) # 
4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 528(%esp), %ebp + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 424(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edi, %esi + adcl 452(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 460(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 472(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 420(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + addl 368(%esp), %ecx + movl 72(%esp), %ebp # 
4-byte Reload + adcl 372(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 392(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 400(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 404(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 312(%esp), %esi + adcl 316(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 320(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 348(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 352(%esp), 
%eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 308(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + addl 256(%esp), %ecx + adcl 260(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 272(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 288(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 200(%esp), %esi + adcl 204(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 
80(%esp), %ebp # 4-byte Reload + adcl 212(%esp), %ebp + movl 68(%esp), %esi # 4-byte Reload + adcl 216(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 228(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 196(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + addl 144(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 152(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 156(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 164(%esp), %ebp + adcl 168(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 56(%esp) # 
4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 88(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + movl 68(%esp), %edi # 4-byte Reload + adcl 92(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 100(%esp), %edi + movl 64(%esp), %ebx # 4-byte Reload + adcl 104(%esp), %ebx + movl %ebx, 64(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl %ebp, %esi + movl 48(%esp), %edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 124(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 128(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 132(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 136(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 140(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, %edx + movl 1580(%esp), %eax + subl (%eax), %edx + movl %ecx, %ebp + sbbl 4(%eax), %ebp + movl %edi, %ecx + sbbl 8(%eax), %ecx + sbbl 12(%eax), %ebx + sbbl 16(%eax), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 20(%eax), %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 36(%esp), %esi 
# 4-byte Reload + sbbl 24(%eax), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 28(%eax), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + sbbl 32(%eax), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 36(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + sbbl 40(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + sbbl 44(%eax), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 48(%eax), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl %esi, %eax + sarl $31, %eax + testl %eax, %eax + movl 84(%esp), %eax # 4-byte Reload + js .LBB195_2 +# BB#1: + movl %edx, %eax +.LBB195_2: + movl 1568(%esp), %edx + movl %eax, (%edx) + movl 80(%esp), %esi # 4-byte Reload + js .LBB195_4 +# BB#3: + movl %ebp, %esi +.LBB195_4: + movl %esi, 4(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB195_6 +# BB#5: + movl %ecx, %edi +.LBB195_6: + movl %edi, 8(%edx) + js .LBB195_8 +# BB#7: + movl %ebx, %eax +.LBB195_8: + movl %eax, 12(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB195_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB195_10: + movl %eax, 16(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB195_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB195_12: + movl %eax, 20(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB195_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB195_14: + movl %eax, 24(%edx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB195_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB195_16: + movl %eax, 28(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB195_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB195_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB195_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB195_20: + movl %eax, 36(%edx) + movl 60(%esp), %eax # 4-byte 
Reload + js .LBB195_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB195_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB195_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB195_24: + movl %eax, 44(%edx) + movl 76(%esp), %eax # 4-byte Reload + js .LBB195_26 +# BB#25: + movl 68(%esp), %eax # 4-byte Reload +.LBB195_26: + movl %eax, 48(%edx) + addl $1548, %esp # imm = 0x60C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end195: + .size mcl_fp_montNF13Lbmi2, .Lfunc_end195-mcl_fp_montNF13Lbmi2 + + .globl mcl_fp_montRed13Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed13Lbmi2,@function +mcl_fp_montRed13Lbmi2: # @mcl_fp_montRed13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $892, %esp # imm = 0x37C + calll .L196$pb +.L196$pb: + popl %eax +.Ltmp37: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp37-.L196$pb), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 920(%esp), %edx + movl -4(%edx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 916(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 80(%esp) # 4-byte Spill + imull %eax, %ebx + movl 100(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 96(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl %esi, 136(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 144(%esp) # 4-byte Spill + movl 72(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 68(%ecx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 64(%ecx), %esi + movl %esi, 148(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 152(%esp) # 4-byte Spill + movl 56(%ecx), %esi + movl %esi, 140(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 120(%esp) # 4-byte 
Spill + movl 44(%ecx), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + movl 40(%ecx), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 36(%ecx), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %edi + movl 20(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 12(%ecx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 8(%ecx), %esi + movl (%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 832(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 76(%esp), %eax # 4-byte Reload + addl 832(%esp), %eax + movl 80(%esp), %ecx # 4-byte Reload + adcl 836(%esp), %ecx + adcl 840(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 856(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 860(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 
864(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %edi + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + andl $1, %esi + addl 776(%esp), %edi + movl 80(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 
104(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + movl 132(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %edi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 720(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 720(%esp), %esi + movl 56(%esp), %esi # 4-byte Reload + adcl 724(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 
748(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 132(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl 100(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 664(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 668(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 
124(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 96(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 608(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 608(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 612(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 640(%esp), 
%eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 144(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 92(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 552(%esp), %esi + movl 68(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 
588(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 144(%esp) # 4-byte Spill + movl 136(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 496(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %ebp # 4-byte Reload + adcl 532(%esp), %ebp + movl 148(%esp), %edi # 4-byte Reload + adcl 536(%esp), %edi + movl 128(%esp), %eax # 4-byte Reload + adcl 540(%esp), 
%eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 136(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 440(%esp), %esi + movl 88(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl 472(%esp), %ebp + movl %ebp, 152(%esp) # 4-byte Spill + adcl 476(%esp), %edi + movl %edi, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte 
Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 384(%esp), %esi + movl 104(%esp), %ecx # 4-byte Reload + adcl 388(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %ebp # 4-byte Reload + adcl 404(%esp), %ebp + movl 140(%esp), %edi # 4-byte Reload + adcl 408(%esp), %edi + movl 152(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %esi # 4-byte Reload + adcl 420(%esp), %esi + movl 132(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 920(%esp), %eax + movl %eax, 
%edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 328(%esp), %eax + movl 108(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 344(%esp), %ebp + movl %ebp, 156(%esp) # 4-byte Spill + adcl 348(%esp), %edi + movl %edi, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl 360(%esp), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ecx, %edi + movl %edi, %eax + movl 72(%esp), %esi # 4-byte Reload + imull %esi, %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 272(%esp), %edi + movl 124(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl 120(%esp), %edi # 4-byte Reload + adcl 280(%esp), %edi + movl 156(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 140(%esp), %ecx # 4-byte Reload + adcl 288(%esp), %ecx + movl %ecx, 
140(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 292(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 296(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 300(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 304(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 308(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 136(%esp), %ecx # 4-byte Reload + adcl 312(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 316(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 324(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl %eax, %ebp + imull %esi, %eax + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 216(%esp), %ebp + movl %edi, %ecx + adcl 220(%esp), %ecx + movl 156(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %ebp # 4-byte Reload + adcl 228(%esp), %ebp + movl 152(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + adcl 244(%esp), %edi + movl 144(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 136(%esp) # 
4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 160(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 160(%esp), %esi + movl 156(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl 168(%esp), %ebp + movl %ebp, 140(%esp) # 4-byte Spill + movl %ebp, %ebx + movl 152(%esp), %ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 176(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl %eax, %edx + movl %edi, %eax + adcl 184(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %esi # 
4-byte Reload + adcl $0, %esi + movl 156(%esp), %edi # 4-byte Reload + subl 12(%esp), %edi # 4-byte Folded Reload + sbbl 4(%esp), %ebx # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + sbbl 16(%esp), %ebp # 4-byte Folded Reload + sbbl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl 132(%esp), %edx # 4-byte Reload + sbbl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl 144(%esp), %edx # 4-byte Reload + sbbl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl 136(%esp), %edx # 4-byte Reload + sbbl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + sbbl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl 112(%esp), %edx # 4-byte Reload + sbbl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 104(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + sbbl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + sbbl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 120(%esp) # 4-byte Spill + movl %eax, %edx + movl %esi, %eax + sbbl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 124(%esp) # 4-byte Spill + sbbl $0, %eax + andl $1, %eax + jne .LBB196_2 +# BB#1: + movl %ebp, 148(%esp) # 4-byte Spill +.LBB196_2: + testb %al, %al + movl 156(%esp), %ebp # 4-byte Reload + jne .LBB196_4 +# BB#3: + movl %edi, %ebp +.LBB196_4: + movl 912(%esp), %edi + movl %ebp, (%edi) + movl 140(%esp), %ebp # 4-byte Reload + jne .LBB196_6 +# BB#5: + movl %ebx, %ebp +.LBB196_6: + movl %ebp, 4(%edi) + movl 152(%esp), %ebx # 4-byte Reload + jne .LBB196_8 +# BB#7: + movl %ecx, %ebx +.LBB196_8: + movl %ebx, 8(%edi) + movl 148(%esp), %esi # 4-byte Reload + movl %esi, 12(%edi) + movl 116(%esp), %ebx # 4-byte Reload + movl 128(%esp), %esi # 4-byte Reload + jne .LBB196_10 +# BB#9: + movl 72(%esp), %esi # 4-byte Reload 
+.LBB196_10: + movl %esi, 16(%edi) + movl 112(%esp), %esi # 4-byte Reload + movl 132(%esp), %edx # 4-byte Reload + jne .LBB196_12 +# BB#11: + movl 76(%esp), %edx # 4-byte Reload +.LBB196_12: + movl %edx, 20(%edi) + movl 96(%esp), %edx # 4-byte Reload + movl 144(%esp), %ecx # 4-byte Reload + jne .LBB196_14 +# BB#13: + movl 80(%esp), %ecx # 4-byte Reload +.LBB196_14: + movl %ecx, 24(%edi) + movl 100(%esp), %ecx # 4-byte Reload + movl 136(%esp), %eax # 4-byte Reload + jne .LBB196_16 +# BB#15: + movl 84(%esp), %eax # 4-byte Reload +.LBB196_16: + movl %eax, 28(%edi) + movl 92(%esp), %eax # 4-byte Reload + jne .LBB196_18 +# BB#17: + movl 88(%esp), %ebx # 4-byte Reload +.LBB196_18: + movl %ebx, 32(%edi) + jne .LBB196_20 +# BB#19: + movl 104(%esp), %esi # 4-byte Reload +.LBB196_20: + movl %esi, 36(%edi) + jne .LBB196_22 +# BB#21: + movl 108(%esp), %edx # 4-byte Reload +.LBB196_22: + movl %edx, 40(%edi) + jne .LBB196_24 +# BB#23: + movl 120(%esp), %ecx # 4-byte Reload +.LBB196_24: + movl %ecx, 44(%edi) + jne .LBB196_26 +# BB#25: + movl 124(%esp), %eax # 4-byte Reload +.LBB196_26: + movl %eax, 48(%edi) + addl $892, %esp # imm = 0x37C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end196: + .size mcl_fp_montRed13Lbmi2, .Lfunc_end196-mcl_fp_montRed13Lbmi2 + + .globl mcl_fp_addPre13Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre13Lbmi2,@function +mcl_fp_addPre13Lbmi2: # @mcl_fp_addPre13Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + 
movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ebx) + movl 36(%ecx), %edx + adcl %edi, %edx + movl 40(%eax), %edi + movl %esi, 32(%ebx) + movl 40(%ecx), %esi + adcl %edi, %esi + movl 44(%eax), %edi + movl %edx, 36(%ebx) + movl 44(%ecx), %edx + adcl %edi, %edx + movl %esi, 40(%ebx) + movl %edx, 44(%ebx) + movl 48(%eax), %eax + movl 48(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 48(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end197: + .size mcl_fp_addPre13Lbmi2, .Lfunc_end197-mcl_fp_addPre13Lbmi2 + + .globl mcl_fp_subPre13Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre13Lbmi2,@function +mcl_fp_subPre13Lbmi2: # @mcl_fp_subPre13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ebp) + movl 36(%ecx), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ebp) + movl 40(%ecx), %edi + sbbl %ebx, %edi + movl 44(%edx), %ebx + movl %esi, 36(%ebp) + movl 44(%ecx), %esi + sbbl %ebx, %esi + movl %edi, 40(%ebp) + movl %esi, 44(%ebp) + movl 48(%edx), %edx + movl 48(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 48(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + 
popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end198: + .size mcl_fp_subPre13Lbmi2, .Lfunc_end198-mcl_fp_subPre13Lbmi2 + + .globl mcl_fp_shr1_13Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_13Lbmi2,@function +mcl_fp_shr1_13Lbmi2: # @mcl_fp_shr1_13Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %eax + shrdl $1, %eax, %esi + movl %esi, 44(%ecx) + shrl %eax + movl %eax, 48(%ecx) + popl %esi + retl +.Lfunc_end199: + .size mcl_fp_shr1_13Lbmi2, .Lfunc_end199-mcl_fp_shr1_13Lbmi2 + + .globl mcl_fp_add13Lbmi2 + .align 16, 0x90 + .type mcl_fp_add13Lbmi2,@function +mcl_fp_add13Lbmi2: # @mcl_fp_add13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 68(%esp), %ebp + movl (%ebp), %ecx + movl 4(%ebp), %eax + movl 64(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + adcl 4(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 8(%ebp), %eax + adcl 8(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + movl 16(%ebx), %eax + adcl 12(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl 16(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%ebx), %eax + adcl 20(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 
24(%ebx), %eax + adcl 24(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 28(%ebx), %eax + adcl 28(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 32(%ebx), %eax + adcl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 36(%ebx), %ecx + adcl 36(%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 40(%ebx), %edi + adcl 40(%ebp), %edi + movl 44(%ebx), %edx + adcl 44(%ebp), %edx + movl 48(%ebx), %esi + adcl 48(%ebp), %esi + movl 60(%esp), %ebp + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, (%ebp) + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 4(%ebp) + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 8(%ebp) + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebp) + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 16(%ebp) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 20(%ebp) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 24(%ebp) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 28(%ebp) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 32(%ebp) + movl %ecx, 36(%ebp) + movl %edi, 40(%ebp) + movl %edx, 44(%ebp) + movl %esi, 48(%ebp) + sbbl %eax, %eax + andl $1, %eax + movl 72(%esp), %ecx + subl (%ecx), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + sbbl 4(%ecx), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ebx # 4-byte Reload + sbbl 8(%ecx), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 28(%esp), %ebx # 4-byte Reload + sbbl 12(%ecx), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 24(%esp), %ebx # 4-byte Reload + sbbl 16(%ecx), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 20(%esp), %ebx # 4-byte Reload + sbbl 20(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 16(%esp), %ebx # 4-byte Reload + sbbl 24(%ecx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + sbbl 28(%ecx), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebx # 4-byte Reload + sbbl 32(%ecx), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl (%esp), %ebx # 
4-byte Reload + sbbl 36(%ecx), %ebx + sbbl 40(%ecx), %edi + sbbl 44(%ecx), %edx + sbbl 48(%ecx), %esi + sbbl $0, %eax + testb $1, %al + jne .LBB200_2 +# BB#1: # %nocarry + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ebp) + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 4(%ebp) + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 8(%ebp) + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebp) + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 16(%ebp) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 20(%ebp) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 24(%ebp) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 28(%ebp) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 32(%ebp) + movl %ebx, 36(%ebp) + movl %edi, 40(%ebp) + movl %edx, 44(%ebp) + movl %esi, 48(%ebp) +.LBB200_2: # %carry + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end200: + .size mcl_fp_add13Lbmi2, .Lfunc_end200-mcl_fp_add13Lbmi2 + + .globl mcl_fp_addNF13Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF13Lbmi2,@function +mcl_fp_addNF13Lbmi2: # @mcl_fp_addNF13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 128(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + movl 124(%esp), %edx + addl (%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 4(%edx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 48(%esi), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 44(%esi), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 40(%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 36(%esi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 32(%esi), %ebp + movl 28(%esi), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 24(%esi), %eax + movl 20(%esi), %ebx + movl 16(%esi), %edi + movl 12(%esi), %ecx + movl 8(%esi), %esi + adcl 8(%edx), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl 12(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 16(%edx), %edi + movl %edi, 56(%esp) # 4-byte Spill + adcl 20(%edx), %ebx + 
movl %ebx, 60(%esp) # 4-byte Spill + adcl 24(%edx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 28(%edx), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 32(%edx), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 36(%edx), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 40(%edx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%edx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 48(%edx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 132(%esp), %edx + movl 64(%esp), %eax # 4-byte Reload + subl (%edx), %eax + movl 68(%esp), %ebp # 4-byte Reload + sbbl 4(%edx), %ebp + movl %ebp, (%esp) # 4-byte Spill + sbbl 8(%edx), %esi + movl %esi, 4(%esp) # 4-byte Spill + sbbl 12(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 16(%edx), %edi + movl %edi, 12(%esp) # 4-byte Spill + sbbl 20(%edx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + sbbl 24(%edx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 28(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 32(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + movl %esi, %ecx + movl %esi, %ebp + sbbl 36(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + movl %esi, %ecx + movl %esi, %edi + sbbl 40(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 44(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 48(%edx), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + sarl $31, %ebx + testl %ebx, %ebx + movl 64(%esp), %edx # 4-byte Reload + js .LBB201_2 +# BB#1: + movl %eax, %edx +.LBB201_2: + movl 120(%esp), %esi + movl %edx, (%esi) + movl 68(%esp), %edx # 4-byte 
Reload + js .LBB201_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload +.LBB201_4: + movl %edx, 4(%esi) + movl %edi, %edx + movl 52(%esp), %ebx # 4-byte Reload + movl 48(%esp), %eax # 4-byte Reload + js .LBB201_6 +# BB#5: + movl 4(%esp), %eax # 4-byte Reload +.LBB201_6: + movl %eax, 8(%esi) + movl %ebp, %edi + movl 60(%esp), %eax # 4-byte Reload + js .LBB201_8 +# BB#7: + movl 8(%esp), %ebx # 4-byte Reload +.LBB201_8: + movl %ebx, 12(%esi) + movl 96(%esp), %ebp # 4-byte Reload + movl 56(%esp), %ecx # 4-byte Reload + js .LBB201_10 +# BB#9: + movl 12(%esp), %ecx # 4-byte Reload +.LBB201_10: + movl %ecx, 16(%esi) + movl 92(%esp), %ecx # 4-byte Reload + js .LBB201_12 +# BB#11: + movl 16(%esp), %eax # 4-byte Reload +.LBB201_12: + movl %eax, 20(%esi) + movl 72(%esp), %ebx # 4-byte Reload + js .LBB201_14 +# BB#13: + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill +.LBB201_14: + movl 76(%esp), %eax # 4-byte Reload + movl %eax, 24(%esi) + js .LBB201_16 +# BB#15: + movl 24(%esp), %ebp # 4-byte Reload +.LBB201_16: + movl %ebp, 28(%esi) + js .LBB201_18 +# BB#17: + movl 28(%esp), %ebx # 4-byte Reload +.LBB201_18: + movl %ebx, 32(%esi) + js .LBB201_20 +# BB#19: + movl 32(%esp), %edi # 4-byte Reload +.LBB201_20: + movl %edi, 36(%esi) + js .LBB201_22 +# BB#21: + movl 36(%esp), %edx # 4-byte Reload +.LBB201_22: + movl %edx, 40(%esi) + js .LBB201_24 +# BB#23: + movl 40(%esp), %ecx # 4-byte Reload +.LBB201_24: + movl %ecx, 44(%esi) + movl 88(%esp), %eax # 4-byte Reload + js .LBB201_26 +# BB#25: + movl 44(%esp), %eax # 4-byte Reload +.LBB201_26: + movl %eax, 48(%esi) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end201: + .size mcl_fp_addNF13Lbmi2, .Lfunc_end201-mcl_fp_addNF13Lbmi2 + + .globl mcl_fp_sub13Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub13Lbmi2,@function +mcl_fp_sub13Lbmi2: # @mcl_fp_sub13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 68(%esp), %esi + movl (%esi), %eax + movl 
4(%esi), %ecx + xorl %ebx, %ebx + movl 72(%esp), %edi + subl (%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 32(%esi), %edx + sbbl 32(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 36(%esi), %ecx + sbbl 36(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 40(%esi), %eax + sbbl 40(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 44(%esi), %ebp + sbbl 44(%edi), %ebp + movl 48(%esi), %esi + sbbl 48(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 64(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl %edx, 32(%ebx) + movl %ecx, 36(%ebx) + movl %eax, 40(%ebx) + movl %ebp, 44(%ebx) + movl %esi, 48(%ebx) + je .LBB202_2 +# BB#1: # %carry + movl %esi, %edi + movl 76(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 28(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 36(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + 
movl 16(%esi), %ecx + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl (%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl %ecx, 40(%ebx) + movl 44(%esi), %eax + adcl %ebp, %eax + movl %eax, 44(%ebx) + movl 48(%esi), %eax + adcl %edi, %eax + movl %eax, 48(%ebx) +.LBB202_2: # %nocarry + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end202: + .size mcl_fp_sub13Lbmi2, .Lfunc_end202-mcl_fp_sub13Lbmi2 + + .globl mcl_fp_subNF13Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF13Lbmi2,@function +mcl_fp_subNF13Lbmi2: # @mcl_fp_subNF13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $80, %esp + movl 104(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %eax + movl 108(%esp), %edi + subl (%edi), %edx + movl %edx, 48(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 32(%ecx), %ebp + movl 28(%ecx), %ebx + movl 24(%ecx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 32(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 36(%esp) # 4-byte 
Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 24(%edi), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + sbbl 28(%edi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + sbbl 32(%edi), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + sbbl 48(%edi), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %edx, %esi + sarl $31, %esi + movl %esi, %ecx + shldl $1, %edx, %ecx + movl 112(%esp), %edi + movl 4(%edi), %eax + andl %ecx, %eax + movl %eax, 56(%esp) # 4-byte Spill + andl (%edi), %ecx + movl 48(%edi), %eax + andl %esi, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 44(%edi), %eax + andl %esi, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 40(%edi), %eax + andl %esi, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 36(%edi), %eax + andl %esi, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 32(%edi), %eax + andl %esi, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 28(%edi), %eax + andl %esi, %eax + movl %eax, (%esp) # 4-byte Spill + movl 24(%edi), %ebp + andl %esi, %ebp + movl 20(%edi), %ebx + andl %esi, %ebx + movl 16(%edi), %edx + andl %esi, %edx + rorxl $31, %esi, %eax + andl 12(%edi), %esi + andl 8(%edi), %eax + addl 48(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %edi # 4-byte Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 56(%esp) # 4-byte Spill + movl 100(%esp), %edi + movl %ecx, (%edi) + adcl 24(%esp), %eax # 4-byte Folded Reload + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%edi) + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %eax, 8(%edi) + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %esi, 12(%edi) + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %edx, 16(%edi) + adcl 76(%esp), %ebp # 4-byte Folded 
Reload + movl %ebx, 20(%edi) + movl (%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %ebp, 24(%edi) + movl 4(%esp), %ecx # 4-byte Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%edi) + movl 8(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%edi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%edi) + movl 16(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%edi) + movl %eax, 44(%edi) + movl 20(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%edi) + addl $80, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end203: + .size mcl_fp_subNF13Lbmi2, .Lfunc_end203-mcl_fp_subNF13Lbmi2 + + .globl mcl_fpDbl_add13Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add13Lbmi2,@function +mcl_fpDbl_add13Lbmi2: # @mcl_fpDbl_add13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $96, %esp + movl 124(%esp), %ecx + movl 120(%esp), %esi + movl 12(%esi), %edi + movl 16(%esi), %edx + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%esi), %ebp + movl 116(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%esi), %ebp + adcl 8(%esi), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %edx + movl %ebp, 4(%eax) + movl 60(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%esi), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %edx, 16(%eax) + movl 24(%esi), %edx + adcl %ebx, %edx + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%esi), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %edx, 24(%eax) + movl 32(%esi), %edx + adcl %ebx, %edx + movl 36(%ecx), %ebx + movl %edi, 28(%eax) + movl 36(%esi), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %edx, 32(%eax) + movl 40(%esi), %edx + adcl %ebx, %edx + movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%esi), %edi + adcl %ebx, %edi + 
movl 48(%ecx), %ebx + movl %edx, 40(%eax) + movl 48(%esi), %edx + adcl %ebx, %edx + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%esi), %edi + adcl %ebx, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 56(%ecx), %edi + movl %edx, 48(%eax) + movl 56(%esi), %eax + adcl %edi, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esi), %eax + adcl %ebp, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 64(%ecx), %edx + movl 64(%esi), %eax + adcl %edx, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 68(%ecx), %edx + movl 68(%esi), %eax + adcl %edx, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 72(%ecx), %edx + movl 72(%esi), %eax + adcl %edx, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%ecx), %edx + movl 76(%esi), %eax + adcl %edx, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%ecx), %edx + movl 80(%esi), %eax + adcl %edx, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%ecx), %edx + movl 84(%esi), %eax + adcl %edx, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%ecx), %edx + movl 88(%esi), %edi + adcl %edx, %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 92(%ecx), %edx + movl 92(%esi), %eax + adcl %edx, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 96(%ecx), %edx + movl 96(%esi), %ebx + adcl %edx, %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 100(%ecx), %ecx + movl 100(%esi), %esi + adcl %ecx, %esi + sbbl %edx, %edx + andl $1, %edx + movl 128(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + subl (%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 4(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + sbbl 20(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 
64(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + sbbl 36(%ebp), %edi + movl %edi, 36(%esp) # 4-byte Spill + sbbl 40(%ebp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + movl %esi, %ebx + sbbl 44(%ebp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ebx, %ecx + sbbl 48(%ebp), %ecx + sbbl $0, %edx + andl $1, %edx + jne .LBB204_2 +# BB#1: + movl %ecx, %ebx +.LBB204_2: + testb %dl, %dl + movl 76(%esp), %ecx # 4-byte Reload + movl 72(%esp), %edx # 4-byte Reload + movl 68(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB204_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebp # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload +.LBB204_4: + movl 116(%esp), %eax + movl %ecx, 52(%eax) + movl 80(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 84(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 88(%esp), %ecx # 4-byte Reload + movl %ecx, 64(%eax) + movl 92(%esp), %ecx # 4-byte Reload + movl %ecx, 68(%eax) + movl %ebp, 72(%eax) + movl %edi, 76(%eax) + movl %esi, 80(%eax) + movl %edx, 84(%eax) + movl 56(%esp), %ecx # 4-byte Reload + movl 52(%esp), %edx # 4-byte Reload + movl 48(%esp), %esi # 4-byte Reload + jne .LBB204_6 +# BB#5: + movl 36(%esp), %esi # 4-byte Reload +.LBB204_6: + movl %esi, 88(%eax) + jne .LBB204_8 +# BB#7: + movl 40(%esp), %edx # 4-byte Reload 
+.LBB204_8: + movl %edx, 92(%eax) + jne .LBB204_10 +# BB#9: + movl 44(%esp), %ecx # 4-byte Reload +.LBB204_10: + movl %ecx, 96(%eax) + movl %ebx, 100(%eax) + addl $96, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end204: + .size mcl_fpDbl_add13Lbmi2, .Lfunc_end204-mcl_fpDbl_add13Lbmi2 + + .globl mcl_fpDbl_sub13Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub13Lbmi2,@function +mcl_fpDbl_sub13Lbmi2: # @mcl_fpDbl_sub13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $84, %esp + movl 108(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %edx + movl 112(%esp), %ebx + subl (%ebx), %eax + sbbl 4(%ebx), %edx + movl 8(%edi), %esi + sbbl 8(%ebx), %esi + movl 104(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edi), %eax + sbbl 12(%ebx), %eax + movl %edx, 4(%ecx) + movl 16(%edi), %edx + sbbl 16(%ebx), %edx + movl %esi, 8(%ecx) + movl 20(%ebx), %esi + movl %eax, 12(%ecx) + movl 20(%edi), %eax + sbbl %esi, %eax + movl 24(%ebx), %esi + movl %edx, 16(%ecx) + movl 24(%edi), %edx + sbbl %esi, %edx + movl 28(%ebx), %esi + movl %eax, 20(%ecx) + movl 28(%edi), %eax + sbbl %esi, %eax + movl 32(%ebx), %esi + movl %edx, 24(%ecx) + movl 32(%edi), %edx + sbbl %esi, %edx + movl 36(%ebx), %esi + movl %eax, 28(%ecx) + movl 36(%edi), %eax + sbbl %esi, %eax + movl 40(%ebx), %esi + movl %edx, 32(%ecx) + movl 40(%edi), %edx + sbbl %esi, %edx + movl 44(%ebx), %esi + movl %eax, 36(%ecx) + movl 44(%edi), %eax + sbbl %esi, %eax + movl 48(%ebx), %esi + movl %edx, 40(%ecx) + movl 48(%edi), %edx + sbbl %esi, %edx + movl 52(%ebx), %esi + movl %eax, 44(%ecx) + movl 52(%edi), %eax + sbbl %esi, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 56(%ebx), %eax + movl %edx, 48(%ecx) + movl 56(%edi), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 60(%ebx), %eax + movl 60(%edi), %edx + sbbl %eax, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 64(%ebx), %eax + movl 64(%edi), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 68(%ebx), %eax 
+ movl 68(%edi), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 72(%ebx), %eax + movl 72(%edi), %edx + sbbl %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 76(%ebx), %eax + movl 76(%edi), %edx + sbbl %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 80(%ebx), %eax + movl 80(%edi), %edx + sbbl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 84(%ebx), %eax + movl 84(%edi), %edx + sbbl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 88(%ebx), %eax + movl 88(%edi), %edx + sbbl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 92(%ebx), %eax + movl 92(%edi), %edx + sbbl %eax, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 96(%ebx), %eax + movl 96(%edi), %edx + sbbl %eax, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 100(%ebx), %eax + movl 100(%edi), %edx + sbbl %eax, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 116(%esp), %edi + jne .LBB205_1 +# BB#2: + movl $0, 44(%esp) # 4-byte Folded Spill + jmp .LBB205_3 +.LBB205_1: + movl 48(%edi), %edx + movl %edx, 44(%esp) # 4-byte Spill +.LBB205_3: + testb %al, %al + jne .LBB205_4 +# BB#5: + movl $0, 16(%esp) # 4-byte Folded Spill + movl $0, %ebx + jmp .LBB205_6 +.LBB205_4: + movl (%edi), %ebx + movl 4(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB205_6: + jne .LBB205_7 +# BB#8: + movl $0, 24(%esp) # 4-byte Folded Spill + jmp .LBB205_9 +.LBB205_7: + movl 44(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB205_9: + jne .LBB205_10 +# BB#11: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB205_12 +.LBB205_10: + movl 40(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB205_12: + jne .LBB205_13 +# BB#14: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB205_15 +.LBB205_13: + movl 36(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB205_15: + jne .LBB205_16 +# BB#17: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB205_18 +.LBB205_16: + movl 32(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill 
+.LBB205_18: + jne .LBB205_19 +# BB#20: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB205_21 +.LBB205_19: + movl 28(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB205_21: + jne .LBB205_22 +# BB#23: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB205_24 +.LBB205_22: + movl 24(%edi), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB205_24: + jne .LBB205_25 +# BB#26: + movl $0, %eax + jmp .LBB205_27 +.LBB205_25: + movl 20(%edi), %eax +.LBB205_27: + jne .LBB205_28 +# BB#29: + movl $0, %edx + jmp .LBB205_30 +.LBB205_28: + movl 16(%edi), %edx +.LBB205_30: + jne .LBB205_31 +# BB#32: + movl $0, %esi + jmp .LBB205_33 +.LBB205_31: + movl 12(%edi), %esi +.LBB205_33: + jne .LBB205_34 +# BB#35: + xorl %edi, %edi + jmp .LBB205_36 +.LBB205_34: + movl 8(%edi), %edi +.LBB205_36: + addl 36(%esp), %ebx # 4-byte Folded Reload + movl 16(%esp), %ebp # 4-byte Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebx, 52(%ecx) + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %ebp, 56(%ecx) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edi, 60(%ecx) + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %esi, 64(%ecx) + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %edx, 68(%ecx) + movl (%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %eax, 72(%ecx) + movl 4(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %edx, 76(%ecx) + movl 8(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %eax, 80(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %edx, 84(%ecx) + movl 20(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %eax, 88(%ecx) + movl 24(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl %eax, 96(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%ecx) + addl $84, %esp + popl %esi + popl %edi + 
popl %ebx + popl %ebp + retl +.Lfunc_end205: + .size mcl_fpDbl_sub13Lbmi2, .Lfunc_end205-mcl_fpDbl_sub13Lbmi2 + + .align 16, 0x90 + .type .LmulPv448x32,@function +.LmulPv448x32: # @mulPv448x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $48, %esp + movl %edx, %eax + movl 68(%esp), %ebx + movl %ebx, %edx + mulxl 4(%eax), %edi, %esi + movl %ebx, %edx + mulxl (%eax), %ebp, %edx + movl %ebp, 44(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 8(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 12(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 16(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 20(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 24(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 28(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 32(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 36(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 40(%eax), %edi, %ebp + adcl %esi, %edi + movl %ebx, %edx + mulxl 44(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebp, %esi + movl %ebx, %edx + mulxl 48(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 44(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 36(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 32(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl 
%ebp, 24(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%ecx) + movl %edi, 40(%ecx) + movl %esi, 44(%ecx) + movl %edx, 48(%ecx) + movl %ebx, %edx + mulxl 52(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ecx) + adcl $0, %edx + movl %edx, 56(%ecx) + movl %ecx, %eax + addl $48, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end206: + .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32 + + .globl mcl_fp_mulUnitPre14Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre14Lbmi2,@function +mcl_fp_mulUnitPre14Lbmi2: # @mcl_fp_mulUnitPre14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + calll .L207$pb +.L207$pb: + popl %ebx +.Ltmp38: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp38-.L207$pb), %ebx + movl 136(%esp), %eax + movl %eax, (%esp) + leal 48(%esp), %ecx + movl 132(%esp), %edx + calll .LmulPv448x32 + movl 104(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 68(%esp), %ebp + movl 64(%esp), %ebx + movl 60(%esp), %edi + movl 56(%esp), %esi + movl 48(%esp), %edx + movl 52(%esp), %ecx + movl 128(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl 
%ecx, 32(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end207: + .size mcl_fp_mulUnitPre14Lbmi2, .Lfunc_end207-mcl_fp_mulUnitPre14Lbmi2 + + .globl mcl_fpDbl_mulPre14Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre14Lbmi2,@function +mcl_fpDbl_mulPre14Lbmi2: # @mcl_fpDbl_mulPre14Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $268, %esp # imm = 0x10C + calll .L208$pb +.L208$pb: + popl %ebx +.Ltmp39: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp39-.L208$pb), %ebx + movl %ebx, -192(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl %esi, 8(%esp) + movl 12(%ebp), %edi + movl %edi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre7Lbmi2@PLT + leal 28(%esi), %eax + movl %eax, 8(%esp) + leal 28(%edi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 56(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre7Lbmi2@PLT + movl 44(%edi), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl 40(%edi), %eax + movl 36(%edi), %edx + movl (%edi), %edi + movl 12(%ebp), %ecx + movl 4(%ecx), %ecx + movl 12(%ebp), %ebx + addl 28(%ebx), %edi + movl %edi, -180(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + adcl 32(%edi), %ecx + movl %ecx, -200(%ebp) # 4-byte Spill + adcl 8(%edi), %edx + movl %edx, -212(%ebp) # 4-byte Spill + adcl 12(%edi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl 16(%edi), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl %eax, %ebx + seto %al + lahf + movl %eax, %eax + movl %eax, -128(%ebp) # 4-byte Spill + movl (%esi), %eax + addl 28(%esi), %eax + movl %eax, 
-216(%ebp) # 4-byte Spill + movl 4(%esi), %eax + adcl 32(%esi), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl 36(%esi), %eax + adcl 8(%esi), %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl 40(%esi), %eax + adcl 12(%esi), %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl 44(%esi), %eax + adcl 16(%esi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + movl 48(%esi), %ecx + adcl 20(%esi), %ecx + movl 52(%esi), %eax + adcl 24(%esi), %eax + pushl %eax + seto %al + lahf + movl %eax, %esi + popl %eax + movl %esi, -220(%ebp) # 4-byte Spill + movl %ebx, %esi + movl %edx, -184(%ebp) # 4-byte Spill + movl -180(%ebp), %edx # 4-byte Reload + movl %edx, -188(%ebp) # 4-byte Spill + jb .LBB208_2 +# BB#1: + xorl %esi, %esi + movl $0, -184(%ebp) # 4-byte Folded Spill + movl $0, -188(%ebp) # 4-byte Folded Spill +.LBB208_2: + movl %esi, -204(%ebp) # 4-byte Spill + movl 52(%edi), %esi + movl 48(%edi), %ebx + movl -128(%ebp), %edx # 4-byte Reload + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + adcl 20(%edi), %ebx + movl %ebx, -160(%ebp) # 4-byte Spill + adcl 24(%edi), %esi + movl %esi, -208(%ebp) # 4-byte Spill + movl %eax, -148(%ebp) # 4-byte Spill + movl %ecx, -152(%ebp) # 4-byte Spill + movl -176(%ebp), %esi # 4-byte Reload + movl %esi, -128(%ebp) # 4-byte Spill + movl -172(%ebp), %esi # 4-byte Reload + movl %esi, -132(%ebp) # 4-byte Spill + movl -168(%ebp), %esi # 4-byte Reload + movl %esi, -136(%ebp) # 4-byte Spill + movl -164(%ebp), %esi # 4-byte Reload + movl %esi, -140(%ebp) # 4-byte Spill + movl -216(%ebp), %ebx # 4-byte Reload + movl %ebx, -144(%ebp) # 4-byte Spill + jb .LBB208_4 +# BB#3: + movl $0, -148(%ebp) # 4-byte Folded Spill + movl $0, -152(%ebp) # 4-byte Folded Spill + movl $0, -128(%ebp) # 4-byte Folded Spill + movl $0, -132(%ebp) # 4-byte Folded Spill + movl $0, -136(%ebp) # 4-byte Folded Spill + movl $0, -140(%ebp) # 4-byte Folded Spill + movl $0, -144(%ebp) # 4-byte Folded Spill +.LBB208_4: + movl -180(%ebp), %edx # 4-byte Reload + movl 
%edx, -96(%ebp) + movl -200(%ebp), %esi # 4-byte Reload + movl %esi, -92(%ebp) + movl -212(%ebp), %edx # 4-byte Reload + movl %edx, -88(%ebp) + movl -196(%ebp), %edi # 4-byte Reload + movl %edi, -84(%ebp) + movl -156(%ebp), %edx # 4-byte Reload + movl %edx, -80(%ebp) + movl %ebx, -124(%ebp) + movl -164(%ebp), %edx # 4-byte Reload + movl %edx, -120(%ebp) + movl -168(%ebp), %edx # 4-byte Reload + movl %edx, -116(%ebp) + movl -172(%ebp), %edx # 4-byte Reload + movl %edx, -112(%ebp) + movl -176(%ebp), %edx # 4-byte Reload + movl %edx, -108(%ebp) + movl %ecx, -104(%ebp) + movl %edi, %ebx + movl %esi, %edi + movl %eax, -100(%ebp) + sbbl %edx, %edx + movl -160(%ebp), %eax # 4-byte Reload + movl %eax, -76(%ebp) + movl -208(%ebp), %esi # 4-byte Reload + movl %esi, -72(%ebp) + movl -220(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB208_6 +# BB#5: + movl $0, %esi + movl $0, %eax + movl $0, %ebx + movl $0, %edi +.LBB208_6: + movl %eax, -160(%ebp) # 4-byte Spill + sbbl %eax, %eax + leal -124(%ebp), %ecx + movl %ecx, 8(%esp) + leal -96(%ebp), %ecx + movl %ecx, 4(%esp) + leal -68(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl -188(%ebp), %eax # 4-byte Reload + addl %eax, -144(%ebp) # 4-byte Folded Spill + adcl %edi, -140(%ebp) # 4-byte Folded Spill + movl -184(%ebp), %eax # 4-byte Reload + adcl %eax, -136(%ebp) # 4-byte Folded Spill + adcl %ebx, -132(%ebp) # 4-byte Folded Spill + movl -204(%ebp), %eax # 4-byte Reload + adcl %eax, -128(%ebp) # 4-byte Folded Spill + movl -152(%ebp), %edi # 4-byte Reload + adcl -160(%ebp), %edi # 4-byte Folded Reload + adcl %esi, -148(%ebp) # 4-byte Folded Spill + sbbl %esi, %esi + andl $1, %esi + andl $1, %edx + movl %edx, -156(%ebp) # 4-byte Spill + movl -192(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre7Lbmi2@PLT + movl -144(%ebp), %eax # 4-byte Reload + addl -40(%ebp), %eax + movl %eax, -144(%ebp) # 4-byte Spill + movl -140(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), 
%eax + movl %eax, -140(%ebp) # 4-byte Spill + movl -136(%ebp), %eax # 4-byte Reload + adcl -32(%ebp), %eax + movl %eax, -136(%ebp) # 4-byte Spill + movl -132(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -132(%ebp) # 4-byte Spill + movl -128(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -128(%ebp) # 4-byte Spill + adcl -20(%ebp), %edi + movl -148(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, -148(%ebp) # 4-byte Spill + adcl %esi, -156(%ebp) # 4-byte Folded Spill + movl -68(%ebp), %eax + movl 8(%ebp), %esi + subl (%esi), %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl -64(%ebp), %ecx + sbbl 4(%esi), %ecx + movl -60(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl -56(%ebp), %edx + sbbl 12(%esi), %edx + movl -52(%ebp), %ebx + sbbl 16(%esi), %ebx + movl -48(%ebp), %eax + sbbl 20(%esi), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl -44(%ebp), %eax + sbbl 24(%esi), %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl 28(%esi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 32(%esi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, -184(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -188(%ebp) # 4-byte Spill + sbbl %eax, -132(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -192(%ebp) # 4-byte Spill + sbbl %eax, -128(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl %edi, -152(%ebp) # 4-byte Spill + movl 52(%esi), %eax + movl %eax, -200(%ebp) # 4-byte Spill + movl -148(%ebp), %edi # 4-byte Reload + sbbl %eax, %edi + sbbl $0, -156(%ebp) # 4-byte Folded Spill + movl 56(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + subl %eax, -172(%ebp) # 4-byte Folded Spill + movl 60(%esi), %eax + movl %eax, -232(%ebp) # 4-byte Spill + sbbl 
%eax, %ecx + movl 64(%esi), %eax + movl %eax, -236(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -240(%ebp) # 4-byte Spill + sbbl %eax, %edx + movl 72(%esi), %eax + movl %eax, -244(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 76(%esi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 80(%esi), %eax + movl %eax, -252(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 84(%esi), %eax + movl %eax, -256(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 88(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 92(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte Folded Spill + movl 96(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -132(%ebp) # 4-byte Folded Spill + movl 100(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, -128(%ebp) # 4-byte Folded Spill + movl 104(%esi), %eax + movl %eax, -224(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 108(%esi), %eax + movl %eax, -204(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl %edi, -148(%ebp) # 4-byte Spill + movl -156(%ebp), %edi # 4-byte Reload + sbbl $0, %edi + movl -172(%ebp), %eax # 4-byte Reload + addl -176(%ebp), %eax # 4-byte Folded Reload + adcl -180(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 28(%esi) + movl -160(%ebp), %eax # 4-byte Reload + adcl -184(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 32(%esi) + adcl -188(%ebp), %edx # 4-byte Folded Reload + movl %eax, 36(%esi) + adcl -192(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 40(%esi) + movl -164(%ebp), %eax # 4-byte Reload + adcl -196(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 44(%esi) + movl -168(%ebp), %ecx # 4-byte Reload + adcl -200(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl -144(%ebp), %eax # 4-byte Reload + adcl -228(%ebp), %eax # 
4-byte Folded Reload + movl %ecx, 52(%esi) + movl -140(%ebp), %ecx # 4-byte Reload + adcl -232(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 56(%esi) + movl -136(%ebp), %eax # 4-byte Reload + adcl -236(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 60(%esi) + movl -132(%ebp), %ecx # 4-byte Reload + adcl -240(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 64(%esi) + movl -128(%ebp), %eax # 4-byte Reload + adcl -244(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 68(%esi) + movl -152(%ebp), %ecx # 4-byte Reload + adcl -248(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 72(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl -252(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 76(%esi) + adcl -256(%ebp), %edi # 4-byte Folded Reload + movl %eax, 80(%esi) + movl %edi, 84(%esi) + movl -208(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 88(%esi) + movl -212(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 92(%esi) + movl -216(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 96(%esi) + movl -220(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -224(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 104(%esi) + movl -204(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + addl $268, %esp # imm = 0x10C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end208: + .size mcl_fpDbl_mulPre14Lbmi2, .Lfunc_end208-mcl_fpDbl_mulPre14Lbmi2 + + .globl mcl_fpDbl_sqrPre14Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre14Lbmi2,@function +mcl_fpDbl_sqrPre14Lbmi2: # @mcl_fpDbl_sqrPre14Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $268, %esp # imm = 0x10C + calll .L209$pb +.L209$pb: + popl %ebx +.Ltmp40: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp40-.L209$pb), %ebx + movl %ebx, -172(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre7Lbmi2@PLT + leal 28(%edi), %eax + 
movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 56(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre7Lbmi2@PLT + movl 48(%edi), %eax + movl 44(%edi), %ecx + movl 36(%edi), %edx + movl (%edi), %esi + movl 4(%edi), %ebx + addl 28(%edi), %esi + adcl 32(%edi), %ebx + movl %ebx, -164(%ebp) # 4-byte Spill + adcl 8(%edi), %edx + movl %edx, -160(%ebp) # 4-byte Spill + movl 40(%edi), %edx + adcl 12(%edi), %edx + adcl 16(%edi), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + adcl 20(%edi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + movl 52(%edi), %ecx + adcl 24(%edi), %ecx + seto %al + lahf + movl %eax, %eax + movl %eax, -184(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -152(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -148(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -144(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -140(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -136(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %edi + seto %al + lahf + movl %eax, %eax + sbbl %ebx, %ebx + movl %ebx, -128(%ebp) # 4-byte Spill + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_1 +# BB#2: + movl %esi, -168(%ebp) # 4-byte Spill + movl $0, -132(%ebp) # 4-byte Folded Spill + jmp .LBB209_3 +.LBB209_1: + leal (%esi,%esi), %eax + movl %esi, -168(%ebp) # 4-byte Spill + movl %eax, -132(%ebp) # 4-byte Spill +.LBB209_3: + movl %edi, %eax + addb $127, %al + sahf + movl -180(%ebp), %ebx # 4-byte Reload + jb .LBB209_4 +# BB#5: + movl $0, -156(%ebp) # 4-byte Folded Spill + jmp .LBB209_6 +.LBB209_4: + movl -164(%ebp), %eax # 4-byte Reload + movl -168(%ebp), %esi # 4-byte Reload + shldl $1, %esi, %eax + movl %eax, -156(%ebp) # 4-byte Spill +.LBB209_6: + movl -176(%ebp), %edi # 4-byte Reload + movl -136(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_7 +# BB#8: + movl $0, -136(%ebp) # 4-byte Folded Spill + jmp .LBB209_9 +.LBB209_7: + 
movl -160(%ebp), %eax # 4-byte Reload + movl -164(%ebp), %esi # 4-byte Reload + shldl $1, %esi, %eax + movl %eax, -136(%ebp) # 4-byte Spill +.LBB209_9: + movl %ebx, %esi + movl -140(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_10 +# BB#11: + movl $0, -140(%ebp) # 4-byte Folded Spill + jmp .LBB209_12 +.LBB209_10: + movl %edx, %eax + movl -160(%ebp), %ebx # 4-byte Reload + shldl $1, %ebx, %eax + movl %eax, -140(%ebp) # 4-byte Spill +.LBB209_12: + movl -144(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_13 +# BB#14: + movl $0, -144(%ebp) # 4-byte Folded Spill + jmp .LBB209_15 +.LBB209_13: + movl %esi, %eax + shldl $1, %edx, %eax + movl %eax, -144(%ebp) # 4-byte Spill +.LBB209_15: + movl -148(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_16 +# BB#17: + movl $0, -148(%ebp) # 4-byte Folded Spill + jmp .LBB209_18 +.LBB209_16: + movl %edi, %eax + shldl $1, %esi, %eax + movl %eax, -148(%ebp) # 4-byte Spill +.LBB209_18: + movl -152(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_19 +# BB#20: + movl $0, -152(%ebp) # 4-byte Folded Spill + jmp .LBB209_21 +.LBB209_19: + movl %ecx, %eax + shldl $1, %edi, %eax + movl %eax, -152(%ebp) # 4-byte Spill +.LBB209_21: + movl -168(%ebp), %eax # 4-byte Reload + movl %eax, -96(%ebp) + movl %eax, -124(%ebp) + movl -164(%ebp), %eax # 4-byte Reload + movl %eax, -92(%ebp) + movl %eax, -120(%ebp) + movl -160(%ebp), %eax # 4-byte Reload + movl %eax, -88(%ebp) + movl %eax, -116(%ebp) + movl %edx, -84(%ebp) + movl %edx, -112(%ebp) + movl %esi, -80(%ebp) + movl %esi, -108(%ebp) + movl %edi, -76(%ebp) + movl %edi, -104(%ebp) + movl %ecx, -72(%ebp) + movl %ecx, -100(%ebp) + movl -184(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_22 +# BB#23: + xorl %edi, %edi + jmp .LBB209_24 +.LBB209_22: + shrl $31, %ecx + movl %ecx, %edi +.LBB209_24: + leal -68(%ebp), %eax + movl %eax, 
(%esp) + leal -96(%ebp), %eax + movl %eax, 4(%esp) + leal -124(%ebp), %eax + movl %eax, 8(%esp) + movl -128(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -172(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre7Lbmi2@PLT + movl -132(%ebp), %eax # 4-byte Reload + addl -40(%ebp), %eax + movl %eax, -132(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), %eax + movl -136(%ebp), %ecx # 4-byte Reload + adcl -32(%ebp), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + movl -140(%ebp), %ecx # 4-byte Reload + adcl -28(%ebp), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + movl -144(%ebp), %ecx # 4-byte Reload + adcl -24(%ebp), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + movl -148(%ebp), %ecx # 4-byte Reload + adcl -20(%ebp), %ecx + movl %ecx, -148(%ebp) # 4-byte Spill + movl -152(%ebp), %ecx # 4-byte Reload + adcl -16(%ebp), %ecx + movl %ecx, -152(%ebp) # 4-byte Spill + adcl %edi, %esi + movl %esi, -128(%ebp) # 4-byte Spill + movl -68(%ebp), %ecx + movl 8(%ebp), %esi + subl (%esi), %ecx + movl %ecx, -204(%ebp) # 4-byte Spill + movl -64(%ebp), %edi + sbbl 4(%esi), %edi + movl -60(%ebp), %edx + sbbl 8(%esi), %edx + movl %edx, -160(%ebp) # 4-byte Spill + movl -56(%ebp), %edx + sbbl 12(%esi), %edx + movl %edx, -168(%ebp) # 4-byte Spill + movl -52(%ebp), %ebx + sbbl 16(%esi), %ebx + movl -48(%ebp), %ecx + sbbl 20(%esi), %ecx + movl %ecx, -172(%ebp) # 4-byte Spill + movl -44(%ebp), %edx + sbbl 24(%esi), %edx + movl %edx, -164(%ebp) # 4-byte Spill + movl 28(%esi), %edx + movl %edx, -176(%ebp) # 4-byte Spill + sbbl %edx, -132(%ebp) # 4-byte Folded Spill + movl 32(%esi), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + sbbl %ecx, %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl 36(%esi), %eax + movl %eax, -184(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -188(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -192(%ebp) # 4-byte Spill + sbbl %eax, 
-144(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -200(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl -128(%ebp), %ecx # 4-byte Reload + sbbl $0, %ecx + movl 56(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + movl -204(%ebp), %edx # 4-byte Reload + subl %eax, %edx + movl 60(%esi), %eax + movl %eax, -232(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl 64(%esi), %eax + movl %eax, -236(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -240(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 72(%esi), %eax + movl %eax, -244(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 76(%esi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + movl 80(%esi), %eax + movl %eax, -252(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 84(%esi), %eax + movl %eax, -256(%ebp) # 4-byte Spill + sbbl %eax, -132(%ebp) # 4-byte Folded Spill + movl 88(%esi), %eax + movl %eax, -204(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 92(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte Folded Spill + movl 96(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 100(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 104(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 108(%esi), %eax + movl %eax, -224(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + sbbl $0, %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + movl %edx, %eax + addl -176(%ebp), %eax # 4-byte Folded Reload + adcl -180(%ebp), %edi # 4-byte Folded Reload + movl %eax, 28(%esi) + movl -160(%ebp), %eax # 4-byte Reload + adcl 
-184(%ebp), %eax # 4-byte Folded Reload + movl %edi, 32(%esi) + movl -168(%ebp), %ecx # 4-byte Reload + adcl -188(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 36(%esi) + adcl -192(%ebp), %ebx # 4-byte Folded Reload + movl %ecx, 40(%esi) + movl -172(%ebp), %eax # 4-byte Reload + adcl -196(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 44(%esi) + movl -164(%ebp), %ecx # 4-byte Reload + adcl -200(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl -132(%ebp), %eax # 4-byte Reload + adcl -228(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl -156(%ebp), %edx # 4-byte Reload + adcl -232(%ebp), %edx # 4-byte Folded Reload + movl %eax, 56(%esi) + movl -136(%ebp), %ecx # 4-byte Reload + adcl -236(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 60(%esi) + movl -140(%ebp), %eax # 4-byte Reload + adcl -240(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 64(%esi) + movl -144(%ebp), %ecx # 4-byte Reload + adcl -244(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 68(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl -248(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 72(%esi) + movl -152(%ebp), %ecx # 4-byte Reload + adcl -252(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 76(%esi) + movl -128(%ebp), %eax # 4-byte Reload + adcl -256(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 80(%esi) + movl %eax, 84(%esi) + movl -204(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 88(%esi) + movl -208(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 92(%esi) + movl -212(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 96(%esi) + movl -216(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -220(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 104(%esi) + movl -224(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + addl $268, %esp # imm = 0x10C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end209: + .size mcl_fpDbl_sqrPre14Lbmi2, .Lfunc_end209-mcl_fpDbl_sqrPre14Lbmi2 + 
+ .globl mcl_fp_mont14Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont14Lbmi2,@function +mcl_fp_mont14Lbmi2: # @mcl_fp_mont14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1900, %esp # imm = 0x76C + calll .L210$pb +.L210$pb: + popl %ebx +.Ltmp41: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp41-.L210$pb), %ebx + movl 1932(%esp), %eax + movl -4(%eax), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1840(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 1840(%esp), %edi + movl 1844(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 1896(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 1892(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 1888(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 1884(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1880(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1876(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1872(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1868(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1864(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1860(%esp), %esi + movl 1856(%esp), %ebp + movl 1852(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1848(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1776(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + addl 1776(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1784(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1792(%esp), %ebp + adcl 1796(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 
1804(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1824(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1832(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 1928(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1712(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + andl $1, %edi + movl %edi, %edx + movl 100(%esp), %ecx # 4-byte Reload + addl 1712(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 1716(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1720(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1724(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 1728(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1732(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1740(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1760(%esp), 
%eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1768(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1648(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl 100(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1648(%esp), %ebp + movl 84(%esp), %ecx # 4-byte Reload + adcl 1652(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1656(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1660(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1664(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1668(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1672(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 1676(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 1680(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1684(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1688(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1692(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1696(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1700(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 1704(%esp), %esi + adcl $0, %eax + movl %eax, %edi + movl 1928(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1584(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 1584(%esp), %ecx + movl 72(%esp), 
%eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1592(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1600(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1604(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 1608(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1620(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1632(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1636(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + adcl 1640(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %edi + movl %edi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1520(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1520(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 1544(%esp), %ebp + movl 
%ebp, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 1564(%esp), %ebp + movl 108(%esp), %esi # 4-byte Reload + adcl 1568(%esp), %esi + movl 92(%esp), %edi # 4-byte Reload + adcl 1572(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1576(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1456(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1456(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1472(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1484(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1488(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1492(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1496(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + adcl 1500(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl 1504(%esp), 
%edi + movl %edi, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1512(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1392(%esp), %ecx + movl 1932(%esp), %eax + movl %eax, %edx + calll .LmulPv448x32 + andl $1, %edi + movl %edi, %eax + addl 1392(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1396(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1400(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1404(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1408(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1412(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1416(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1420(%esp), %esi + movl 88(%esp), %ebp # 4-byte Reload + adcl 1424(%esp), %ebp + movl 96(%esp), %edi # 4-byte Reload + adcl 1428(%esp), %edi + movl 104(%esp), %ecx # 4-byte Reload + adcl 1432(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1436(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1440(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1448(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1328(%esp), %ecx + movl 1924(%esp), %eax + movl %eax, %edx + calll .LmulPv448x32 + 
movl 76(%esp), %ecx # 4-byte Reload + addl 1328(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1352(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 1356(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl 1360(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1384(%esp), %edi + sbbl %esi, %esi + movl %ecx, %ebp + movl %ebp, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1264(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 
1284(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1316(%esp), %esi + adcl 1320(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1200(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 68(%esp), %eax # 4-byte Reload + addl 1200(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + adcl 1204(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1208(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 1212(%esp), %edi + adcl 1216(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1224(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1228(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 92(%esp) # 
4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1244(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 1248(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1252(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1256(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %eax, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1136(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1144(%esp), %ebp + adcl 1148(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1172(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 1180(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 
1924(%esp), %edx + calll .LmulPv448x32 + movl 60(%esp), %eax # 4-byte Reload + addl 1072(%esp), %eax + adcl 1076(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1084(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1088(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1092(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1096(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1100(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1104(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1108(%esp), %ebp + adcl 1112(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1116(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1120(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1124(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1128(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %eax, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1008(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1008(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 1020(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl 
%eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1036(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1044(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 1052(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 944(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 52(%esp), %eax # 4-byte Reload + addl 944(%esp), %eax + movl 56(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 952(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 956(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 960(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + adcl 964(%esp), %esi + adcl 968(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 972(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 976(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 980(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 984(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 988(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + 
movl 76(%esp), %ecx # 4-byte Reload + adcl 992(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 996(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1000(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %eax, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 880(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %ebp + addl 880(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 896(%esp), %edi + adcl 900(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 924(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 816(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 56(%esp), %ecx # 4-byte Reload + 
addl 816(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 824(%esp), %ebp + adcl 828(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 856(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 872(%esp), %esi + sbbl %eax, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl 56(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 752(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 756(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 760(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 764(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 768(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 772(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte 
Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 776(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 784(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 792(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 796(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 800(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 804(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 808(%esp), %esi + adcl $0, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 688(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 688(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 728(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 732(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill 
+ movl 60(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 740(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl %edi, %ecx + andl $1, %ecx + addl 624(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 636(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 648(%esp), %esi + movl 100(%esp), %edi # 4-byte Reload + adcl 652(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 80(%esp), %ecx # 4-byte Reload + 
addl 560(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 568(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 580(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + adcl 584(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 592(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %edi + movl %edi, %ecx + addl 496(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 508(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 
520(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 528(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 540(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 88(%esp), %ecx # 4-byte Reload + addl 432(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 440(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 444(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 452(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 472(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 56(%esp) # 4-byte 
Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 368(%esp), %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 376(%esp), %esi + adcl 380(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 392(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 304(%esp), %ecx + adcl 308(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + adcl 
312(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 324(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 328(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 240(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl 96(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 240(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 248(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 252(%esp), %edi + movl 100(%esp), %ebp # 4-byte Reload + adcl 256(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 264(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 268(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 
68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 176(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 104(%esp), %ecx # 4-byte Reload + addl 176(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 184(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + adcl 188(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 192(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 200(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 
96(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl 48(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %ebp + addl 112(%esp), %esi + movl 100(%esp), %esi # 4-byte Reload + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + adcl 124(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + adcl 128(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 132(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl %ecx, %ebx + movl 76(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 140(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 148(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 152(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 160(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 168(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl $0, %ebp + movl 1932(%esp), %ecx + subl (%ecx), %eax + sbbl 4(%ecx), %edx + sbbl 8(%ecx), %esi + sbbl 12(%ecx), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 16(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + sbbl 20(%ecx), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 
68(%esp), %edi # 4-byte Reload + sbbl 24(%ecx), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 60(%esp), %ebx # 4-byte Reload + sbbl 28(%ecx), %ebx + movl 52(%esp), %edi # 4-byte Reload + sbbl 32(%ecx), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + sbbl 36(%ecx), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 40(%ecx), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + sbbl 44(%ecx), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + sbbl 48(%ecx), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + sbbl 52(%ecx), %edi + movl %ebp, %ecx + movl %edi, 104(%esp) # 4-byte Spill + sbbl $0, %ecx + andl $1, %ecx + jne .LBB210_2 +# BB#1: + movl %ebx, 60(%esp) # 4-byte Spill +.LBB210_2: + testb %cl, %cl + movl 108(%esp), %ebx # 4-byte Reload + jne .LBB210_4 +# BB#3: + movl %eax, %ebx +.LBB210_4: + movl 1920(%esp), %eax + movl %ebx, (%eax) + movl 92(%esp), %edi # 4-byte Reload + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB210_6 +# BB#5: + movl %edx, %edi +.LBB210_6: + movl %edi, 4(%eax) + jne .LBB210_8 +# BB#7: + movl %esi, 100(%esp) # 4-byte Spill +.LBB210_8: + movl 100(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + jne .LBB210_10 +# BB#9: + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 84(%esp) # 4-byte Spill +.LBB210_10: + movl 84(%esp), %edx # 4-byte Reload + movl %edx, 12(%eax) + jne .LBB210_12 +# BB#11: + movl 20(%esp), %ecx # 4-byte Reload +.LBB210_12: + movl %ecx, 16(%eax) + movl 76(%esp), %ecx # 4-byte Reload + jne .LBB210_14 +# BB#13: + movl 24(%esp), %ecx # 4-byte Reload +.LBB210_14: + movl %ecx, 20(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB210_16 +# BB#15: + movl 28(%esp), %ecx # 4-byte Reload +.LBB210_16: + movl %ecx, 24(%eax) + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 52(%esp), %ecx # 4-byte Reload + jne .LBB210_18 +# BB#17: + movl 32(%esp), %ecx # 
4-byte Reload +.LBB210_18: + movl %ecx, 32(%eax) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB210_20 +# BB#19: + movl 36(%esp), %ecx # 4-byte Reload +.LBB210_20: + movl %ecx, 36(%eax) + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB210_22 +# BB#21: + movl 40(%esp), %ecx # 4-byte Reload +.LBB210_22: + movl %ecx, 40(%eax) + movl 80(%esp), %ecx # 4-byte Reload + jne .LBB210_24 +# BB#23: + movl 44(%esp), %ecx # 4-byte Reload +.LBB210_24: + movl %ecx, 44(%eax) + movl 88(%esp), %ecx # 4-byte Reload + jne .LBB210_26 +# BB#25: + movl 48(%esp), %ecx # 4-byte Reload +.LBB210_26: + movl %ecx, 48(%eax) + movl 96(%esp), %ecx # 4-byte Reload + jne .LBB210_28 +# BB#27: + movl 104(%esp), %ecx # 4-byte Reload +.LBB210_28: + movl %ecx, 52(%eax) + addl $1900, %esp # imm = 0x76C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end210: + .size mcl_fp_mont14Lbmi2, .Lfunc_end210-mcl_fp_mont14Lbmi2 + + .globl mcl_fp_montNF14Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF14Lbmi2,@function +mcl_fp_montNF14Lbmi2: # @mcl_fp_montNF14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1884, %esp # imm = 0x75C + calll .L211$pb +.L211$pb: + popl %ebx +.Ltmp42: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp42-.L211$pb), %ebx + movl 1916(%esp), %eax + movl -4(%eax), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1824(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1824(%esp), %edi + movl 1828(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 1880(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 1876(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1872(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1868(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1864(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1860(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1856(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte 
Spill + movl 1852(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1848(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1844(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1840(%esp), %esi + movl 1836(%esp), %ebp + movl 1832(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1760(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 1760(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1768(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1772(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + adcl 1776(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1784(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1804(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 1808(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1816(%esp), %ebp + movl 1912(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1696(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1752(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + addl 1696(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + adcl 1700(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1704(%esp), %ecx + movl 
%ecx, 72(%esp) # 4-byte Spill + adcl 1708(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1712(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1716(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1720(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1724(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1728(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1732(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1736(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1740(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + adcl 1748(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %ebp + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1632(%esp), %ecx + movl 1916(%esp), %eax + movl %eax, %edx + calll .LmulPv448x32 + addl 1632(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1640(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 1664(%esp), %esi + movl 52(%esp), %eax # 
4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1680(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1688(%esp), %ebp + movl 1912(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1568(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1624(%esp), %eax + movl 88(%esp), %edx # 4-byte Reload + addl 1568(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + adcl 1572(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1576(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1580(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1584(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1588(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1592(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 1596(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1600(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1604(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1608(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 1612(%esp), %edi + movl 92(%esp), %ecx # 4-byte Reload + adcl 1616(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1620(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %ebp + movl %edx, %esi + movl %esi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1504(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 
1504(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1512(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1516(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1544(%esp), %esi + adcl 1548(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1560(%esp), %ebp + movl 1912(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1440(%esp), %ecx + movl 1908(%esp), %eax + movl %eax, %edx + calll .LmulPv448x32 + movl 1496(%esp), %eax + movl 72(%esp), %edx # 4-byte Reload + addl 1440(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1448(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1452(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1460(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 
56(%esp), %edi # 4-byte Reload + adcl 1464(%esp), %edi + movl 52(%esp), %ecx # 4-byte Reload + adcl 1468(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1472(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1476(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1480(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1484(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1488(%esp), %esi + adcl 1492(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %ebp + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1376(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 1376(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 1400(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1424(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + 
movl 64(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1312(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1368(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + addl 1312(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + adcl 1316(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1320(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 1328(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1336(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1340(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1344(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1348(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1352(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1356(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 1360(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1364(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1248(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 1248(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 
40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1260(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 1264(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1272(%esp), %ebp + adcl 1276(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 1284(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 1300(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1184(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1240(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + addl 1184(%esp), %ecx + movl 40(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1204(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1208(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1216(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte 
Reload + adcl 1220(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1232(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1120(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 1120(%esp), %esi + movl 40(%esp), %ebp # 4-byte Reload + adcl 1124(%esp), %ebp + movl 44(%esp), %edi # 4-byte Reload + adcl 1128(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 1156(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 24(%eax), %eax + 
movl %eax, (%esp) + leal 1056(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1112(%esp), %eax + movl %ebp, %ecx + addl 1056(%esp), %ecx + adcl 1060(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 1064(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1068(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 1072(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1076(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 1080(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 1084(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + adcl 1088(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1092(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 1096(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 1100(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1104(%esp), %ebp + movl 60(%esp), %edx # 4-byte Reload + adcl 1108(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %esi + movl %ecx, %edi + movl %edi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 992(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 1008(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 84(%esp) # 
4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1040(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 1044(%esp), %ebp + adcl 1048(%esp), %esi + movl 1912(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 984(%esp), %eax + movl 44(%esp), %ecx # 4-byte Reload + addl 928(%esp), %ecx + movl 48(%esp), %edx # 4-byte Reload + adcl 932(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 936(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 940(%esp), %edi + movl 84(%esp), %edx # 4-byte Reload + adcl 944(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 948(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 952(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 956(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 960(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 964(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 968(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 972(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 976(%esp), %ebp 
+ movl %ebp, 60(%esp) # 4-byte Spill + adcl 980(%esp), %esi + movl %esi, %ebp + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 864(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 864(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 876(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 884(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 916(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 920(%esp), %ebp + movl 1912(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 800(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 856(%esp), %edx + movl 48(%esp), %ecx # 4-byte Reload + addl 800(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 808(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 
816(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 828(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 852(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 736(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 736(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 764(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 768(%esp), %ebp + movl 72(%esp), %esi # 4-byte Reload + adcl 772(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %edi # 
4-byte Reload + adcl 780(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 728(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 672(%esp), %ecx + movl 52(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 700(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + adcl 704(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 712(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 608(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 608(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + 
adcl 612(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 616(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 624(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 644(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 600(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + addl 544(%esp), %ecx + adcl 548(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 556(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 568(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 72(%esp) # 
4-byte Spill + adcl 576(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 480(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 480(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 488(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 496(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 504(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 532(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + 
movl %eax, 52(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 472(%esp), %edx + movl 84(%esp), %ecx # 4-byte Reload + addl 416(%esp), %ecx + adcl 420(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 424(%esp), %edi + adcl 428(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 464(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 84(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 352(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 360(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 364(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 372(%esp), 
%eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 388(%esp), %edi + movl 40(%esp), %ebp # 4-byte Reload + adcl 392(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 288(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 344(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + addl 288(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 296(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 320(%esp), %edi + adcl 324(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 328(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + 
adcl 336(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 224(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 232(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 256(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 260(%esp), %edi + adcl 264(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 160(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 216(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + addl 160(%esp), %ecx + adcl 164(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 168(%esp), %ebp + movl 64(%esp), %eax # 4-byte 
Reload + adcl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 192(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 96(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 96(%esp), %esi + movl 64(%esp), %esi # 4-byte Reload + movl 92(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 104(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl %ebp, %ebx + adcl 108(%esp), %esi + adcl 112(%esp), %edi + movl 68(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 124(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 128(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 132(%esp), %edx + 
movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 136(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 140(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 148(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 152(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, %edx + movl 1916(%esp), %ebp + subl (%ebp), %edx + sbbl 4(%ebp), %ebx + movl %esi, %eax + sbbl 8(%ebp), %eax + movl %edi, %ecx + sbbl 12(%ebp), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + sbbl 20(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + sbbl 36(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + sbbl 40(%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 44(%ebp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 48(%ebp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 52(%ebp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + sarl $31, %ecx + testl %ecx, %ecx + movl 92(%esp), %ebp # 4-byte Reload + js .LBB211_2 +# BB#1: + movl %edx, %ebp +.LBB211_2: + movl 1904(%esp), %edx + movl %ebp, (%edx) + movl 88(%esp), %ebp # 4-byte Reload + js .LBB211_4 +# BB#3: + movl %ebx, %ebp +.LBB211_4: + movl %ebp, 4(%edx) + js .LBB211_6 +# 
BB#5: + movl %eax, %esi +.LBB211_6: + movl %esi, 8(%edx) + js .LBB211_8 +# BB#7: + movl 4(%esp), %edi # 4-byte Reload +.LBB211_8: + movl %edi, 12(%edx) + movl 68(%esp), %eax # 4-byte Reload + js .LBB211_10 +# BB#9: + movl 8(%esp), %eax # 4-byte Reload +.LBB211_10: + movl %eax, 16(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB211_12 +# BB#11: + movl 12(%esp), %eax # 4-byte Reload +.LBB211_12: + movl %eax, 20(%edx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB211_14 +# BB#13: + movl 16(%esp), %eax # 4-byte Reload +.LBB211_14: + movl %eax, 24(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB211_16 +# BB#15: + movl 20(%esp), %eax # 4-byte Reload +.LBB211_16: + movl %eax, 28(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB211_18 +# BB#17: + movl 24(%esp), %eax # 4-byte Reload +.LBB211_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB211_20 +# BB#19: + movl 28(%esp), %eax # 4-byte Reload +.LBB211_20: + movl %eax, 36(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB211_22 +# BB#21: + movl 32(%esp), %eax # 4-byte Reload +.LBB211_22: + movl %eax, 40(%edx) + movl 84(%esp), %eax # 4-byte Reload + js .LBB211_24 +# BB#23: + movl 36(%esp), %eax # 4-byte Reload +.LBB211_24: + movl %eax, 44(%edx) + movl 76(%esp), %eax # 4-byte Reload + js .LBB211_26 +# BB#25: + movl 64(%esp), %eax # 4-byte Reload +.LBB211_26: + movl %eax, 48(%edx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB211_28 +# BB#27: + movl 72(%esp), %eax # 4-byte Reload +.LBB211_28: + movl %eax, 52(%edx) + addl $1884, %esp # imm = 0x75C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end211: + .size mcl_fp_montNF14Lbmi2, .Lfunc_end211-mcl_fp_montNF14Lbmi2 + + .globl mcl_fp_montRed14Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed14Lbmi2,@function +mcl_fp_montRed14Lbmi2: # @mcl_fp_montRed14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1068, %esp # imm = 0x42C + calll .L212$pb +.L212$pb: + popl %eax +.Ltmp43: + addl 
$_GLOBAL_OFFSET_TABLE_+(.Ltmp43-.L212$pb), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1096(%esp), %edx + movl -4(%edx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1092(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 92(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 96(%esp) # 4-byte Spill + imull %eax, %ebx + movl 108(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 100(%ecx), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 96(%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 92(%ecx), %esi + movl %esi, 140(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 84(%ecx), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 80(%ecx), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 76(%ecx), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 136(%esp) # 4-byte Spill + movl 68(%ecx), %esi + movl %esi, 168(%esp) # 4-byte Spill + movl 64(%ecx), %esi + movl %esi, 164(%esp) # 4-byte Spill + movl 60(%ecx), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 52(%ecx), %edi + movl %edi, 144(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 40(%ecx), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 36(%ecx), %ebp + movl 32(%ecx), %edi + movl 28(%ecx), %esi + movl 24(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 20(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 12(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 8(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 52(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte 
Spill + movl 40(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 1008(%esp), %ecx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + movl 92(%esp), %eax # 4-byte Reload + addl 1008(%esp), %eax + movl 96(%esp), %ecx # 4-byte Reload + adcl 1012(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1036(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 1040(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + adcl 1044(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 1052(%esp), %ebp + movl 132(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl $0, 160(%esp) # 4-byte 
Folded Spill + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + sbbl %edi, %edi + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 944(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + movl %edi, %ecx + andl $1, %ecx + addl 944(%esp), %esi + movl 84(%esp), %edx # 4-byte Reload + adcl 948(%esp), %edx + movl 64(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 976(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 984(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 
160(%esp), %esi # 4-byte Reload + adcl 1000(%esp), %esi + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %ebp + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 880(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 880(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 884(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 908(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + adcl 920(%esp), %edi + movl 144(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl 932(%esp), %esi + movl %esi, 160(%esp) # 4-byte Spill + movl 
164(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + movl 152(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 816(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 816(%esp), %esi + movl 68(%esp), %ecx # 4-byte Reload + adcl 820(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 852(%esp), %edi + movl %edi, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte 
Reload + adcl 868(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 152(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + movl 128(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 104(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 752(%esp), %ebp + movl 72(%esp), %ecx # 4-byte Reload + adcl 756(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload 
+ adcl 800(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + movl 156(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 128(%esp) # 4-byte Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 104(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 688(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 688(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 164(%esp) # 
4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 156(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 140(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + movl 108(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 624(%esp), %ebp + movl 80(%esp), %ecx # 4-byte Reload + adcl 628(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %ebp # 4-byte Reload + adcl 664(%esp), %ebp + movl 168(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 
136(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 140(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 560(%esp), %esi + movl 96(%esp), %ecx # 4-byte Reload + adcl 564(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + adcl 596(%esp), %ebp + movl %ebp, 164(%esp) # 4-byte Spill + movl 168(%esp), %edi # 4-byte Reload + adcl 600(%esp), %edi + movl 136(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 156(%esp), %eax # 
4-byte Reload + adcl 612(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + movl 120(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1096(%esp), %eax + movl %eax, %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 496(%esp), %ebp + movl 92(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl 116(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %ebp # 4-byte Reload + adcl 516(%esp), %ebp + movl 172(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + adcl 532(%esp), %edi + movl %edi, 168(%esp) # 4-byte Spill + movl 136(%esp), %edi # 4-byte Reload + adcl 536(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl $0, 
140(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 432(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl 112(%esp), %ecx # 4-byte Reload + adcl 440(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + adcl 448(%esp), %ebp + movl %ebp, 144(%esp) # 4-byte Spill + movl 172(%esp), %ecx # 4-byte Reload + adcl 452(%esp), %ecx + movl %ecx, 172(%esp) # 4-byte Spill + movl 160(%esp), %ebp # 4-byte Reload + adcl 456(%esp), %ebp + movl 164(%esp), %ecx # 4-byte Reload + adcl 460(%esp), %ecx + movl %ecx, 164(%esp) # 4-byte Spill + movl 168(%esp), %ecx # 4-byte Reload + adcl 464(%esp), %ecx + movl %ecx, 168(%esp) # 4-byte Spill + adcl 468(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 472(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 484(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 140(%esp), %ecx # 4-byte Reload + adcl 488(%esp), %ecx + movl %ecx, 140(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %eax, %esi + movl 88(%esp), %edi # 4-byte 
Reload + imull %edi, %eax + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 368(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl 132(%esp), %ecx # 4-byte Reload + adcl 376(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 380(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 172(%esp), %esi # 4-byte Reload + adcl 384(%esp), %esi + adcl 388(%esp), %ebp + movl %ebp, 160(%esp) # 4-byte Spill + movl 164(%esp), %ecx # 4-byte Reload + adcl 392(%esp), %ecx + movl %ecx, 164(%esp) # 4-byte Spill + movl 168(%esp), %ecx # 4-byte Reload + adcl 396(%esp), %ecx + movl %ecx, 168(%esp) # 4-byte Spill + movl 136(%esp), %ecx # 4-byte Reload + adcl 400(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 404(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 408(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 412(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 416(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 140(%esp), %ecx # 4-byte Reload + adcl 420(%esp), %ecx + movl %ecx, 140(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 424(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %eax, %ebp + imull %edi, %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 304(%esp), %ebp + movl 132(%esp), %edi # 4-byte Reload + adcl 308(%esp), %edi + movl 144(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl %esi, 
%ebp + adcl 316(%esp), %ebp + movl 160(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 136(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + movl 148(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %edi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 240(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 240(%esp), %edi + movl 144(%esp), %ecx # 4-byte Reload + adcl 244(%esp), %ecx + adcl 248(%esp), %ebp + movl %ebp, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl 264(%esp), %esi + movl %esi, 136(%esp) # 4-byte Spill + movl 148(%esp), %edi # 4-byte Reload + adcl 268(%esp), %edi + movl 156(%esp), 
%eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl 280(%esp), %ebp + movl 140(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl 88(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 176(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 176(%esp), %esi + movl 172(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %ebx # 4-byte Reload + adcl 188(%esp), %ebx + movl %ebx, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 136(%esp), %edx # 4-byte Reload + adcl 196(%esp), %edx + movl %edx, 136(%esp) # 4-byte Spill + movl %edi, %eax + adcl 200(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl 212(%esp), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + 
movl 120(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 232(%esp), %ecx + movl 84(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl 172(%esp), %edi # 4-byte Reload + subl 16(%esp), %edi # 4-byte Folded Reload + movl 160(%esp), %ebp # 4-byte Reload + sbbl 8(%esp), %ebp # 4-byte Folded Reload + sbbl 12(%esp), %ebx # 4-byte Folded Reload + movl 168(%esp), %eax # 4-byte Reload + sbbl 20(%esp), %eax # 4-byte Folded Reload + sbbl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl 148(%esp), %edx # 4-byte Reload + sbbl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl 156(%esp), %edx # 4-byte Reload + sbbl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl 152(%esp), %edx # 4-byte Reload + sbbl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 92(%esp) # 4-byte Spill + movl 124(%esp), %edx # 4-byte Reload + sbbl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 96(%esp) # 4-byte Spill + movl 140(%esp), %edx # 4-byte Reload + sbbl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 100(%esp) # 4-byte Spill + movl 128(%esp), %edx # 4-byte Reload + sbbl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 112(%esp) # 4-byte Spill + movl 120(%esp), %edx # 4-byte Reload + sbbl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 116(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + sbbl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 132(%esp) # 4-byte Spill + movl %ecx, %edx + sbbl 60(%esp), %edx # 4-byte Folded Reload + movl %edx, 144(%esp) # 4-byte Spill + sbbl $0, %esi + andl $1, %esi + jne .LBB212_2 +# BB#1: + movl %eax, 168(%esp) # 4-byte Spill +.LBB212_2: + movl %esi, %edx + testb %dl, %dl + movl 172(%esp), %eax # 4-byte Reload + jne .LBB212_4 +# BB#3: + movl %edi, %eax 
+.LBB212_4: + movl 1088(%esp), %edi + movl %eax, (%edi) + movl %ecx, 104(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + movl 160(%esp), %ecx # 4-byte Reload + jne .LBB212_6 +# BB#5: + movl %ebp, %ecx +.LBB212_6: + movl %ecx, 4(%edi) + movl 108(%esp), %ecx # 4-byte Reload + movl 164(%esp), %ebp # 4-byte Reload + jne .LBB212_8 +# BB#7: + movl %ebx, %ebp +.LBB212_8: + movl %ebp, 8(%edi) + movl 168(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%edi) + movl 124(%esp), %ebp # 4-byte Reload + movl 136(%esp), %ebx # 4-byte Reload + jne .LBB212_10 +# BB#9: + movl 80(%esp), %ebx # 4-byte Reload +.LBB212_10: + movl %ebx, 16(%edi) + movl 140(%esp), %ebx # 4-byte Reload + movl 148(%esp), %esi # 4-byte Reload + jne .LBB212_12 +# BB#11: + movl 84(%esp), %esi # 4-byte Reload +.LBB212_12: + movl %esi, 20(%edi) + movl 128(%esp), %esi # 4-byte Reload + jne .LBB212_14 +# BB#13: + movl 88(%esp), %eax # 4-byte Reload +.LBB212_14: + movl %eax, 24(%edi) + movl 120(%esp), %edx # 4-byte Reload + jne .LBB212_16 +# BB#15: + movl 92(%esp), %eax # 4-byte Reload + movl %eax, 152(%esp) # 4-byte Spill +.LBB212_16: + movl 152(%esp), %eax # 4-byte Reload + movl %eax, 28(%edi) + jne .LBB212_18 +# BB#17: + movl 96(%esp), %ebp # 4-byte Reload +.LBB212_18: + movl %ebp, 32(%edi) + jne .LBB212_20 +# BB#19: + movl 100(%esp), %ebx # 4-byte Reload +.LBB212_20: + movl %ebx, 36(%edi) + jne .LBB212_22 +# BB#21: + movl 112(%esp), %esi # 4-byte Reload +.LBB212_22: + movl %esi, 40(%edi) + jne .LBB212_24 +# BB#23: + movl 116(%esp), %edx # 4-byte Reload +.LBB212_24: + movl %edx, 44(%edi) + jne .LBB212_26 +# BB#25: + movl 132(%esp), %ecx # 4-byte Reload +.LBB212_26: + movl %ecx, 48(%edi) + movl 104(%esp), %eax # 4-byte Reload + jne .LBB212_28 +# BB#27: + movl 144(%esp), %eax # 4-byte Reload +.LBB212_28: + movl %eax, 52(%edi) + addl $1068, %esp # imm = 0x42C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end212: + .size mcl_fp_montRed14Lbmi2, .Lfunc_end212-mcl_fp_montRed14Lbmi2 + + .globl 
mcl_fp_addPre14Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre14Lbmi2,@function +mcl_fp_addPre14Lbmi2: # @mcl_fp_addPre14Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ebx) + movl 36(%ecx), %edx + adcl %edi, %edx + movl 40(%eax), %edi + movl %esi, 32(%ebx) + movl 40(%ecx), %esi + adcl %edi, %esi + movl 44(%eax), %edi + movl %edx, 36(%ebx) + movl 44(%ecx), %edx + adcl %edi, %edx + movl 48(%eax), %edi + movl %esi, 40(%ebx) + movl 48(%ecx), %esi + adcl %edi, %esi + movl %edx, 44(%ebx) + movl %esi, 48(%ebx) + movl 52(%eax), %eax + movl 52(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 52(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end213: + .size mcl_fp_addPre14Lbmi2, .Lfunc_end213-mcl_fp_addPre14Lbmi2 + + .globl mcl_fp_subPre14Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre14Lbmi2,@function +mcl_fp_subPre14Lbmi2: # @mcl_fp_subPre14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), 
%edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ebp) + movl 36(%ecx), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ebp) + movl 40(%ecx), %edi + sbbl %ebx, %edi + movl 44(%edx), %ebx + movl %esi, 36(%ebp) + movl 44(%ecx), %esi + sbbl %ebx, %esi + movl 48(%edx), %ebx + movl %edi, 40(%ebp) + movl 48(%ecx), %edi + sbbl %ebx, %edi + movl %esi, 44(%ebp) + movl %edi, 48(%ebp) + movl 52(%edx), %edx + movl 52(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 52(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end214: + .size mcl_fp_subPre14Lbmi2, .Lfunc_end214-mcl_fp_subPre14Lbmi2 + + .globl mcl_fp_shr1_14Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_14Lbmi2,@function +mcl_fp_shr1_14Lbmi2: # @mcl_fp_shr1_14Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 
44(%ecx) + movl 52(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 48(%ecx) + shrl %eax + movl %eax, 52(%ecx) + popl %esi + retl +.Lfunc_end215: + .size mcl_fp_shr1_14Lbmi2, .Lfunc_end215-mcl_fp_shr1_14Lbmi2 + + .globl mcl_fp_add14Lbmi2 + .align 16, 0x90 + .type mcl_fp_add14Lbmi2,@function +mcl_fp_add14Lbmi2: # @mcl_fp_add14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 72(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 68(%esp), %ebp + addl (%ebp), %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 4(%ebp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 8(%eax), %ecx + adcl 8(%ebp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 12(%ebp), %edx + movl 16(%ebp), %ecx + adcl 12(%eax), %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 16(%eax), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%ebp), %ecx + adcl 20(%eax), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 24(%ebp), %ecx + adcl 24(%eax), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 28(%ebp), %ecx + adcl 28(%eax), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 32(%ebp), %ecx + adcl 32(%eax), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 36(%ebp), %ecx + adcl 36(%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 40(%ebp), %edx + adcl 40(%eax), %edx + movl %edx, (%esp) # 4-byte Spill + movl 44(%ebp), %ebx + adcl 44(%eax), %ebx + movl 48(%ebp), %esi + adcl 48(%eax), %esi + movl 52(%ebp), %edi + adcl 52(%eax), %edi + movl 64(%esp), %eax + movl 4(%esp), %ebp # 4-byte Reload + movl %ebp, (%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 12(%esp), %ecx # 4-byte Reload 
+ movl %ecx, 32(%eax) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl %edx, 40(%eax) + movl %ebx, 44(%eax) + movl %esi, 48(%eax) + movl %edi, 52(%eax) + sbbl %ecx, %ecx + andl $1, %ecx + movl 76(%esp), %edx + subl (%edx), %ebp + movl %ebp, 4(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + sbbl 4(%edx), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + sbbl 8(%edx), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + sbbl 12(%edx), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + sbbl 16(%edx), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 24(%esp), %ebp # 4-byte Reload + sbbl 20(%edx), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 20(%esp), %ebp # 4-byte Reload + sbbl 24(%edx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 16(%esp), %ebp # 4-byte Reload + sbbl 28(%edx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + sbbl 32(%edx), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebp # 4-byte Reload + sbbl 36(%edx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl (%esp), %ebp # 4-byte Reload + sbbl 40(%edx), %ebp + sbbl 44(%edx), %ebx + sbbl 48(%edx), %esi + sbbl 52(%edx), %edi + sbbl $0, %ecx + testb $1, %cl + jne .LBB216_2 +# BB#1: # %nocarry + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, (%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl %ebp, 40(%eax) + movl %ebx, 44(%eax) + movl %esi, 
48(%eax) + movl %edi, 52(%eax) +.LBB216_2: # %carry + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end216: + .size mcl_fp_add14Lbmi2, .Lfunc_end216-mcl_fp_add14Lbmi2 + + .globl mcl_fp_addNF14Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF14Lbmi2,@function +mcl_fp_addNF14Lbmi2: # @mcl_fp_addNF14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $112, %esp + movl 140(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 136(%esp), %ecx + addl (%ecx), %edx + movl %edx, 72(%esp) # 4-byte Spill + adcl 4(%ecx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 52(%eax), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 48(%eax), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 44(%eax), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 40(%eax), %ebp + movl 36(%eax), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 32(%eax), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 28(%eax), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 24(%eax), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 20(%eax), %ebx + movl 16(%eax), %edi + movl 12(%eax), %esi + movl 8(%eax), %edx + adcl 8(%ecx), %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 12(%ecx), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 16(%ecx), %edi + movl %edi, 64(%esp) # 4-byte Spill + adcl 20(%ecx), %ebx + movl %ebx, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 24(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 28(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 32(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 36(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 40(%ecx), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 44(%ecx), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 48(%ecx), %ebp + movl %ebp, 
96(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 52(%ecx), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 144(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + subl (%ecx), %eax + movl %eax, (%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 4(%ecx), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 8(%ecx), %edx + movl %edx, 8(%esp) # 4-byte Spill + sbbl 12(%ecx), %esi + movl %esi, 12(%esp) # 4-byte Spill + sbbl 16(%ecx), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 20(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + movl %edx, %eax + sbbl 24(%ecx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 28(%ecx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 32(%ecx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + sbbl 36(%ecx), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + sbbl 40(%ecx), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + movl %eax, %esi + movl %eax, %ebp + sbbl 44(%ecx), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + movl %eax, %esi + sbbl 48(%ecx), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + movl %eax, %edi + sbbl 52(%ecx), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl %edi, %ecx + sarl $31, %ecx + testl %ecx, %ecx + movl 72(%esp), %ecx # 4-byte Reload + js .LBB217_2 +# BB#1: + movl (%esp), %ecx # 4-byte Reload +.LBB217_2: + movl 132(%esp), %edi + movl %ecx, (%edi) + movl 76(%esp), %eax # 4-byte Reload + js .LBB217_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB217_4: + movl %eax, 4(%edi) + movl %edx, %ecx + movl 64(%esp), %eax # 4-byte Reload + movl 56(%esp), %edx # 4-byte Reload + js .LBB217_6 +# BB#5: + movl 8(%esp), %edx # 4-byte Reload +.LBB217_6: + movl %edx, 8(%edi) + movl %ebp, %edx + movl 104(%esp), 
%ebx # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + js .LBB217_8 +# BB#7: + movl 12(%esp), %ebp # 4-byte Reload +.LBB217_8: + movl %ebp, 12(%edi) + movl 100(%esp), %ebp # 4-byte Reload + js .LBB217_10 +# BB#9: + movl 16(%esp), %eax # 4-byte Reload +.LBB217_10: + movl %eax, 16(%edi) + movl 80(%esp), %esi # 4-byte Reload + js .LBB217_12 +# BB#11: + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 68(%esp) # 4-byte Spill +.LBB217_12: + movl 68(%esp), %eax # 4-byte Reload + movl %eax, 20(%edi) + js .LBB217_14 +# BB#13: + movl 24(%esp), %ecx # 4-byte Reload +.LBB217_14: + movl %ecx, 24(%edi) + js .LBB217_16 +# BB#15: + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 108(%esp) # 4-byte Spill +.LBB217_16: + movl 108(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%edi) + js .LBB217_18 +# BB#17: + movl 32(%esp), %ebp # 4-byte Reload +.LBB217_18: + movl %ebp, 32(%edi) + js .LBB217_20 +# BB#19: + movl 36(%esp), %ebx # 4-byte Reload +.LBB217_20: + movl %ebx, 36(%edi) + js .LBB217_22 +# BB#21: + movl 40(%esp), %esi # 4-byte Reload +.LBB217_22: + movl %esi, 40(%edi) + movl 96(%esp), %eax # 4-byte Reload + js .LBB217_24 +# BB#23: + movl 44(%esp), %edx # 4-byte Reload +.LBB217_24: + movl %edx, 44(%edi) + movl 92(%esp), %ecx # 4-byte Reload + js .LBB217_26 +# BB#25: + movl 48(%esp), %eax # 4-byte Reload +.LBB217_26: + movl %eax, 48(%edi) + js .LBB217_28 +# BB#27: + movl 52(%esp), %ecx # 4-byte Reload +.LBB217_28: + movl %ecx, 52(%edi) + addl $112, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end217: + .size mcl_fp_addNF14Lbmi2, .Lfunc_end217-mcl_fp_addNF14Lbmi2 + + .globl mcl_fp_sub14Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub14Lbmi2,@function +mcl_fp_sub14Lbmi2: # @mcl_fp_sub14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 76(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 80(%esp), %edi + subl (%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 
16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 36(%esi), %edx + sbbl 36(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 40(%esi), %ecx + sbbl 40(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 44(%esi), %eax + sbbl 44(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 48(%esi), %ebp + sbbl 48(%edi), %ebp + movl 52(%esi), %esi + sbbl 52(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 72(%esp), %ebx + movl 44(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl %edx, 36(%ebx) + movl %ecx, 40(%ebx) + movl %eax, 44(%ebx) + movl %ebp, 48(%ebx) + movl %esi, 52(%ebx) + je .LBB218_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 84(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 36(%esp), %edi # 4-byte Reload + adcl 8(%esi), %edi + movl 12(%esi), %eax + adcl 48(%esp), %eax # 4-byte Folded Reload + movl 
%edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl %eax, 44(%ebx) + movl 48(%esi), %eax + adcl %ebp, %eax + movl %eax, 48(%ebx) + movl 52(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ebx) +.LBB218_2: # %nocarry + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end218: + .size mcl_fp_sub14Lbmi2, .Lfunc_end218-mcl_fp_sub14Lbmi2 + + .globl mcl_fp_subNF14Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF14Lbmi2,@function +mcl_fp_subNF14Lbmi2: # @mcl_fp_subNF14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 112(%esp), %ecx + movl 52(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl (%ecx), %edx + movl 4(%ecx), %eax + movl 116(%esp), %edi + subl (%edi), %edx + movl %edx, 56(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 32(%ecx), %ebp + movl 28(%ecx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 
32(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 40(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 44(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 28(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + sbbl 32(%edi), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %eax, %esi + sarl $31, %esi + movl %esi, %ecx + addl %ecx, %ecx + movl %esi, %ebp + adcl %ebp, %ebp + shrl $31, %eax + orl %ecx, %eax + movl 120(%esp), %edi + andl 4(%edi), %ebp + andl (%edi), %eax + movl 52(%edi), %ecx + andl %esi, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 48(%edi), %ecx + andl %esi, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%edi), %ecx + andl %esi, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 40(%edi), %ecx + andl %esi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 36(%edi), %ecx + andl %esi, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 32(%edi), %ecx + andl %esi, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 28(%edi), %ecx + andl %esi, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 24(%edi), %ecx + andl %esi, %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 20(%edi), %ebx + andl %esi, %ebx + movl 16(%edi), %edx + andl %esi, %edx + movl 12(%edi), %ecx + andl %esi, %ecx + andl 8(%edi), %esi + addl 56(%esp), %eax # 4-byte Folded Reload + adcl 60(%esp), %ebp # 4-byte Folded Reload + movl 108(%esp), %edi + movl %eax, (%edi) + adcl 
32(%esp), %esi # 4-byte Folded Reload + movl %ebp, 4(%edi) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %esi, 8(%edi) + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %ecx, 12(%edi) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %edx, 16(%edi) + movl (%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %ebx, 20(%edi) + movl 4(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %eax, 24(%edi) + movl 8(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %ecx, 28(%edi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %eax, 32(%edi) + movl 16(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %ecx, 36(%edi) + movl 20(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %eax, 40(%edi) + movl 24(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %ecx, 44(%edi) + movl %eax, 48(%edi) + movl 28(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%edi) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end219: + .size mcl_fp_subNF14Lbmi2, .Lfunc_end219-mcl_fp_subNF14Lbmi2 + + .globl mcl_fpDbl_add14Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add14Lbmi2,@function +mcl_fpDbl_add14Lbmi2: # @mcl_fpDbl_add14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 128(%esp), %ecx + movl 124(%esp), %esi + movl 12(%esi), %edi + movl 16(%esi), %edx + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%esi), %ebp + movl 120(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%esi), %ebp + adcl 8(%esi), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %edx + movl %ebp, 4(%eax) + movl 64(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%esi), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %edx, 16(%eax) + movl 24(%esi), %edx + 
adcl %ebx, %edx + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%esi), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %edx, 24(%eax) + movl 32(%esi), %edx + adcl %ebx, %edx + movl 36(%ecx), %ebx + movl %edi, 28(%eax) + movl 36(%esi), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %edx, 32(%eax) + movl 40(%esi), %edx + adcl %ebx, %edx + movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%esi), %edi + adcl %ebx, %edi + movl 48(%ecx), %ebx + movl %edx, 40(%eax) + movl 48(%esi), %edx + adcl %ebx, %edx + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%esi), %edi + adcl %ebx, %edi + movl 56(%ecx), %ebx + movl %edx, 48(%eax) + movl 56(%esi), %edx + adcl %ebx, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 60(%ecx), %edx + movl %edi, 52(%eax) + movl 60(%esi), %eax + adcl %edx, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 64(%esi), %eax + adcl %ebp, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%ecx), %edx + movl 68(%esi), %eax + adcl %edx, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%ecx), %edx + movl 72(%esi), %eax + adcl %edx, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 76(%ecx), %edx + movl 76(%esi), %eax + adcl %edx, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%ecx), %edx + movl 80(%esi), %eax + adcl %edx, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%ecx), %edx + movl 84(%esi), %eax + adcl %edx, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 88(%ecx), %edx + movl 88(%esi), %eax + adcl %edx, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 92(%ecx), %edx + movl 92(%esi), %eax + adcl %edx, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 96(%ecx), %edx + movl 96(%esi), %eax + adcl %edx, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 100(%ecx), %edx + movl 100(%esi), %edi + adcl %edx, %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 104(%ecx), %edx + movl 104(%esi), %ebx + adcl %edx, %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 108(%ecx), %ecx + movl 108(%esi), %esi + adcl %ecx, %esi + sbbl %edx, %edx 
+ andl $1, %edx + movl 132(%esp), %ebp + movl 72(%esp), %ecx # 4-byte Reload + subl (%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 4(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 20(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + sbbl 36(%ebp), %ecx + sbbl 40(%ebp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 44(%ebp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl %ebx, %eax + movl %esi, %ebx + sbbl 48(%ebp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl 52(%ebp), %esi + sbbl $0, %edx + andl $1, %edx + jne .LBB220_2 +# BB#1: + movl %esi, %ebx +.LBB220_2: + testb %dl, %dl + movl 72(%esp), %eax # 4-byte Reload + movl 68(%esp), %edx # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB220_4 +# BB#3: + movl %ecx, %edx + movl (%esp), %edi # 4-byte Reload + movl 4(%esp), %ebp # 4-byte Reload + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 
76(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload +.LBB220_4: + movl 120(%esp), %esi + movl %eax, 56(%esi) + movl 76(%esp), %eax # 4-byte Reload + movl %eax, 60(%esi) + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 64(%esi) + movl 84(%esp), %eax # 4-byte Reload + movl %eax, 68(%esi) + movl 88(%esp), %eax # 4-byte Reload + movl %eax, 72(%esi) + movl 92(%esp), %eax # 4-byte Reload + movl %eax, 76(%esi) + movl 96(%esp), %eax # 4-byte Reload + movl %eax, 80(%esi) + movl %ebp, 84(%esi) + movl %edi, 88(%esi) + movl %edx, 92(%esi) + movl 52(%esp), %edx # 4-byte Reload + movl 48(%esp), %eax # 4-byte Reload + jne .LBB220_6 +# BB#5: + movl 36(%esp), %eax # 4-byte Reload +.LBB220_6: + movl %eax, 96(%esi) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB220_8 +# BB#7: + movl 40(%esp), %edx # 4-byte Reload +.LBB220_8: + movl %edx, 100(%esi) + jne .LBB220_10 +# BB#9: + movl 44(%esp), %ecx # 4-byte Reload +.LBB220_10: + movl %ecx, 104(%esi) + movl %ebx, 108(%esi) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end220: + .size mcl_fpDbl_add14Lbmi2, .Lfunc_end220-mcl_fpDbl_add14Lbmi2 + + .globl mcl_fpDbl_sub14Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub14Lbmi2,@function +mcl_fpDbl_sub14Lbmi2: # @mcl_fpDbl_sub14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $96, %esp + movl 120(%esp), %ebx + movl (%ebx), %eax + movl 4(%ebx), %edx + movl 124(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %edx + movl 8(%ebx), %esi + sbbl 8(%ebp), %esi + movl 116(%esp), %ecx + movl %eax, (%ecx) + movl 12(%ebx), %eax + sbbl 12(%ebp), %eax + movl %edx, 4(%ecx) + movl 16(%ebx), %edx + sbbl 16(%ebp), %edx + movl %esi, 8(%ecx) + movl 20(%ebp), %esi + movl %eax, 12(%ecx) + movl 20(%ebx), %eax + sbbl %esi, %eax + movl 24(%ebp), %esi + movl %edx, 16(%ecx) + movl 24(%ebx), %edx + sbbl %esi, %edx + movl 28(%ebp), %esi + movl %eax, 20(%ecx) + movl 28(%ebx), %eax + sbbl %esi, %eax + movl 32(%ebp), %esi + movl %edx, 24(%ecx) + movl 
32(%ebx), %edx + sbbl %esi, %edx + movl 36(%ebp), %esi + movl %eax, 28(%ecx) + movl 36(%ebx), %eax + sbbl %esi, %eax + movl 40(%ebp), %esi + movl %edx, 32(%ecx) + movl 40(%ebx), %edx + sbbl %esi, %edx + movl 44(%ebp), %esi + movl %eax, 36(%ecx) + movl 44(%ebx), %eax + sbbl %esi, %eax + movl 48(%ebp), %esi + movl %edx, 40(%ecx) + movl 48(%ebx), %edx + sbbl %esi, %edx + movl 52(%ebp), %esi + movl %eax, 44(%ecx) + movl 52(%ebx), %eax + sbbl %esi, %eax + movl 56(%ebp), %esi + movl %edx, 48(%ecx) + movl 56(%ebx), %edx + sbbl %esi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 60(%ebp), %edx + movl %eax, 52(%ecx) + movl 60(%ebx), %eax + sbbl %edx, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 64(%ebp), %eax + movl 64(%ebx), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 68(%ebp), %eax + movl 68(%ebx), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 72(%ebp), %eax + movl 72(%ebx), %edx + sbbl %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 76(%ebp), %eax + movl 76(%ebx), %edx + sbbl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 80(%ebp), %eax + movl 80(%ebx), %edx + sbbl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 84(%ebp), %eax + movl 84(%ebx), %edx + sbbl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 88(%ebp), %eax + movl 88(%ebx), %edx + sbbl %eax, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 92(%ebp), %eax + movl 92(%ebx), %edx + sbbl %eax, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 96(%ebp), %eax + movl 96(%ebx), %edx + sbbl %eax, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 100(%ebp), %eax + movl 100(%ebx), %edx + sbbl %eax, %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 104(%ebp), %eax + movl 104(%ebx), %edx + sbbl %eax, %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 108(%ebp), %eax + movl 108(%ebx), %edx + sbbl %eax, %edx + movl %edx, 92(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 128(%esp), %ebp + jne .LBB221_1 +# BB#2: + movl $0, 
56(%esp) # 4-byte Folded Spill + jmp .LBB221_3 +.LBB221_1: + movl 52(%ebp), %edx + movl %edx, 56(%esp) # 4-byte Spill +.LBB221_3: + testb %al, %al + jne .LBB221_4 +# BB#5: + movl $0, 24(%esp) # 4-byte Folded Spill + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB221_6 +.LBB221_4: + movl (%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 4(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB221_6: + jne .LBB221_7 +# BB#8: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB221_9 +.LBB221_7: + movl 48(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB221_9: + jne .LBB221_10 +# BB#11: + movl $0, 28(%esp) # 4-byte Folded Spill + jmp .LBB221_12 +.LBB221_10: + movl 44(%ebp), %eax + movl %eax, 28(%esp) # 4-byte Spill +.LBB221_12: + jne .LBB221_13 +# BB#14: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB221_15 +.LBB221_13: + movl 40(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB221_15: + jne .LBB221_16 +# BB#17: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB221_18 +.LBB221_16: + movl 36(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB221_18: + jne .LBB221_19 +# BB#20: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB221_21 +.LBB221_19: + movl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB221_21: + jne .LBB221_22 +# BB#23: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB221_24 +.LBB221_22: + movl 28(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB221_24: + jne .LBB221_25 +# BB#26: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB221_27 +.LBB221_25: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB221_27: + jne .LBB221_28 +# BB#29: + movl $0, %esi + jmp .LBB221_30 +.LBB221_28: + movl 20(%ebp), %esi +.LBB221_30: + jne .LBB221_31 +# BB#32: + movl $0, %edi + jmp .LBB221_33 +.LBB221_31: + movl 16(%ebp), %edi +.LBB221_33: + jne .LBB221_34 +# BB#35: + movl $0, %ebx + jmp .LBB221_36 +.LBB221_34: + movl 12(%ebp), %ebx +.LBB221_36: + jne .LBB221_37 +# BB#38: + xorl %ebp, %ebp + jmp .LBB221_39 +.LBB221_37: + 
movl 8(%ebp), %ebp +.LBB221_39: + movl 20(%esp), %edx # 4-byte Reload + addl 44(%esp), %edx # 4-byte Folded Reload + movl 24(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %edx, 56(%ecx) + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %eax, 60(%ecx) + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 64(%ecx) + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %ebx, 68(%ecx) + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %edi, 72(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %esi, 76(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %eax, 80(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %edx, 84(%ecx) + movl 12(%esp), %edx # 4-byte Reload + adcl 76(%esp), %edx # 4-byte Folded Reload + movl %eax, 88(%ecx) + movl 16(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 28(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 32(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl %eax, 104(%ecx) + movl 56(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%ecx) + addl $96, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end221: + .size mcl_fpDbl_sub14Lbmi2, .Lfunc_end221-mcl_fpDbl_sub14Lbmi2 + + .align 16, 0x90 + .type .LmulPv480x32,@function +.LmulPv480x32: # @mulPv480x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl %edx, %eax + movl 72(%esp), %edi + movl %edi, %edx + mulxl 4(%eax), %ebx, %esi + movl %edi, %edx + mulxl (%eax), %ebp, %edx + movl %ebp, 48(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 8(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %edi, 
%edx + mulxl 12(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 16(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 20(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 24(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 28(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 32(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 36(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 40(%eax), %edx, %ebp + adcl %esi, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 44(%eax), %ebx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %edi, %edx + mulxl 48(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %edi, %edx + mulxl 52(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 44(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 36(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 32(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%ecx) + movl %ebx, 44(%ecx) + movl %esi, 48(%ecx) + movl %edx, 52(%ecx) + movl %edi, %edx + mulxl 56(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%ecx) + adcl $0, 
%edx + movl %edx, 60(%ecx) + movl %ecx, %eax + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end222: + .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32 + + .globl mcl_fp_mulUnitPre15Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre15Lbmi2,@function +mcl_fp_mulUnitPre15Lbmi2: # @mcl_fp_mulUnitPre15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $124, %esp + calll .L223$pb +.L223$pb: + popl %ebx +.Ltmp44: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp44-.L223$pb), %ebx + movl 152(%esp), %eax + movl %eax, (%esp) + leal 56(%esp), %ecx + movl 148(%esp), %edx + calll .LmulPv480x32 + movl 116(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 108(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 104(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 76(%esp), %ebp + movl 72(%esp), %ebx + movl 68(%esp), %edi + movl 64(%esp), %esi + movl 56(%esp), %edx + movl 60(%esp), %ecx + movl 144(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 
48(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + addl $124, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end223: + .size mcl_fp_mulUnitPre15Lbmi2, .Lfunc_end223-mcl_fp_mulUnitPre15Lbmi2 + + .globl mcl_fpDbl_mulPre15Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre15Lbmi2,@function +mcl_fpDbl_mulPre15Lbmi2: # @mcl_fpDbl_mulPre15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1084, %esp # imm = 0x43C + calll .L224$pb +.L224$pb: + popl %esi +.Ltmp45: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp45-.L224$pb), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 1112(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl 1108(%esp), %edi + movl %edi, %edx + movl %esi, %ebx + calll .LmulPv480x32 + movl 1076(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1072(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1068(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1040(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1036(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1028(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1016(%esp), %eax + movl 1020(%esp), %ebp + movl 1104(%esp), %ecx + movl %eax, (%ecx) + movl 1112(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 952(%esp), %ecx + movl %edi, %edx + movl %esi, %ebx + calll .LmulPv480x32 + addl 952(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 
1012(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1008(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1004(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1000(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 996(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 992(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 972(%esp), %edi + movl 968(%esp), %esi + movl 964(%esp), %edx + movl 956(%esp), %eax + movl 960(%esp), %ecx + movl 1104(%esp), %ebp + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 
4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 72(%esp), %eax # 4-byte Reload + addl 888(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 948(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 944(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 940(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 936(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 932(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 928(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 924(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 916(%esp), %ebx + movl 912(%esp), %edi + movl 908(%esp), %esi + movl 904(%esp), %edx + movl 900(%esp), %ecx + movl 892(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 72(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 112(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 
48(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 824(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 876(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 872(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 868(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 864(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 856(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 852(%esp), %ebx + movl 848(%esp), %edi + movl 844(%esp), %esi + movl 840(%esp), %edx + movl 836(%esp), %ecx + movl 828(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded 
Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 760(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 804(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 788(%esp), %ebx + movl 784(%esp), %edi + movl 780(%esp), %esi + movl 776(%esp), %edx + movl 
772(%esp), %ecx + movl 764(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 
40(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 744(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 732(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 724(%esp), %ebx + movl 720(%esp), %edi + movl 716(%esp), %esi + movl 712(%esp), %edx + movl 708(%esp), %ecx + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl 
%eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 632(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 676(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 656(%esp), %edi + movl 652(%esp), %esi + movl 648(%esp), %edx + movl 644(%esp), %ecx + movl 636(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte 
Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 568(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 596(%esp), %ebx + movl 592(%esp), %edi + movl 588(%esp), %esi + movl 584(%esp), %edx + movl 580(%esp), %ecx + movl 572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 
16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 504(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 532(%esp), %ebx + movl 528(%esp), %edi + movl 524(%esp), %esi + movl 
520(%esp), %edx + movl 516(%esp), %ecx + movl 508(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 440(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 500(%esp), 
%eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 468(%esp), %ebx + movl 464(%esp), %edi + movl 460(%esp), %esi + movl 456(%esp), %edx + movl 452(%esp), %ecx + movl 444(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded 
Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 376(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 436(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 404(%esp), %ebx + movl 400(%esp), %edi + movl 396(%esp), %esi + movl 392(%esp), %edx + movl 388(%esp), %ecx + movl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 100(%esp), %eax 
# 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 312(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 360(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 348(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 340(%esp), %ebx + movl 336(%esp), %edi + movl 332(%esp), %esi + movl 328(%esp), %edx + movl 324(%esp), %ecx + movl 316(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%eax) + movl 36(%esp), %eax # 4-byte Reload + adcl %eax, 108(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 
%ecx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 1108(%esp), %eax + movl %eax, %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 108(%esp), %eax # 4-byte Reload + addl 248(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 300(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 296(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 292(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 288(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 276(%esp), 
%ebx + movl 272(%esp), %edi + movl 268(%esp), %edx + movl 264(%esp), %ecx + movl 260(%esp), %eax + movl 252(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 256(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + movl 1104(%esp), %ebp + movl %esi, 48(%ebp) + movl 112(%esp), %esi # 4-byte Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 112(%esp) # 4-byte Spill + movl 20(%esp), %esi # 4-byte Reload + adcl %esi, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 32(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 184(%esp), %eax 
+ movl %eax, 112(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 240(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 232(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 220(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 212(%esp), %ebx + movl 208(%esp), %edx + movl 204(%esp), %ecx + movl 200(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl 192(%esp), %esi + movl 112(%esp), %ebp # 4-byte Reload + movl 1104(%esp), %edi + movl %ebp, 52(%edi) + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 72(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 24(%esp), %edi # 4-byte Reload + adcl %edi, 40(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl %ebx, 64(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte 
Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 120(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 124(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 128(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 152(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 148(%esp), %ebp + movl 144(%esp), %edi + movl 140(%esp), %esi + movl 136(%esp), %edx + movl 132(%esp), %ecx + movl 1104(%esp), %eax + movl 112(%esp), %ebx # 4-byte Reload + movl %ebx, 56(%eax) + movl 32(%esp), %ebx # 4-byte Reload + movl %ebx, 60(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %ebx # 4-byte Reload + movl %ebx, 64(%eax) + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %edx, 72(%eax) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %esi, 76(%eax) + adcl 76(%esp), %ebp # 4-byte Folded Reload + movl %edi, 80(%eax) + movl 44(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %ebp, 84(%eax) + movl 52(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) + movl 68(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx # 
4-byte Folded Reload + movl %ecx, 92(%eax) + movl 80(%esp), %ecx # 4-byte Reload + adcl 104(%esp), %ecx # 4-byte Folded Reload + movl %edx, 96(%eax) + movl 88(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl %ecx, 108(%eax) + movl 100(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 112(%eax) + movl 116(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 116(%eax) + addl $1084, %esp # imm = 0x43C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end224: + .size mcl_fpDbl_mulPre15Lbmi2, .Lfunc_end224-mcl_fpDbl_mulPre15Lbmi2 + + .globl mcl_fpDbl_sqrPre15Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre15Lbmi2,@function +mcl_fpDbl_sqrPre15Lbmi2: # @mcl_fpDbl_sqrPre15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1084, %esp # imm = 0x43C + calll .L225$pb +.L225$pb: + popl %ebx +.Ltmp46: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp46-.L225$pb), %ebx + movl %ebx, 116(%esp) # 4-byte Spill + movl 1108(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl %edx, %edi + movl %ebx, %esi + calll .LmulPv480x32 + movl 1076(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1072(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1068(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1040(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1036(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 24(%esp) # 
4-byte Spill + movl 1028(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1016(%esp), %eax + movl 1020(%esp), %ebp + movl 1104(%esp), %ecx + movl %eax, (%ecx) + movl %edi, %edx + movl 4(%edx), %eax + movl %eax, (%esp) + leal 952(%esp), %ecx + movl %esi, %ebx + calll .LmulPv480x32 + addl 952(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 1012(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1008(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1004(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1000(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 996(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 992(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 972(%esp), %edi + movl 968(%esp), %esi + movl 964(%esp), %edx + movl 956(%esp), %eax + movl 960(%esp), %ecx + movl 1104(%esp), %ebp + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 
40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 72(%esp), %eax # 4-byte Reload + addl 888(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 948(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 944(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 940(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 936(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 932(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 928(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 924(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 916(%esp), %ebx + movl 912(%esp), %edi + movl 908(%esp), %esi + movl 904(%esp), %edx + movl 900(%esp), %ecx + movl 892(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 72(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 112(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 
20(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 824(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 876(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 872(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 868(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 864(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 856(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 852(%esp), %ebx + movl 848(%esp), %edi + movl 844(%esp), %esi + movl 840(%esp), %edx + movl 836(%esp), %ecx + movl 828(%esp), %eax + movl %eax, 
60(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 760(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 80(%esp) # 4-byte 
Spill + movl 804(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 788(%esp), %ebx + movl 784(%esp), %edi + movl 780(%esp), %esi + movl 776(%esp), %edx + movl 772(%esp), %ecx + movl 764(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 
44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 744(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 732(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 724(%esp), %ebx + movl 720(%esp), %edi + movl 716(%esp), %esi + movl 712(%esp), %edx + movl 708(%esp), %ecx + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), 
%eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 632(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 676(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 656(%esp), %edi + movl 652(%esp), %esi + movl 648(%esp), %edx + movl 644(%esp), %ecx + movl 636(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte 
Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 568(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 596(%esp), %ebx + movl 592(%esp), %edi + movl 588(%esp), %esi + movl 584(%esp), %edx + movl 580(%esp), %ecx + movl 572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 
4-byte Reload + movl %ebp, 28(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 504(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl 
%eax, 52(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 532(%esp), %ebx + movl 528(%esp), %edi + movl 524(%esp), %esi + movl 520(%esp), %edx + movl 516(%esp), %ecx + movl 508(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) 
+ leal 440(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 440(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 468(%esp), %ebx + movl 464(%esp), %edi + movl 460(%esp), %esi + movl 456(%esp), %edx + movl 452(%esp), %ecx + movl 444(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte 
Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 40(%edx), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 376(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 436(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 404(%esp), %ebx + movl 400(%esp), %edi + movl 396(%esp), %esi + movl 392(%esp), %edx + movl 388(%esp), %ecx + movl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + 
movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 44(%edx), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 312(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 360(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 348(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 340(%esp), %ebx + movl 336(%esp), %edi + movl 332(%esp), %esi + movl 328(%esp), %edx + movl 324(%esp), %ecx + movl 316(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%eax) + movl 36(%esp), %eax # 4-byte Reload + adcl %eax, 108(%esp) # 
4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 48(%edx), %eax + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 108(%esp), %eax # 4-byte Reload + addl 248(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 300(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 296(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 292(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 288(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl 
%esi, 100(%esp) # 4-byte Spill + movl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 276(%esp), %ebx + movl 272(%esp), %edi + movl 268(%esp), %edx + movl 264(%esp), %ecx + movl 260(%esp), %eax + movl 252(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 256(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + movl 1104(%esp), %ebp + movl %esi, 48(%ebp) + movl 112(%esp), %esi # 4-byte Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 112(%esp) # 4-byte Spill + movl 20(%esp), %esi # 4-byte Reload + adcl %esi, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 32(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 52(%edx), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 116(%esp), %ebx # 4-byte 
Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 184(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 240(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 232(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 220(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 212(%esp), %ebx + movl 208(%esp), %edx + movl 204(%esp), %ecx + movl 200(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl 192(%esp), %esi + movl 112(%esp), %ebp # 4-byte Reload + movl 1104(%esp), %edi + movl %ebp, 52(%edi) + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 72(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 24(%esp), %edi # 4-byte Reload + adcl %edi, 40(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl %ebx, 64(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 
4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 56(%edx), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 120(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 124(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 128(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 152(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 148(%esp), %ebp + movl 144(%esp), %edi + movl 140(%esp), %esi + movl 136(%esp), %edx + movl 132(%esp), %ecx + movl 1104(%esp), %eax + movl 112(%esp), %ebx # 4-byte Reload + movl %ebx, 56(%eax) + movl 32(%esp), %ebx # 4-byte Reload + movl %ebx, 60(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %ebx # 4-byte Reload + movl %ebx, 64(%eax) + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %edx, 72(%eax) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %esi, 76(%eax) + adcl 76(%esp), %ebp # 4-byte Folded Reload + movl %edi, 80(%eax) + movl 44(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %ebp, 84(%eax) + movl 52(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) 
+ movl 68(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx # 4-byte Folded Reload + movl %ecx, 92(%eax) + movl 80(%esp), %ecx # 4-byte Reload + adcl 104(%esp), %ecx # 4-byte Folded Reload + movl %edx, 96(%eax) + movl 88(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl %ecx, 108(%eax) + movl 100(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 112(%eax) + movl 116(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 116(%eax) + addl $1084, %esp # imm = 0x43C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end225: + .size mcl_fpDbl_sqrPre15Lbmi2, .Lfunc_end225-mcl_fpDbl_sqrPre15Lbmi2 + + .globl mcl_fp_mont15Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont15Lbmi2,@function +mcl_fp_mont15Lbmi2: # @mcl_fp_mont15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2044, %esp # imm = 0x7FC + calll .L226$pb +.L226$pb: + popl %ebx +.Ltmp47: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp47-.L226$pb), %ebx + movl 2076(%esp), %eax + movl -4(%eax), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1976(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 1976(%esp), %ebp + movl 1980(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2036(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2032(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2028(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 2024(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2020(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2016(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2012(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2008(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2004(%esp), %ecx + movl 
%ecx, 56(%esp) # 4-byte Spill + movl 2000(%esp), %edi + movl 1996(%esp), %esi + movl 1992(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 1988(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1984(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1912(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + addl 1912(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 1916(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1920(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1924(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1928(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1932(%esp), %esi + adcl 1936(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1940(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1944(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1948(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1952(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1956(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1960(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1964(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1968(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1972(%esp), %ebp + sbbl %eax, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1848(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 116(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 88(%esp), %edx # 4-byte Reload + addl 1848(%esp), %edx + movl 
96(%esp), %ecx # 4-byte Reload + adcl 1852(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1856(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1860(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1864(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 1868(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 1872(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1876(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1880(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1884(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1888(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1892(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1896(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1900(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1904(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + adcl 1908(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %edx, %eax + movl %edx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1784(%esp), %ecx + movl 2076(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + addl 1784(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1804(%esp), %edi + movl %edi, 
64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 1812(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1824(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1832(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1836(%esp), %esi + movl 108(%esp), %ebp # 4-byte Reload + adcl 1840(%esp), %ebp + movl 116(%esp), %eax # 4-byte Reload + adcl 1844(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1720(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 1720(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1732(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1740(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1744(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 
1756(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 1768(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + adcl 1772(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1780(%esp), %esi + sbbl %ebp, %ebp + movl %ecx, %eax + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1656(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + movl 96(%esp), %eax # 4-byte Reload + addl 1656(%esp), %eax + movl 84(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1664(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1680(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 1688(%esp), %ebp + adcl 1692(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1696(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1700(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1704(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1708(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + 
movl 116(%esp), %edi # 4-byte Reload + adcl 1712(%esp), %edi + adcl 1716(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1592(%esp), %ecx + movl 2068(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 1592(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1600(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1608(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1620(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1628(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1632(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1640(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1644(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %ebp + movl %ebp, %eax + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1528(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + 
addl 1528(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 1544(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1564(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl 1568(%esp), %edi + movl 104(%esp), %esi # 4-byte Reload + adcl 1572(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 1576(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1580(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1584(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1464(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 92(%esp), %ecx # 4-byte Reload + addl 1464(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1476(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 
4-byte Reload + adcl 1484(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1488(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1496(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1500(%esp), %edi + movl %edi, 112(%esp) # 4-byte Spill + adcl 1504(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 1512(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1516(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1400(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 92(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1400(%esp), %edi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1404(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1408(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1412(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1416(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 1420(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1424(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1428(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 1432(%esp), %edi + movl 112(%esp), %ecx # 4-byte Reload + adcl 
1436(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1440(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 1448(%esp), %esi + movl %esi, %ebp + movl 88(%esp), %esi # 4-byte Reload + adcl 1452(%esp), %esi + movl 96(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1460(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1336(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 1336(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1364(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1380(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + adcl 1384(%esp), %esi + movl %esi, %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %esi # 
4-byte Reload + adcl 1392(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1272(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 80(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1272(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1276(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1280(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1284(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1288(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1300(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1312(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1316(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl 1320(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 1328(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %edi + movl 2072(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 
2068(%esp), %edx + calll .LmulPv480x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 1208(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1232(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1244(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1260(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1264(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1268(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + movl 64(%esp), %eax # 4-byte Reload + addl 1144(%esp), %eax + movl 56(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 
68(%esp), %edi # 4-byte Reload + adcl 1156(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1168(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1180(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 1196(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1080(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 1080(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1088(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 1092(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 104(%esp) # 
4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1128(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + addl 1016(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1028(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 1032(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1044(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 1060(%esp), %ebp + movl 84(%esp), %eax # 4-byte 
Reload + adcl 1064(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 952(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 952(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 964(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 976(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 992(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi 
+ imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl %ebp, %eax + andl $1, %eax + addl 888(%esp), %esi + movl 68(%esp), %ecx # 4-byte Reload + adcl 892(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 896(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 900(%esp), %edi + movl 100(%esp), %ecx # 4-byte Reload + adcl 904(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 908(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 912(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 916(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 920(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 924(%esp), %ebp + movl 96(%esp), %ecx # 4-byte Reload + adcl 928(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 932(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 936(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 940(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 944(%esp), %esi + movl 56(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 824(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 832(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl 
%eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 856(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 864(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 872(%esp), %edi + adcl 876(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 68(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 760(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 776(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl 
%eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 800(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 804(%esp), %ebp + adcl 808(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 816(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 696(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 708(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 736(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 
64(%esp) # 4-byte Spill + adcl 748(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 752(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + addl 632(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 656(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 672(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 688(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + 
movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 568(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 588(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 596(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 604(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + addl 504(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 516(%esp), %edi + movl 108(%esp), 
%esi # 4-byte Reload + adcl 520(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 532(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 560(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 100(%esp), %ecx # 4-byte Reload + addl 440(%esp), %ecx + movl 112(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 448(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + adcl 452(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 460(%esp), %edi + movl 96(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 
4-byte Reload + adcl 476(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 492(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + addl 376(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 388(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 396(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 404(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 416(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 68(%esp) # 4-byte 
Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 112(%esp), %ecx # 4-byte Reload + addl 312(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 320(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 336(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 348(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 352(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %edi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %esi + movl %esi, 
%ecx + addl 248(%esp), %edi + movl 104(%esp), %esi # 4-byte Reload + adcl 252(%esp), %esi + movl 108(%esp), %edi # 4-byte Reload + adcl 256(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 288(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl %esi, %ecx + movl 96(%esp), %esi # 4-byte Reload + addl 184(%esp), %ecx + adcl 188(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 196(%esp), %ebp + adcl 200(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 84(%esp) # 4-byte 
Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 104(%esp), %ebx # 4-byte Reload + andl $1, %ebx + addl 120(%esp), %edi + movl %ebp, %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 128(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl 132(%esp), %edi + adcl 136(%esp), %esi + movl 84(%esp), %edx # 4-byte Reload + adcl 140(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 148(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 152(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 156(%esp), 
%edx + movl %edx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 160(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 168(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 172(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 176(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 112(%esp), %edx # 4-byte Reload + adcl 180(%esp), %edx + movl %edx, 112(%esp) # 4-byte Spill + adcl $0, %ebx + movl %ebx, 104(%esp) # 4-byte Spill + movl %eax, %edx + movl 2076(%esp), %ebp + subl (%ebp), %edx + sbbl 4(%ebp), %ecx + movl %edi, %eax + sbbl 8(%ebp), %eax + movl %esi, %ebx + sbbl 12(%ebp), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + sbbl 16(%ebp), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 92(%esp), %ebx # 4-byte Reload + sbbl 20(%ebp), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 80(%esp), %ebx # 4-byte Reload + sbbl 24(%ebp), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + sbbl 28(%ebp), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload + sbbl 32(%ebp), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 60(%esp), %ebx # 4-byte Reload + sbbl 36(%ebp), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 68(%esp), %ebx # 4-byte Reload + sbbl 40(%ebp), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ebx # 4-byte Reload + sbbl 44(%ebp), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 76(%esp), %ebx # 4-byte Reload + sbbl 48(%ebp), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl 52(%ebp), %ebx + movl %ebx, 88(%esp) # 4-byte Spill + movl 112(%esp), %ebx # 4-byte Reload + sbbl 56(%ebp), %ebx + movl %ebx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ebx # 4-byte Reload + 
movl 108(%esp), %ebp # 4-byte Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB226_2 +# BB#1: + movl %edx, %ebp +.LBB226_2: + movl 2064(%esp), %edx + movl %ebp, (%edx) + testb %bl, %bl + movl 116(%esp), %ebp # 4-byte Reload + jne .LBB226_4 +# BB#3: + movl %ecx, %ebp +.LBB226_4: + movl %ebp, 4(%edx) + jne .LBB226_6 +# BB#5: + movl %eax, %edi +.LBB226_6: + movl %edi, 8(%edx) + jne .LBB226_8 +# BB#7: + movl 16(%esp), %esi # 4-byte Reload +.LBB226_8: + movl %esi, 12(%edx) + movl 84(%esp), %eax # 4-byte Reload + jne .LBB226_10 +# BB#9: + movl 20(%esp), %eax # 4-byte Reload +.LBB226_10: + movl %eax, 16(%edx) + movl 92(%esp), %eax # 4-byte Reload + jne .LBB226_12 +# BB#11: + movl 24(%esp), %eax # 4-byte Reload +.LBB226_12: + movl %eax, 20(%edx) + movl 80(%esp), %eax # 4-byte Reload + jne .LBB226_14 +# BB#13: + movl 28(%esp), %eax # 4-byte Reload +.LBB226_14: + movl %eax, 24(%edx) + movl 64(%esp), %eax # 4-byte Reload + jne .LBB226_16 +# BB#15: + movl 32(%esp), %eax # 4-byte Reload +.LBB226_16: + movl %eax, 28(%edx) + movl 56(%esp), %eax # 4-byte Reload + jne .LBB226_18 +# BB#17: + movl 36(%esp), %eax # 4-byte Reload +.LBB226_18: + movl %eax, 32(%edx) + movl 60(%esp), %eax # 4-byte Reload + jne .LBB226_20 +# BB#19: + movl 40(%esp), %eax # 4-byte Reload +.LBB226_20: + movl %eax, 36(%edx) + movl 68(%esp), %eax # 4-byte Reload + jne .LBB226_22 +# BB#21: + movl 44(%esp), %eax # 4-byte Reload +.LBB226_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + jne .LBB226_24 +# BB#23: + movl 48(%esp), %eax # 4-byte Reload +.LBB226_24: + movl %eax, 44(%edx) + movl 76(%esp), %eax # 4-byte Reload + jne .LBB226_26 +# BB#25: + movl 52(%esp), %eax # 4-byte Reload +.LBB226_26: + movl %eax, 48(%edx) + movl 100(%esp), %eax # 4-byte Reload + jne .LBB226_28 +# BB#27: + movl 88(%esp), %eax # 4-byte Reload +.LBB226_28: + movl %eax, 52(%edx) + movl 112(%esp), %eax # 4-byte Reload + jne .LBB226_30 +# BB#29: + movl 96(%esp), %eax # 4-byte Reload +.LBB226_30: + movl %eax, 56(%edx) + addl 
$2044, %esp # imm = 0x7FC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end226: + .size mcl_fp_mont15Lbmi2, .Lfunc_end226-mcl_fp_mont15Lbmi2 + + .globl mcl_fp_montNF15Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF15Lbmi2,@function +mcl_fp_montNF15Lbmi2: # @mcl_fp_montNF15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2028, %esp # imm = 0x7EC + calll .L227$pb +.L227$pb: + popl %ebx +.Ltmp48: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp48-.L227$pb), %ebx + movl 2060(%esp), %eax + movl -4(%eax), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1960(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1960(%esp), %ebp + movl 1964(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2020(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2016(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2012(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2008(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 2004(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2000(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1996(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1992(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1988(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1984(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1980(%esp), %esi + movl 1976(%esp), %edi + movl 1972(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1968(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1896(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1896(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1900(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1904(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1908(%esp), %eax + movl %eax, 
88(%esp) # 4-byte Spill + adcl 1912(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1916(%esp), %esi + movl %esi, %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 1920(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1924(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1928(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1932(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1936(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1940(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1944(%esp), %ebp + movl 76(%esp), %esi # 4-byte Reload + adcl 1948(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1952(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1956(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1832(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1892(%esp), %eax + movl 92(%esp), %edx # 4-byte Reload + addl 1832(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + adcl 1836(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1840(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1844(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 1848(%esp), %edi + movl 48(%esp), %ecx # 4-byte Reload + adcl 1852(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1856(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1860(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1864(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), 
%ecx # 4-byte Reload + adcl 1868(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1872(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1876(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 1880(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 1884(%esp), %ebp + movl 100(%esp), %ecx # 4-byte Reload + adcl 1888(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1768(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1768(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1784(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 1804(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, %esi + adcl 1820(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1824(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + 
movl %eax, 92(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1704(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1764(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + addl 1704(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + adcl 1708(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1712(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1716(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1720(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1724(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1728(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1732(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 1736(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1740(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + adcl 1748(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1752(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 1756(%esp), %ebp + movl 92(%esp), %ecx # 4-byte Reload + adcl 1760(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1640(%esp), %ecx + movl 2060(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + addl 1640(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 
40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1664(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1680(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, %esi + movl 96(%esp), %edi # 4-byte Reload + adcl 1688(%esp), %edi + adcl 1692(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1696(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1700(%esp), %ebp + movl 2056(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1576(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1636(%esp), %eax + movl 88(%esp), %edx # 4-byte Reload + addl 1576(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + adcl 1580(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1584(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1588(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1592(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1596(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1600(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1604(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + 
adcl 1608(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1612(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 1616(%esp), %esi + adcl 1620(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1624(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1628(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1632(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1512(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1512(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1516(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 1532(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1544(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1548(%esp), %ebp + adcl 1552(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1568(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 
88(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1448(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1508(%esp), %eax + movl 72(%esp), %edx # 4-byte Reload + addl 1448(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 1452(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1460(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 1464(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1468(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 1472(%esp), %edi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1476(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1480(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 1484(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1488(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1492(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1496(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1500(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 1504(%esp), %ebp + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1384(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1384(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload 
+ adcl 1396(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1408(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1432(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1440(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1444(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1320(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1380(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + addl 1320(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 1324(%esp), %ebp + movl 44(%esp), %edi # 4-byte Reload + adcl 1328(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + 
movl %eax, 80(%esp) # 4-byte Spill + adcl 1352(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1368(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %ecx, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1256(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + movl 40(%esp), %eax # 4-byte Reload + addl 1256(%esp), %eax + adcl 1260(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + adcl 1264(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 1272(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 1296(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1304(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 
88(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 1312(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1192(%esp), %ecx + movl 2052(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + movl 1252(%esp), %eax + movl 48(%esp), %edx # 4-byte Reload + addl 1192(%esp), %edx + movl 44(%esp), %ecx # 4-byte Reload + adcl 1196(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1200(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 1204(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1208(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 1212(%esp), %ebp + movl 80(%esp), %ecx # 4-byte Reload + adcl 1216(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1224(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 1228(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 1244(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 1248(%esp), %esi + adcl $0, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1128(%esp), %ecx + movl 2060(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + addl 1128(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 
52(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 1140(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1148(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1168(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1184(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 1188(%esp), %esi + movl 2056(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1064(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1124(%esp), %eax + movl 44(%esp), %edx # 4-byte Reload + addl 1064(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 1072(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 1084(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1088(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1092(%esp), %ecx + 
movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1096(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 1100(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1104(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1108(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1112(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1116(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1120(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %esi + movl %edx, %ebp + movl %ebp, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1000(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 1012(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1020(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 1028(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload 
+ adcl 1052(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 1060(%esp), %esi + movl 2056(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 936(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 996(%esp), %eax + movl 52(%esp), %edx # 4-byte Reload + addl 936(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + adcl 940(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 944(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 952(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 956(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 960(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 964(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 968(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 972(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 976(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 980(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 984(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 988(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 992(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %esi + movl %edx, %edi + movl %edi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 872(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 872(%esp), %edi + movl 56(%esp), %ebp # 4-byte Reload + adcl 876(%esp), %ebp + movl 60(%esp), %edi # 4-byte Reload + adcl 
880(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 932(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 808(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 868(%esp), %eax + movl %ebp, %ecx + addl 808(%esp), %ecx + adcl 812(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 816(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 820(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 824(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 828(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 832(%esp), %edx + movl %edx, 100(%esp) # 4-byte 
Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 836(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 840(%esp), %edi + movl 88(%esp), %esi # 4-byte Reload + adcl 844(%esp), %esi + movl 72(%esp), %edx # 4-byte Reload + adcl 848(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 852(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 856(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 860(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 864(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 744(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 744(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 768(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 776(%esp), %edi + adcl 780(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 792(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 
796(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 680(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 740(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + addl 680(%esp), %ecx + movl 84(%esp), %edx # 4-byte Reload + adcl 684(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 688(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 692(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 696(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + adcl 700(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 704(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + adcl 708(%esp), %edi + movl 88(%esp), %edx # 4-byte Reload + adcl 712(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 716(%esp), %ebp + movl 64(%esp), %edx # 4-byte Reload + adcl 720(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 724(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 728(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 732(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 736(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 616(%esp), %esi + movl 84(%esp), %esi # 4-byte Reload + adcl 620(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload 
+ adcl 624(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 644(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 648(%esp), %edi + adcl 652(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 612(%esp), %edx + movl %esi, %ecx + addl 552(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 572(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 580(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + 
movl 72(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 588(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 84(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 488(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 508(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 512(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 528(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 540(%esp), 
%eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 484(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + addl 424(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 440(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + adcl 444(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %esi, %edi + adcl 460(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 360(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 
364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + adcl 368(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 376(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 396(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 400(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 296(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 356(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + addl 296(%esp), %ecx + adcl 300(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 308(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 72(%esp) # 
4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 332(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 336(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 232(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 240(%esp), %ebp + movl 92(%esp), %esi # 4-byte Reload + adcl 244(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 272(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 276(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + 
movl 84(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 228(%esp), %edx + movl 96(%esp), %ecx # 4-byte Reload + addl 168(%esp), %ecx + adcl 172(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + adcl 176(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 188(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 208(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 212(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 2060(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + addl 104(%esp), %edi + movl 68(%esp), %edi # 4-byte Reload + movl 
100(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 112(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl %ecx, %ebx + adcl 116(%esp), %edi + movl 88(%esp), %ecx # 4-byte Reload + adcl 120(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 124(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 128(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 132(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 140(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 148(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 152(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 160(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl %eax, %edx + movl 2060(%esp), %ecx + subl (%ecx), %edx + movl %ebx, %ebp + sbbl 4(%ecx), %ebp + movl %edi, %ebx + sbbl 8(%ecx), %ebx + movl 88(%esp), %eax # 4-byte Reload + sbbl 12(%ecx), %eax + sbbl 16(%ecx), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + sbbl 20(%ecx), %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 24(%ecx), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + sbbl 28(%ecx), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 32(%ecx), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 44(%esp), %esi 
# 4-byte Reload + sbbl 36(%ecx), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + sbbl 40(%ecx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 44(%ecx), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + sbbl 48(%ecx), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 52(%ecx), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + sbbl 56(%ecx), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl %esi, %ecx + sarl $31, %ecx + testl %ecx, %ecx + movl 100(%esp), %ecx # 4-byte Reload + js .LBB227_2 +# BB#1: + movl %edx, %ecx +.LBB227_2: + movl 2048(%esp), %edx + movl %ecx, (%edx) + movl 92(%esp), %esi # 4-byte Reload + js .LBB227_4 +# BB#3: + movl %ebp, %esi +.LBB227_4: + movl %esi, 4(%edx) + movl 88(%esp), %ecx # 4-byte Reload + js .LBB227_6 +# BB#5: + movl %ebx, %edi +.LBB227_6: + movl %edi, 8(%edx) + js .LBB227_8 +# BB#7: + movl %eax, %ecx +.LBB227_8: + movl %ecx, 12(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB227_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB227_10: + movl %eax, 16(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB227_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB227_12: + movl %eax, 20(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB227_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB227_14: + movl %eax, 24(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB227_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB227_16: + movl %eax, 28(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB227_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB227_18: + movl %eax, 32(%edx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB227_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB227_20: + movl %eax, 36(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB227_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB227_22: + movl %eax, 
40(%edx) + movl 84(%esp), %eax # 4-byte Reload + js .LBB227_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB227_24: + movl %eax, 44(%edx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB227_26 +# BB#25: + movl 36(%esp), %eax # 4-byte Reload +.LBB227_26: + movl %eax, 48(%edx) + movl 76(%esp), %eax # 4-byte Reload + js .LBB227_28 +# BB#27: + movl 40(%esp), %eax # 4-byte Reload +.LBB227_28: + movl %eax, 52(%edx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB227_30 +# BB#29: + movl 68(%esp), %eax # 4-byte Reload +.LBB227_30: + movl %eax, 56(%edx) + addl $2028, %esp # imm = 0x7EC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end227: + .size mcl_fp_montNF15Lbmi2, .Lfunc_end227-mcl_fp_montNF15Lbmi2 + + .globl mcl_fp_montRed15Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed15Lbmi2,@function +mcl_fp_montRed15Lbmi2: # @mcl_fp_montRed15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1148, %esp # imm = 0x47C + calll .L228$pb +.L228$pb: + popl %eax +.Ltmp49: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp49-.L228$pb), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1176(%esp), %edx + movl -4(%edx), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 1172(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 80(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 84(%esp) # 4-byte Spill + imull %esi, %ebx + movl 116(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 100(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 96(%ecx), %esi + movl %esi, 152(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 84(%ecx), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 80(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 168(%esp) # 
4-byte Spill + movl 72(%ecx), %esi + movl %esi, 164(%esp) # 4-byte Spill + movl 68(%ecx), %esi + movl %esi, 176(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 60(%ecx), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 56(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 140(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 44(%ecx), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 20(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 12(%ecx), %edi + movl 8(%ecx), %esi + movl (%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 56(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 1080(%esp), %ecx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 80(%esp), %eax # 4-byte Reload + addl 1080(%esp), %eax + movl 84(%esp), %ecx # 4-byte Reload + 
adcl 1084(%esp), %ecx + adcl 1088(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + adcl 1092(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1108(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + movl 148(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax 
# 4-byte Folded Reload + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + addl 1016(%esp), %esi + movl 84(%esp), %edx # 4-byte Reload + adcl 1020(%esp), %edx + movl 64(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 1060(%esp), %ebp + movl 140(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + movl 144(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %esi + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 148(%esp) # 4-byte Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 
4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 952(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 952(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 956(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 992(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %ebp # 4-byte Reload + adcl 1004(%esp), %ebp + movl 180(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, %esi 
+ movl %esi, 144(%esp) # 4-byte Spill + movl 160(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 888(%esp), %esi + movl 68(%esp), %esi # 4-byte Reload + adcl 892(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl 936(%esp), %ebp + movl %ebp, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 
176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 160(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 104(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 824(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 828(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 
872(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 104(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 760(%esp), %esi + movl 76(%esp), %esi # 4-byte Reload + adcl 764(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax 
+ movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + movl 152(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 128(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 696(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte 
Reload + adcl 732(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 152(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 128(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 108(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 632(%esp), %edi + movl 96(%esp), %ecx # 4-byte Reload + adcl 636(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), 
%eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %ebp # 4-byte Reload + adcl 672(%esp), %ebp + movl 164(%esp), %edi # 4-byte Reload + adcl 676(%esp), %edi + movl 168(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 568(%esp), %esi + movl 100(%esp), %ecx # 4-byte Reload + adcl 572(%esp), %ecx + movl 112(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 
600(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl 604(%esp), %ebp + movl %ebp, 176(%esp) # 4-byte Spill + adcl 608(%esp), %edi + movl %edi, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %edi # 4-byte Reload + adcl 616(%esp), %edi + movl 160(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 1176(%esp), %eax + movl %eax, %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 504(%esp), %esi + movl 112(%esp), %ecx # 4-byte Reload + adcl 508(%esp), %ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %esi # 4-byte Reload + adcl 524(%esp), %esi + movl 172(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 
164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl 548(%esp), %edi + movl %edi, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ecx, %edi + movl %edi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 440(%esp), %edi + movl 124(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %edi # 4-byte Reload + adcl 452(%esp), %edi + adcl 456(%esp), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %esi # 4-byte Reload + adcl 464(%esp), %esi + movl 176(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + 
movl 148(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 376(%esp), %ebp + movl 120(%esp), %ebp # 4-byte Reload + adcl 380(%esp), %ebp + adcl 384(%esp), %edi + movl %edi, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %edi # 4-byte Reload + adcl 392(%esp), %edi + adcl 396(%esp), %esi + movl %esi, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %esi # 4-byte Reload + adcl 412(%esp), %esi + movl 160(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte 
Reload + adcl 432(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ebp, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 312(%esp), %ebp + movl 140(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl 156(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + adcl 324(%esp), %edi + movl %edi, 172(%esp) # 4-byte Spill + movl 180(%esp), %ecx # 4-byte Reload + adcl 328(%esp), %ecx + movl %ecx, 180(%esp) # 4-byte Spill + movl 176(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl %ecx, 176(%esp) # 4-byte Spill + movl 164(%esp), %ecx # 4-byte Reload + adcl 336(%esp), %ecx + movl %ecx, 164(%esp) # 4-byte Spill + movl 168(%esp), %ecx # 4-byte Reload + adcl 340(%esp), %ecx + movl %ecx, 168(%esp) # 4-byte Spill + adcl 344(%esp), %esi + movl %esi, 144(%esp) # 4-byte Spill + movl 160(%esp), %ecx # 4-byte Reload + adcl 348(%esp), %ecx + movl %ecx, 160(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 352(%esp), %ebp + movl 136(%esp), %ecx # 4-byte Reload + adcl 356(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 360(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 364(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 368(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 
84(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %eax, %edi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 248(%esp), %edi + movl 156(%esp), %ecx # 4-byte Reload + adcl 252(%esp), %ecx + movl 172(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + adcl 284(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 136(%esp), %edi # 4-byte Reload + adcl 288(%esp), %edi + movl 152(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 308(%esp), %ebp + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 184(%esp), %esi + movl 172(%esp), %edx # 4-byte Reload + adcl 188(%esp), %edx + movl 
%edx, 172(%esp) # 4-byte Spill + movl 180(%esp), %ecx # 4-byte Reload + adcl 192(%esp), %ecx + movl %ecx, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %esi # 4-byte Reload + adcl 204(%esp), %esi + movl %esi, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl 220(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 240(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl %edx, %eax + subl 16(%esp), %edx # 4-byte Folded Reload + sbbl 4(%esp), %ecx # 4-byte Folded Reload + movl 176(%esp), %eax # 4-byte Reload + sbbl 8(%esp), %eax # 4-byte Folded Reload + movl 164(%esp), %ebp # 4-byte Reload + sbbl 12(%esp), %ebp # 4-byte Folded Reload + sbbl 20(%esp), %esi # 4-byte Folded Reload + movl 144(%esp), %edi # 4-byte Reload + sbbl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 84(%esp) # 4-byte Spill + movl 160(%esp), %edi # 4-byte Reload + sbbl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 88(%esp) # 4-byte Spill + movl 148(%esp), %edi # 4-byte 
Reload + sbbl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 92(%esp) # 4-byte Spill + movl 136(%esp), %edi # 4-byte Reload + sbbl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 96(%esp) # 4-byte Spill + movl 152(%esp), %edi # 4-byte Reload + sbbl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 100(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + sbbl 44(%esp), %edi # 4-byte Folded Reload + movl %edi, 112(%esp) # 4-byte Spill + movl 128(%esp), %edi # 4-byte Reload + sbbl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 120(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + sbbl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 124(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + sbbl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 140(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + sbbl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 156(%esp) # 4-byte Spill + sbbl $0, %ebx + andl $1, %ebx + movl %ebx, %edi + jne .LBB228_2 +# BB#1: + movl %edx, 172(%esp) # 4-byte Spill +.LBB228_2: + movl 1168(%esp), %edx + movl 172(%esp), %ebx # 4-byte Reload + movl %ebx, (%edx) + movl %edi, %ebx + testb %bl, %bl + jne .LBB228_4 +# BB#3: + movl %ecx, 180(%esp) # 4-byte Spill +.LBB228_4: + movl 180(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%edx) + movl 176(%esp), %ecx # 4-byte Reload + jne .LBB228_6 +# BB#5: + movl %eax, %ecx +.LBB228_6: + movl %ecx, 8(%edx) + movl 164(%esp), %eax # 4-byte Reload + jne .LBB228_8 +# BB#7: + movl %ebp, %eax +.LBB228_8: + movl %eax, 12(%edx) + movl 108(%esp), %ecx # 4-byte Reload + movl 148(%esp), %eax # 4-byte Reload + movl 168(%esp), %ebp # 4-byte Reload + jne .LBB228_10 +# BB#9: + movl %esi, %ebp +.LBB228_10: + movl %ebp, 16(%edx) + movl 152(%esp), %ebp # 4-byte Reload + movl 144(%esp), %ebx # 4-byte Reload + jne .LBB228_12 +# BB#11: + movl 84(%esp), %ebx # 4-byte Reload +.LBB228_12: + movl %ebx, 20(%edx) + movl 132(%esp), %ebx # 4-byte Reload + movl 160(%esp), %edi # 4-byte 
Reload + jne .LBB228_14 +# BB#13: + movl 88(%esp), %edi # 4-byte Reload +.LBB228_14: + movl %edi, 24(%edx) + movl 128(%esp), %edi # 4-byte Reload + jne .LBB228_16 +# BB#15: + movl 92(%esp), %eax # 4-byte Reload +.LBB228_16: + movl %eax, 28(%edx) + movl 116(%esp), %esi # 4-byte Reload + jne .LBB228_18 +# BB#17: + movl 96(%esp), %eax # 4-byte Reload + movl %eax, 136(%esp) # 4-byte Spill +.LBB228_18: + movl 136(%esp), %eax # 4-byte Reload + movl %eax, 32(%edx) + jne .LBB228_20 +# BB#19: + movl 100(%esp), %ebp # 4-byte Reload +.LBB228_20: + movl %ebp, 36(%edx) + movl 104(%esp), %eax # 4-byte Reload + jne .LBB228_22 +# BB#21: + movl 112(%esp), %ebx # 4-byte Reload +.LBB228_22: + movl %ebx, 40(%edx) + jne .LBB228_24 +# BB#23: + movl 120(%esp), %edi # 4-byte Reload +.LBB228_24: + movl %edi, 44(%edx) + jne .LBB228_26 +# BB#25: + movl 124(%esp), %esi # 4-byte Reload +.LBB228_26: + movl %esi, 48(%edx) + jne .LBB228_28 +# BB#27: + movl 140(%esp), %eax # 4-byte Reload +.LBB228_28: + movl %eax, 52(%edx) + jne .LBB228_30 +# BB#29: + movl 156(%esp), %ecx # 4-byte Reload +.LBB228_30: + movl %ecx, 56(%edx) + addl $1148, %esp # imm = 0x47C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end228: + .size mcl_fp_montRed15Lbmi2, .Lfunc_end228-mcl_fp_montRed15Lbmi2 + + .globl mcl_fp_addPre15Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre15Lbmi2,@function +mcl_fp_addPre15Lbmi2: # @mcl_fp_addPre15Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %ebx + adcl 8(%ecx), %ebx + movl 16(%esp), %edi + movl %edx, (%edi) + movl 12(%ecx), %edx + movl %esi, 4(%edi) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %ebx, 8(%edi) + movl 20(%eax), %ebx + movl %edx, 12(%edi) + movl 20(%ecx), %edx + adcl %ebx, %edx + movl 24(%eax), %ebx + movl %esi, 16(%edi) + movl 24(%ecx), %esi + adcl %ebx, %esi + movl 28(%eax), %ebx + movl 
%edx, 20(%edi) + movl 28(%ecx), %edx + adcl %ebx, %edx + movl 32(%eax), %ebx + movl %esi, 24(%edi) + movl 32(%ecx), %esi + adcl %ebx, %esi + movl 36(%eax), %ebx + movl %edx, 28(%edi) + movl 36(%ecx), %edx + adcl %ebx, %edx + movl 40(%eax), %ebx + movl %esi, 32(%edi) + movl 40(%ecx), %esi + adcl %ebx, %esi + movl 44(%eax), %ebx + movl %edx, 36(%edi) + movl 44(%ecx), %edx + adcl %ebx, %edx + movl 48(%eax), %ebx + movl %esi, 40(%edi) + movl 48(%ecx), %esi + adcl %ebx, %esi + movl 52(%eax), %ebx + movl %edx, 44(%edi) + movl 52(%ecx), %edx + adcl %ebx, %edx + movl %esi, 48(%edi) + movl %edx, 52(%edi) + movl 56(%eax), %eax + movl 56(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 56(%edi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end229: + .size mcl_fp_addPre15Lbmi2, .Lfunc_end229-mcl_fp_addPre15Lbmi2 + + .globl mcl_fp_subPre15Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre15Lbmi2,@function +mcl_fp_subPre15Lbmi2: # @mcl_fp_subPre15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebp + sbbl 8(%edx), %ebp + movl 20(%esp), %ebx + movl %esi, (%ebx) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebx) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebp, 8(%ebx) + movl 20(%edx), %ebp + movl %esi, 12(%ebx) + movl 20(%ecx), %esi + sbbl %ebp, %esi + movl 24(%edx), %ebp + movl %edi, 16(%ebx) + movl 24(%ecx), %edi + sbbl %ebp, %edi + movl 28(%edx), %ebp + movl %esi, 20(%ebx) + movl 28(%ecx), %esi + sbbl %ebp, %esi + movl 32(%edx), %ebp + movl %edi, 24(%ebx) + movl 32(%ecx), %edi + sbbl %ebp, %edi + movl 36(%edx), %ebp + movl %esi, 28(%ebx) + movl 36(%ecx), %esi + sbbl %ebp, %esi + movl 40(%edx), %ebp + movl %edi, 32(%ebx) + movl 40(%ecx), %edi + sbbl %ebp, %edi + movl 44(%edx), %ebp + movl %esi, 36(%ebx) + movl 44(%ecx), %esi + sbbl %ebp, %esi + movl 
48(%edx), %ebp + movl %edi, 40(%ebx) + movl 48(%ecx), %edi + sbbl %ebp, %edi + movl 52(%edx), %ebp + movl %esi, 44(%ebx) + movl 52(%ecx), %esi + sbbl %ebp, %esi + movl %edi, 48(%ebx) + movl %esi, 52(%ebx) + movl 56(%edx), %edx + movl 56(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 56(%ebx) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end230: + .size mcl_fp_subPre15Lbmi2, .Lfunc_end230-mcl_fp_subPre15Lbmi2 + + .globl mcl_fp_shr1_15Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_15Lbmi2,@function +mcl_fp_shr1_15Lbmi2: # @mcl_fp_shr1_15Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 44(%ecx) + movl 52(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 48(%ecx) + movl 56(%eax), %eax + shrdl $1, %eax, %esi + movl %esi, 52(%ecx) + shrl %eax + movl %eax, 56(%ecx) + popl %esi + retl +.Lfunc_end231: + .size mcl_fp_shr1_15Lbmi2, .Lfunc_end231-mcl_fp_shr1_15Lbmi2 + + .globl mcl_fp_add15Lbmi2 + .align 16, 0x90 + .type mcl_fp_add15Lbmi2,@function +mcl_fp_add15Lbmi2: # @mcl_fp_add15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $48, %esp + movl 76(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), 
%edx + movl 72(%esp), %eax + addl (%eax), %esi + movl %esi, 4(%esp) # 4-byte Spill + adcl 4(%eax), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 8(%ecx), %edx + adcl 8(%eax), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 12(%eax), %esi + movl 16(%eax), %edx + adcl 12(%ecx), %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl 16(%ecx), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 20(%eax), %edx + adcl 20(%ecx), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 24(%eax), %edx + adcl 24(%ecx), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 28(%eax), %edx + adcl 28(%ecx), %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 32(%eax), %edx + adcl 32(%ecx), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 36(%eax), %edx + adcl 36(%ecx), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 40(%eax), %edx + adcl 40(%ecx), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 44(%eax), %ebx + adcl 44(%ecx), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 48(%eax), %ebp + adcl 48(%ecx), %ebp + movl 52(%eax), %edi + adcl 52(%ecx), %edi + movl 56(%eax), %edx + adcl 56(%ecx), %edx + movl 68(%esp), %ecx + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ecx) + movl 44(%esp), %esi # 4-byte Reload + movl %esi, 4(%ecx) + movl 40(%esp), %esi # 4-byte Reload + movl %esi, 8(%ecx) + movl 36(%esp), %esi # 4-byte Reload + movl %esi, 12(%ecx) + movl 32(%esp), %esi # 4-byte Reload + movl %esi, 16(%ecx) + movl 28(%esp), %esi # 4-byte Reload + movl %esi, 20(%ecx) + movl 24(%esp), %esi # 4-byte Reload + movl %esi, 24(%ecx) + movl 20(%esp), %esi # 4-byte Reload + movl %esi, 28(%ecx) + movl 16(%esp), %esi # 4-byte Reload + movl %esi, 32(%ecx) + movl 12(%esp), %esi # 4-byte Reload + movl %esi, 36(%ecx) + movl 8(%esp), %esi # 4-byte Reload + movl %esi, 40(%ecx) + movl %ebx, 44(%ecx) + movl %ebp, 48(%ecx) + movl %edi, 52(%ecx) + movl %edx, 56(%ecx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 80(%esp), %esi + subl (%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + sbbl 
4(%esi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %eax + movl 40(%esp), %edx # 4-byte Reload + sbbl 8(%esi), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + sbbl 12(%esi), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + sbbl 16(%esi), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + sbbl 20(%esi), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + sbbl 24(%esi), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 20(%esp), %edx # 4-byte Reload + sbbl 28(%esi), %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 16(%esp), %edx # 4-byte Reload + sbbl 32(%esi), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 12(%esp), %edx # 4-byte Reload + sbbl 36(%esi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 8(%esp), %edx # 4-byte Reload + sbbl 40(%esi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl (%esp), %edx # 4-byte Reload + sbbl 44(%esi), %edx + movl %edx, (%esp) # 4-byte Spill + sbbl 48(%esi), %ebp + sbbl 52(%esi), %edi + sbbl 56(%esi), %eax + sbbl $0, %ebx + testb $1, %bl + jne .LBB232_2 +# BB#1: # %nocarry + movl 4(%esp), %edx # 4-byte Reload + movl %edx, (%ecx) + movl 44(%esp), %edx # 4-byte Reload + movl %edx, 4(%ecx) + movl 40(%esp), %edx # 4-byte Reload + movl %edx, 8(%ecx) + movl 36(%esp), %edx # 4-byte Reload + movl %edx, 12(%ecx) + movl 32(%esp), %edx # 4-byte Reload + movl %edx, 16(%ecx) + movl 28(%esp), %edx # 4-byte Reload + movl %edx, 20(%ecx) + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 24(%ecx) + movl 20(%esp), %edx # 4-byte Reload + movl %edx, 28(%ecx) + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 32(%ecx) + movl 12(%esp), %edx # 4-byte Reload + movl %edx, 36(%ecx) + movl 8(%esp), %edx # 4-byte Reload + movl %edx, 40(%ecx) + movl (%esp), %edx # 4-byte Reload + movl %edx, 44(%ecx) + movl %ebp, 48(%ecx) + movl %edi, 52(%ecx) + movl %eax, 56(%ecx) +.LBB232_2: # %carry + addl $48, %esp + popl %esi + 
popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end232: + .size mcl_fp_add15Lbmi2, .Lfunc_end232-mcl_fp_add15Lbmi2 + + .globl mcl_fp_addNF15Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF15Lbmi2,@function +mcl_fp_addNF15Lbmi2: # @mcl_fp_addNF15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $120, %esp + movl 148(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %edx + movl 144(%esp), %esi + addl (%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 4(%esi), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 52(%ecx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 44(%ecx), %ebp + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl 20(%ecx), %ebx + movl 16(%ecx), %edi + movl 12(%ecx), %edx + movl 8(%ecx), %ecx + adcl 8(%esi), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 12(%esi), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 20(%esi), %ebx + movl %ebx, 72(%esp) # 4-byte Spill + adcl 24(%esi), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 28(%esi), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 32(%esi), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esi), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 40(%esi), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 44(%esi), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 48(%esi), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 52(%esi), %ebp + movl %ebp, 
92(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 56(%esi), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 152(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + subl (%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + sbbl 4(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 8(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%esi), %edx + movl %edx, 12(%esp) # 4-byte Spill + sbbl 16(%esi), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 20(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 24(%esi), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + sbbl 28(%esi), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + sbbl 32(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 36(%esi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + sbbl 40(%esi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + movl %edx, %eax + sbbl 44(%esi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + movl %eax, %edi + sbbl 48(%esi), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + movl %ecx, %edi + movl %ecx, %ebx + sbbl 52(%esi), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + movl %ecx, %edi + sbbl 56(%esi), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl %edi, %esi + sarl $31, %esi + testl %esi, %esi + movl 80(%esp), %esi # 4-byte Reload + js .LBB233_2 +# BB#1: + movl (%esp), %esi # 4-byte Reload +.LBB233_2: + movl 140(%esp), %edi + movl %esi, (%edi) + movl 84(%esp), %ecx # 4-byte Reload + js .LBB233_4 +# BB#3: + movl 4(%esp), %ecx # 4-byte Reload +.LBB233_4: + movl %ecx, 4(%edi) + movl 104(%esp), %ecx # 4-byte Reload + movl 72(%esp), %esi # 4-byte Reload + js .LBB233_6 +# BB#5: + movl 8(%esp), %eax # 
4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill +.LBB233_6: + movl 76(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%edi) + movl 64(%esp), %eax # 4-byte Reload + js .LBB233_8 +# BB#7: + movl 12(%esp), %eax # 4-byte Reload +.LBB233_8: + movl %eax, 12(%edi) + movl %ebx, %ebp + movl %edx, %eax + movl 68(%esp), %edx # 4-byte Reload + js .LBB233_10 +# BB#9: + movl 16(%esp), %edx # 4-byte Reload +.LBB233_10: + movl %edx, 16(%edi) + movl 112(%esp), %edx # 4-byte Reload + movl 108(%esp), %ebx # 4-byte Reload + js .LBB233_12 +# BB#11: + movl 20(%esp), %esi # 4-byte Reload +.LBB233_12: + movl %esi, 20(%edi) + js .LBB233_14 +# BB#13: + movl 24(%esp), %esi # 4-byte Reload + movl %esi, 88(%esp) # 4-byte Spill +.LBB233_14: + movl 88(%esp), %esi # 4-byte Reload + movl %esi, 24(%edi) + js .LBB233_16 +# BB#15: + movl 28(%esp), %ecx # 4-byte Reload +.LBB233_16: + movl %ecx, 28(%edi) + js .LBB233_18 +# BB#17: + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 116(%esp) # 4-byte Spill +.LBB233_18: + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%edi) + js .LBB233_20 +# BB#19: + movl 36(%esp), %ebx # 4-byte Reload +.LBB233_20: + movl %ebx, 36(%edi) + js .LBB233_22 +# BB#21: + movl 40(%esp), %edx # 4-byte Reload +.LBB233_22: + movl %edx, 40(%edi) + js .LBB233_24 +# BB#23: + movl 44(%esp), %eax # 4-byte Reload +.LBB233_24: + movl %eax, 44(%edi) + movl 96(%esp), %eax # 4-byte Reload + js .LBB233_26 +# BB#25: + movl 48(%esp), %eax # 4-byte Reload +.LBB233_26: + movl %eax, 48(%edi) + js .LBB233_28 +# BB#27: + movl 52(%esp), %ebp # 4-byte Reload +.LBB233_28: + movl %ebp, 52(%edi) + movl 100(%esp), %eax # 4-byte Reload + js .LBB233_30 +# BB#29: + movl 56(%esp), %eax # 4-byte Reload +.LBB233_30: + movl %eax, 56(%edi) + addl $120, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end233: + .size mcl_fp_addNF15Lbmi2, .Lfunc_end233-mcl_fp_addNF15Lbmi2 + + .globl mcl_fp_sub15Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub15Lbmi2,@function +mcl_fp_sub15Lbmi2: # 
@mcl_fp_sub15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 80(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 84(%esp), %edi + subl (%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 40(%esi), %edx + sbbl 40(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 44(%esi), %ecx + sbbl 44(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 48(%esi), %eax + sbbl 48(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 52(%esi), %ebp + sbbl 52(%edi), %ebp + movl 56(%esi), %esi + sbbl 56(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 76(%esp), %ebx + movl 48(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 52(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 36(%ebx) + movl %edx, 40(%ebx) + movl %ecx, 44(%ebx) + movl %eax, 
48(%ebx) + movl %ebp, 52(%ebx) + movl %esi, 56(%ebx) + je .LBB234_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 88(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 40(%esp), %edi # 4-byte Reload + adcl 8(%esi), %edi + movl 12(%esi), %eax + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl 48(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%ebx) + movl %ecx, 48(%ebx) + movl 52(%esi), %eax + adcl %ebp, %eax + movl %eax, 52(%ebx) + movl 56(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%ebx) +.LBB234_2: # %nocarry + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end234: + .size mcl_fp_sub15Lbmi2, .Lfunc_end234-mcl_fp_sub15Lbmi2 + + .globl mcl_fp_subNF15Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF15Lbmi2,@function +mcl_fp_subNF15Lbmi2: # @mcl_fp_subNF15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $96, %esp + movl 120(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 124(%esp), %edi + subl (%edi), %esi + movl %esi, 60(%esp) # 4-byte Spill + sbbl 4(%edi), %edx + movl %edx, 64(%esp) # 4-byte Spill 
+ movl 56(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 52(%ecx), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 32(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 40(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 48(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 32(%edi), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 56(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %eax, %ebp + sarl $31, %ebp + movl %ebp, %edi + shldl $1, %eax, %edi + movl 128(%esp), %edx + andl (%edx), %edi + movl 56(%edx), %eax + andl %ebp, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%edx), %eax + andl %ebp, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 48(%edx), %eax + andl %ebp, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 44(%edx), %eax + andl %ebp, %eax + movl %eax, 20(%esp) # 4-byte 
Spill + movl 40(%edx), %eax + andl %ebp, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 36(%edx), %eax + andl %ebp, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 32(%edx), %eax + andl %ebp, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 28(%edx), %eax + andl %ebp, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 24(%edx), %eax + andl %ebp, %eax + movl %eax, (%esp) # 4-byte Spill + movl 20(%edx), %ebx + andl %ebp, %ebx + movl 16(%edx), %esi + andl %ebp, %esi + movl 12(%edx), %ecx + andl %ebp, %ecx + movl 8(%edx), %eax + andl %ebp, %eax + andl 4(%edx), %ebp + addl 60(%esp), %edi # 4-byte Folded Reload + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl 116(%esp), %edx + movl %edi, (%edx) + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ebp, 4(%edx) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %eax, 8(%edx) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %ecx, 12(%edx) + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %esi, 16(%edx) + movl (%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 20(%edx) + movl 4(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%edx) + movl 8(%esp), %ecx # 4-byte Reload + adcl 92(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%edx) + movl 12(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%edx) + movl 16(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%edx) + movl 20(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%edx) + movl 24(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%edx) + movl 28(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%edx) + movl %eax, 52(%edx) + movl 44(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%edx) + addl $96, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + 
retl +.Lfunc_end235: + .size mcl_fp_subNF15Lbmi2, .Lfunc_end235-mcl_fp_subNF15Lbmi2 + + .globl mcl_fpDbl_add15Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add15Lbmi2,@function +mcl_fpDbl_add15Lbmi2: # @mcl_fpDbl_add15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + movl 136(%esp), %ecx + movl 132(%esp), %edx + movl 12(%edx), %edi + movl 16(%edx), %esi + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%edx), %ebp + movl 128(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%edx), %ebp + adcl 8(%edx), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %esi + movl %ebp, 4(%eax) + movl 68(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%edx), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %esi, 16(%eax) + movl 24(%edx), %esi + adcl %ebx, %esi + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%edx), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %esi, 24(%eax) + movl 32(%edx), %esi + adcl %ebx, %esi + movl 36(%ecx), %ebx + movl %edi, 28(%eax) + movl 36(%edx), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %esi, 32(%eax) + movl 40(%edx), %esi + adcl %ebx, %esi + movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%edx), %edi + adcl %ebx, %edi + movl 48(%ecx), %ebx + movl %esi, 40(%eax) + movl 48(%edx), %esi + adcl %ebx, %esi + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%edx), %edi + adcl %ebx, %edi + movl 56(%ecx), %ebx + movl %esi, 48(%eax) + movl 56(%edx), %esi + adcl %ebx, %esi + movl 60(%ecx), %ebx + movl %edi, 52(%eax) + movl 60(%edx), %edi + adcl %ebx, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 64(%ecx), %edi + movl %esi, 56(%eax) + movl 64(%edx), %eax + adcl %edi, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%edx), %eax + adcl %ebp, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl 72(%edx), %eax + adcl %esi, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl 76(%edx), %eax + adcl %esi, %eax + movl %eax, 
92(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl 80(%edx), %eax + adcl %esi, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl 84(%edx), %eax + adcl %esi, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%ecx), %esi + movl 88(%edx), %eax + adcl %esi, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%ecx), %esi + movl 92(%edx), %eax + adcl %esi, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%ecx), %esi + movl 96(%edx), %eax + adcl %esi, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%ecx), %esi + movl 100(%edx), %eax + adcl %esi, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%ecx), %eax + movl 104(%edx), %esi + adcl %eax, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 108(%ecx), %edi + movl 108(%edx), %eax + adcl %edi, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 112(%ecx), %ebx + movl 112(%edx), %edi + adcl %ebx, %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 116(%ecx), %ecx + movl 116(%edx), %edx + adcl %ecx, %edx + sbbl %ebx, %ebx + andl $1, %ebx + movl 140(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + subl (%ebp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 4(%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 20(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + sbbl 36(%ebp), %ecx + movl %ecx, (%esp) # 
4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 40(%ebp), %ecx + sbbl 44(%ebp), %esi + movl %esi, 40(%esp) # 4-byte Spill + sbbl 48(%ebp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %edi, %eax + movl %edx, %edi + sbbl 52(%ebp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %esi + sbbl 56(%ebp), %esi + sbbl $0, %ebx + andl $1, %ebx + jne .LBB236_2 +# BB#1: + movl %esi, %edi +.LBB236_2: + testb %bl, %bl + movl 76(%esp), %eax # 4-byte Reload + movl 72(%esp), %esi # 4-byte Reload + movl 68(%esp), %ebx # 4-byte Reload + movl 64(%esp), %ebp # 4-byte Reload + jne .LBB236_4 +# BB#3: + movl %ecx, %esi + movl (%esp), %ebx # 4-byte Reload + movl 4(%esp), %ebp # 4-byte Reload + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload +.LBB236_4: + movl 128(%esp), %edx + movl %eax, 60(%edx) + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 64(%edx) + movl 84(%esp), %eax # 4-byte Reload + movl %eax, 68(%edx) + movl 88(%esp), %eax # 4-byte Reload + movl %eax, 72(%edx) + movl 92(%esp), %eax # 4-byte Reload + movl %eax, 76(%edx) + movl 96(%esp), %eax # 4-byte Reload + movl %eax, 80(%edx) + movl 100(%esp), %eax # 4-byte Reload + movl %eax, 84(%edx) + movl 104(%esp), %eax # 4-byte Reload + movl %eax, 88(%edx) + movl %ebp, 92(%edx) + movl %ebx, 96(%edx) + movl %esi, 100(%edx) + movl 52(%esp), %eax # 4-byte Reload + jne .LBB236_6 +# BB#5: + movl 40(%esp), %eax # 4-byte Reload +.LBB236_6: + movl %eax, 104(%edx) + movl 60(%esp), %ecx # 4-byte Reload + movl 56(%esp), 
%eax # 4-byte Reload + jne .LBB236_8 +# BB#7: + movl 44(%esp), %eax # 4-byte Reload +.LBB236_8: + movl %eax, 108(%edx) + jne .LBB236_10 +# BB#9: + movl 48(%esp), %ecx # 4-byte Reload +.LBB236_10: + movl %ecx, 112(%edx) + movl %edi, 116(%edx) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end236: + .size mcl_fpDbl_add15Lbmi2, .Lfunc_end236-mcl_fpDbl_add15Lbmi2 + + .globl mcl_fpDbl_sub15Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub15Lbmi2,@function +mcl_fpDbl_sub15Lbmi2: # @mcl_fpDbl_sub15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 124(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 128(%esp), %ebp + subl (%ebp), %edx + sbbl 4(%ebp), %esi + movl 8(%eax), %edi + sbbl 8(%ebp), %edi + movl 120(%esp), %ecx + movl %edx, (%ecx) + movl 12(%eax), %edx + sbbl 12(%ebp), %edx + movl %esi, 4(%ecx) + movl 16(%eax), %esi + sbbl 16(%ebp), %esi + movl %edi, 8(%ecx) + movl 20(%ebp), %edi + movl %edx, 12(%ecx) + movl 20(%eax), %edx + sbbl %edi, %edx + movl 24(%ebp), %edi + movl %esi, 16(%ecx) + movl 24(%eax), %esi + sbbl %edi, %esi + movl 28(%ebp), %edi + movl %edx, 20(%ecx) + movl 28(%eax), %edx + sbbl %edi, %edx + movl 32(%ebp), %edi + movl %esi, 24(%ecx) + movl 32(%eax), %esi + sbbl %edi, %esi + movl 36(%ebp), %edi + movl %edx, 28(%ecx) + movl 36(%eax), %edx + sbbl %edi, %edx + movl 40(%ebp), %edi + movl %esi, 32(%ecx) + movl 40(%eax), %esi + sbbl %edi, %esi + movl 44(%ebp), %edi + movl %edx, 36(%ecx) + movl 44(%eax), %edx + sbbl %edi, %edx + movl 48(%ebp), %edi + movl %esi, 40(%ecx) + movl 48(%eax), %esi + sbbl %edi, %esi + movl 52(%ebp), %edi + movl %edx, 44(%ecx) + movl 52(%eax), %edx + sbbl %edi, %edx + movl 56(%ebp), %edi + movl %esi, 48(%ecx) + movl 56(%eax), %esi + sbbl %edi, %esi + movl 60(%ebp), %edi + movl %edx, 52(%ecx) + movl 60(%eax), %edx + sbbl %edi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 64(%ebp), %edx + movl %esi, 56(%ecx) + movl 64(%eax), %esi + sbbl %edx, %esi 
+ movl %esi, 36(%esp) # 4-byte Spill + movl 68(%ebp), %edx + movl 68(%eax), %esi + sbbl %edx, %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 72(%ebp), %edx + movl 72(%eax), %esi + sbbl %edx, %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 76(%ebp), %edx + movl 76(%eax), %esi + sbbl %edx, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 80(%ebp), %edx + movl 80(%eax), %esi + sbbl %edx, %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 84(%ebp), %edx + movl 84(%eax), %esi + sbbl %edx, %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 88(%ebp), %edx + movl 88(%eax), %esi + sbbl %edx, %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 92(%ebp), %edx + movl 92(%eax), %esi + sbbl %edx, %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 96(%ebp), %edx + movl 96(%eax), %esi + sbbl %edx, %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 100(%ebp), %edx + movl 100(%eax), %esi + sbbl %edx, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 104(%ebp), %edx + movl 104(%eax), %esi + sbbl %edx, %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 108(%ebp), %edx + movl 108(%eax), %esi + sbbl %edx, %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 112(%ebp), %edx + movl 112(%eax), %esi + sbbl %edx, %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 116(%ebp), %edx + movl 116(%eax), %eax + sbbl %edx, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 132(%esp), %esi + jne .LBB237_1 +# BB#2: + movl $0, 60(%esp) # 4-byte Folded Spill + jmp .LBB237_3 +.LBB237_1: + movl 56(%esi), %edx + movl %edx, 60(%esp) # 4-byte Spill +.LBB237_3: + testb %al, %al + jne .LBB237_4 +# BB#5: + movl $0, 24(%esp) # 4-byte Folded Spill + movl $0, %ebx + jmp .LBB237_6 +.LBB237_4: + movl (%esi), %ebx + movl 4(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB237_6: + jne .LBB237_7 +# BB#8: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB237_9 +.LBB237_7: + movl 52(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB237_9: + jne .LBB237_10 +# BB#11: + movl $0, 
28(%esp) # 4-byte Folded Spill + jmp .LBB237_12 +.LBB237_10: + movl 48(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill +.LBB237_12: + jne .LBB237_13 +# BB#14: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB237_15 +.LBB237_13: + movl 44(%esi), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB237_15: + jne .LBB237_16 +# BB#17: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB237_18 +.LBB237_16: + movl 40(%esi), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB237_18: + jne .LBB237_19 +# BB#20: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB237_21 +.LBB237_19: + movl 36(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB237_21: + jne .LBB237_22 +# BB#23: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB237_24 +.LBB237_22: + movl 32(%esi), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB237_24: + jne .LBB237_25 +# BB#26: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB237_27 +.LBB237_25: + movl 28(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB237_27: + jne .LBB237_28 +# BB#29: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB237_30 +.LBB237_28: + movl 24(%esi), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB237_30: + jne .LBB237_31 +# BB#32: + movl $0, %edx + jmp .LBB237_33 +.LBB237_31: + movl 20(%esi), %edx +.LBB237_33: + jne .LBB237_34 +# BB#35: + movl $0, %ebp + jmp .LBB237_36 +.LBB237_34: + movl 16(%esi), %ebp +.LBB237_36: + jne .LBB237_37 +# BB#38: + movl $0, %eax + jmp .LBB237_39 +.LBB237_37: + movl 12(%esi), %eax +.LBB237_39: + jne .LBB237_40 +# BB#41: + xorl %esi, %esi + jmp .LBB237_42 +.LBB237_40: + movl 8(%esi), %esi +.LBB237_42: + addl 44(%esp), %ebx # 4-byte Folded Reload + movl 24(%esp), %edi # 4-byte Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %ebx, 60(%ecx) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edi, 64(%ecx) + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %esi, 68(%ecx) + adcl 52(%esp), %ebp # 4-byte Folded Reload + movl %eax, 72(%ecx) + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %ebp, 
76(%ecx) + movl (%esp), %esi # 4-byte Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%ecx) + movl 4(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %esi, 84(%ecx) + movl 8(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %eax, 88(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 16(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl 28(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %eax, 104(%ecx) + movl 32(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %edx, 108(%ecx) + movl %eax, 112(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%ecx) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end237: + .size mcl_fpDbl_sub15Lbmi2, .Lfunc_end237-mcl_fpDbl_sub15Lbmi2 + + .align 16, 0x90 + .type .LmulPv512x32,@function +.LmulPv512x32: # @mulPv512x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl %edx, %eax + movl 76(%esp), %edi + movl %edi, %edx + mulxl 4(%eax), %ebx, %esi + movl %edi, %edx + mulxl (%eax), %ebp, %edx + movl %ebp, 52(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 8(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 12(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 16(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 20(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 24(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 
28(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 28(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 32(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 36(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 40(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 44(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 48(%eax), %ebx, %ebp + adcl %esi, %ebx + movl %edi, %edx + mulxl 52(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebp, %esi + movl %edi, %edx + mulxl 56(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 44(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 36(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 32(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%ecx) + movl %ebx, 48(%ecx) + movl %esi, 52(%ecx) + movl %edx, 56(%ecx) + movl %edi, %edx + mulxl 60(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%ecx) + adcl $0, %edx + movl %edx, 64(%ecx) + movl %ecx, %eax + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end238: + .size .LmulPv512x32, .Lfunc_end238-.LmulPv512x32 + + .globl mcl_fp_mulUnitPre16Lbmi2 + .align 16, 0x90 + .type 
mcl_fp_mulUnitPre16Lbmi2,@function +mcl_fp_mulUnitPre16Lbmi2: # @mcl_fp_mulUnitPre16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $124, %esp + calll .L239$pb +.L239$pb: + popl %ebx +.Ltmp50: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp50-.L239$pb), %ebx + movl 152(%esp), %eax + movl %eax, (%esp) + leal 56(%esp), %ecx + movl 148(%esp), %edx + calll .LmulPv512x32 + movl 120(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 108(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 104(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 76(%esp), %ebp + movl 72(%esp), %ebx + movl 68(%esp), %edi + movl 64(%esp), %esi + movl 56(%esp), %edx + movl 60(%esp), %ecx + movl 144(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 64(%eax) + addl $124, %esp 
+ popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end239: + .size mcl_fp_mulUnitPre16Lbmi2, .Lfunc_end239-mcl_fp_mulUnitPre16Lbmi2 + + .globl mcl_fpDbl_mulPre16Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre16Lbmi2,@function +mcl_fpDbl_mulPre16Lbmi2: # @mcl_fpDbl_mulPre16Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $300, %esp # imm = 0x12C + calll .L240$pb +.L240$pb: + popl %ebx +.Ltmp51: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp51-.L240$pb), %ebx + movl %ebx, -224(%ebp) # 4-byte Spill + movl 16(%ebp), %edi + movl %edi, 8(%esp) + movl 12(%ebp), %esi + movl %esi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre8Lbmi2@PLT + leal 32(%edi), %eax + movl %eax, 8(%esp) + leal 32(%esi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 64(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre8Lbmi2@PLT + movl 52(%esi), %ebx + movl 48(%esi), %eax + movl 44(%esi), %ecx + movl 40(%esi), %edx + movl %edx, -176(%ebp) # 4-byte Spill + movl (%esi), %edi + movl 4(%esi), %edx + addl 32(%esi), %edi + movl %edi, -184(%ebp) # 4-byte Spill + movl %esi, %edi + adcl 36(%edi), %edx + movl %edx, -236(%ebp) # 4-byte Spill + movl -176(%ebp), %edx # 4-byte Reload + adcl 8(%edi), %edx + movl %edx, -176(%ebp) # 4-byte Spill + adcl 12(%edi), %ecx + movl %ecx, -232(%ebp) # 4-byte Spill + adcl 16(%edi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + adcl 20(%edi), %ebx + movl %ebx, -228(%ebp) # 4-byte Spill + movl 56(%edi), %eax + adcl 24(%edi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %ecx + popl %eax + movl %ecx, -144(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl (%esi), %ecx + addl 32(%esi), %ecx + movl %ecx, -188(%ebp) # 4-byte Spill + movl 4(%esi), %ecx + adcl 36(%esi), %ecx + movl %ecx, -192(%ebp) # 4-byte Spill + movl 40(%esi), %ecx + adcl 8(%esi), %ecx + movl %ecx, -196(%ebp) # 4-byte Spill + movl 44(%esi), %ecx + adcl 12(%esi), %ecx + movl %ecx, 
-200(%ebp) # 4-byte Spill + movl 48(%esi), %ecx + adcl 16(%esi), %ecx + movl %ecx, -204(%ebp) # 4-byte Spill + movl 52(%esi), %ecx + adcl 20(%esi), %ecx + movl %ecx, -208(%ebp) # 4-byte Spill + movl 56(%esi), %edx + adcl 24(%esi), %edx + movl 60(%esi), %ecx + adcl 28(%esi), %ecx + pushl %eax + seto %al + lahf + movl %eax, %ebx + popl %eax + movl %ebx, -252(%ebp) # 4-byte Spill + movl -212(%ebp), %ebx # 4-byte Reload + movl -176(%ebp), %esi # 4-byte Reload + movl %esi, -216(%ebp) # 4-byte Spill + movl -184(%ebp), %esi # 4-byte Reload + movl %esi, -220(%ebp) # 4-byte Spill + jb .LBB240_2 +# BB#1: + xorl %eax, %eax + xorl %ebx, %ebx + movl $0, -216(%ebp) # 4-byte Folded Spill + movl $0, -220(%ebp) # 4-byte Folded Spill +.LBB240_2: + movl %ebx, -244(%ebp) # 4-byte Spill + movl %eax, -240(%ebp) # 4-byte Spill + movl 60(%edi), %eax + movl -144(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + adcl 28(%edi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + movl %ecx, -172(%ebp) # 4-byte Spill + movl %edx, -144(%ebp) # 4-byte Spill + movl -208(%ebp), %eax # 4-byte Reload + movl %eax, -148(%ebp) # 4-byte Spill + movl -204(%ebp), %eax # 4-byte Reload + movl %eax, -152(%ebp) # 4-byte Spill + movl -200(%ebp), %eax # 4-byte Reload + movl %eax, -156(%ebp) # 4-byte Spill + movl -196(%ebp), %eax # 4-byte Reload + movl %eax, -160(%ebp) # 4-byte Spill + movl -192(%ebp), %eax # 4-byte Reload + movl %eax, -164(%ebp) # 4-byte Spill + movl -188(%ebp), %eax # 4-byte Reload + movl %eax, -168(%ebp) # 4-byte Spill + jb .LBB240_4 +# BB#3: + movl $0, -172(%ebp) # 4-byte Folded Spill + movl $0, -144(%ebp) # 4-byte Folded Spill + movl $0, -148(%ebp) # 4-byte Folded Spill + movl $0, -152(%ebp) # 4-byte Folded Spill + movl $0, -156(%ebp) # 4-byte Folded Spill + movl $0, -160(%ebp) # 4-byte Folded Spill + movl $0, -164(%ebp) # 4-byte Folded Spill + movl $0, -168(%ebp) # 4-byte Folded Spill +.LBB240_4: + movl -184(%ebp), %eax # 4-byte Reload + movl %eax, 
-108(%ebp) + movl -236(%ebp), %eax # 4-byte Reload + movl %eax, -104(%ebp) + movl -176(%ebp), %edi # 4-byte Reload + movl %edi, -100(%ebp) + movl -232(%ebp), %edi # 4-byte Reload + movl %edi, -96(%ebp) + movl -212(%ebp), %esi # 4-byte Reload + movl %esi, -92(%ebp) + movl -228(%ebp), %esi # 4-byte Reload + movl %esi, -88(%ebp) + movl -248(%ebp), %ebx # 4-byte Reload + movl %ebx, -84(%ebp) + movl -188(%ebp), %ebx # 4-byte Reload + movl %ebx, -140(%ebp) + movl -192(%ebp), %ebx # 4-byte Reload + movl %ebx, -136(%ebp) + movl -196(%ebp), %ebx # 4-byte Reload + movl %ebx, -132(%ebp) + movl -200(%ebp), %ebx # 4-byte Reload + movl %ebx, -128(%ebp) + movl -204(%ebp), %ebx # 4-byte Reload + movl %ebx, -124(%ebp) + movl -208(%ebp), %ebx # 4-byte Reload + movl %ebx, -120(%ebp) + movl %esi, %ebx + movl %edi, %esi + movl %eax, %edi + movl %edx, -116(%ebp) + movl %ecx, -112(%ebp) + sbbl %edx, %edx + movl -180(%ebp), %eax # 4-byte Reload + movl %eax, -80(%ebp) + movl -252(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB240_6 +# BB#5: + movl $0, %eax + movl $0, %ebx + movl $0, %esi + movl $0, %edi +.LBB240_6: + movl %eax, -180(%ebp) # 4-byte Spill + sbbl %eax, %eax + leal -140(%ebp), %ecx + movl %ecx, 8(%esp) + leal -108(%ebp), %ecx + movl %ecx, 4(%esp) + leal -76(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl -220(%ebp), %eax # 4-byte Reload + addl %eax, -168(%ebp) # 4-byte Folded Spill + adcl %edi, -164(%ebp) # 4-byte Folded Spill + movl -216(%ebp), %eax # 4-byte Reload + adcl %eax, -160(%ebp) # 4-byte Folded Spill + adcl %esi, -156(%ebp) # 4-byte Folded Spill + movl -244(%ebp), %eax # 4-byte Reload + adcl %eax, -152(%ebp) # 4-byte Folded Spill + adcl %ebx, -148(%ebp) # 4-byte Folded Spill + movl -144(%ebp), %eax # 4-byte Reload + adcl -240(%ebp), %eax # 4-byte Folded Reload + movl %eax, -144(%ebp) # 4-byte Spill + movl -172(%ebp), %edi # 4-byte Reload + adcl -180(%ebp), %edi # 4-byte Folded Reload + sbbl %esi, %esi 
+ andl $1, %esi + andl $1, %edx + movl %edx, -176(%ebp) # 4-byte Spill + movl -224(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre8Lbmi2@PLT + movl -168(%ebp), %eax # 4-byte Reload + addl -44(%ebp), %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl -164(%ebp), %eax # 4-byte Reload + adcl -40(%ebp), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl -160(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl -32(%ebp), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl -152(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -152(%ebp) # 4-byte Spill + movl -148(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -148(%ebp) # 4-byte Spill + movl -144(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -144(%ebp) # 4-byte Spill + adcl -16(%ebp), %edi + movl %edi, -172(%ebp) # 4-byte Spill + adcl %esi, -176(%ebp) # 4-byte Folded Spill + movl -76(%ebp), %eax + movl 8(%ebp), %esi + subl (%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + movl -72(%ebp), %ecx + sbbl 4(%esi), %ecx + movl -68(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -192(%ebp) # 4-byte Spill + movl -64(%ebp), %edx + sbbl 12(%esi), %edx + movl -60(%ebp), %ebx + sbbl 16(%esi), %ebx + movl -56(%ebp), %eax + sbbl 20(%esi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + movl -52(%ebp), %eax + sbbl 24(%esi), %eax + movl %eax, -184(%ebp) # 4-byte Spill + movl -48(%ebp), %eax + sbbl 28(%esi), %eax + movl %eax, -188(%ebp) # 4-byte Spill + movl 32(%esi), %eax + movl %eax, -200(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, -204(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl 
%eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 56(%esi), %eax + movl %eax, -224(%ebp) # 4-byte Spill + movl -144(%ebp), %edi # 4-byte Reload + sbbl %eax, %edi + movl 60(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + sbbl $0, -176(%ebp) # 4-byte Folded Spill + movl 64(%esi), %eax + movl %eax, -260(%ebp) # 4-byte Spill + subl %eax, -196(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -264(%ebp) # 4-byte Spill + sbbl %eax, %ecx + movl 72(%esi), %eax + movl %eax, -268(%ebp) # 4-byte Spill + sbbl %eax, -192(%ebp) # 4-byte Folded Spill + movl 76(%esi), %eax + movl %eax, -272(%ebp) # 4-byte Spill + sbbl %eax, %edx + movl 80(%esi), %eax + movl %eax, -276(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 84(%esi), %eax + movl %eax, -280(%ebp) # 4-byte Spill + sbbl %eax, -180(%ebp) # 4-byte Folded Spill + movl 88(%esi), %eax + movl %eax, -284(%ebp) # 4-byte Spill + sbbl %eax, -184(%ebp) # 4-byte Folded Spill + movl 92(%esi), %eax + movl %eax, -288(%ebp) # 4-byte Spill + sbbl %eax, -188(%ebp) # 4-byte Folded Spill + movl 96(%esi), %eax + movl %eax, -292(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 100(%esi), %eax + movl %eax, -236(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 104(%esi), %eax + movl %eax, -240(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 108(%esi), %eax + movl %eax, -244(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 112(%esi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 116(%esi), %eax + movl %eax, -252(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 120(%esi), %eax + movl %eax, -232(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl %edi, -144(%ebp) # 4-byte Spill + movl 
124(%esi), %eax + movl %eax, -256(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + movl -176(%ebp), %edi # 4-byte Reload + sbbl $0, %edi + movl -196(%ebp), %eax # 4-byte Reload + addl -200(%ebp), %eax # 4-byte Folded Reload + adcl -204(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 32(%esi) + movl -192(%ebp), %eax # 4-byte Reload + adcl -208(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 36(%esi) + adcl -212(%ebp), %edx # 4-byte Folded Reload + movl %eax, 40(%esi) + adcl -216(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 44(%esi) + movl -180(%ebp), %eax # 4-byte Reload + adcl -220(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 48(%esi) + movl -184(%ebp), %ecx # 4-byte Reload + adcl -224(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 52(%esi) + movl -188(%ebp), %edx # 4-byte Reload + adcl -228(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 56(%esi) + movl -168(%ebp), %eax # 4-byte Reload + adcl -260(%ebp), %eax # 4-byte Folded Reload + movl %edx, 60(%esi) + movl -164(%ebp), %ecx # 4-byte Reload + adcl -264(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 64(%esi) + movl -160(%ebp), %eax # 4-byte Reload + adcl -268(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 68(%esi) + movl -156(%ebp), %ecx # 4-byte Reload + adcl -272(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 72(%esi) + movl -152(%ebp), %eax # 4-byte Reload + adcl -276(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 76(%esi) + movl -148(%ebp), %ecx # 4-byte Reload + adcl -280(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 80(%esi) + movl -144(%ebp), %eax # 4-byte Reload + adcl -284(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 84(%esi) + movl -172(%ebp), %ecx # 4-byte Reload + adcl -288(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 88(%esi) + adcl -292(%ebp), %edi # 4-byte Folded Reload + movl %ecx, 92(%esi) + movl %edi, 96(%esi) + movl -236(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -240(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 
104(%esi) + movl -244(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + movl -248(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 112(%esi) + movl -252(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 116(%esi) + movl -232(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 120(%esi) + movl -256(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 124(%esi) + addl $300, %esp # imm = 0x12C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end240: + .size mcl_fpDbl_mulPre16Lbmi2, .Lfunc_end240-mcl_fpDbl_mulPre16Lbmi2 + + .globl mcl_fpDbl_sqrPre16Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre16Lbmi2,@function +mcl_fpDbl_sqrPre16Lbmi2: # @mcl_fpDbl_sqrPre16Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $300, %esp # imm = 0x12C + calll .L241$pb +.L241$pb: + popl %ebx +.Ltmp52: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp52-.L241$pb), %ebx + movl %ebx, -184(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre8Lbmi2@PLT + leal 32(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 64(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre8Lbmi2@PLT + movl 52(%edi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + movl 48(%edi), %eax + movl 44(%edi), %ebx + movl 40(%edi), %esi + movl (%edi), %ecx + movl 4(%edi), %edx + addl 32(%edi), %ecx + movl %ecx, -192(%ebp) # 4-byte Spill + adcl 36(%edi), %edx + movl %edx, -196(%ebp) # 4-byte Spill + adcl 8(%edi), %esi + movl %esi, -188(%ebp) # 4-byte Spill + adcl 12(%edi), %ebx + adcl 16(%edi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + movl -180(%ebp), %eax # 4-byte Reload + adcl 20(%edi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -168(%ebp) # 4-byte Spill + addl %ecx, %ecx + movl %ecx, -164(%ebp) # 4-byte Spill + adcl %edx, %edx + movl %edx, -160(%ebp) # 4-byte Spill + adcl %esi, 
%esi + movl %esi, -156(%ebp) # 4-byte Spill + movl %ebx, %edx + movl %ebx, %esi + adcl %edx, %edx + movl %edx, -152(%ebp) # 4-byte Spill + movl -208(%ebp), %eax # 4-byte Reload + movl %eax, %edx + movl %eax, %ebx + adcl %edx, %edx + movl %edx, -148(%ebp) # 4-byte Spill + movl -180(%ebp), %edx # 4-byte Reload + adcl %edx, %edx + movl %edx, -144(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl 56(%edi), %edx + movl -168(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + adcl 24(%edi), %edx + movl 60(%edi), %ecx + adcl 28(%edi), %ecx + seto %al + lahf + movl %eax, %eax + movl %eax, -200(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -204(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %edi + sbbl %eax, %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl %edi, %eax + addb $127, %al + sahf + jb .LBB241_2 +# BB#1: + movl $0, -144(%ebp) # 4-byte Folded Spill + movl $0, -148(%ebp) # 4-byte Folded Spill + movl $0, -152(%ebp) # 4-byte Folded Spill + movl $0, -156(%ebp) # 4-byte Folded Spill + movl $0, -160(%ebp) # 4-byte Folded Spill + movl $0, -164(%ebp) # 4-byte Folded Spill +.LBB241_2: + movl %edx, %eax + movl -172(%ebp), %edi # 4-byte Reload + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + adcl %eax, %eax + movl %ecx, %edi + adcl %edi, %edi + movl %edi, -176(%ebp) # 4-byte Spill + movl -204(%ebp), %edi # 4-byte Reload + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + jb .LBB241_4 +# BB#3: + movl $0, -176(%ebp) # 4-byte Folded Spill + xorl %eax, %eax +.LBB241_4: + movl %eax, -172(%ebp) # 4-byte Spill + movl -192(%ebp), %eax # 4-byte Reload + movl %eax, -108(%ebp) + movl %eax, -140(%ebp) + movl -196(%ebp), %eax # 4-byte Reload + movl %eax, -104(%ebp) + movl %eax, -136(%ebp) + movl -188(%ebp), %eax # 4-byte Reload + movl %eax, -100(%ebp) + movl %eax, -132(%ebp) + movl %esi, -96(%ebp) + movl %esi, -128(%ebp) + movl %ebx, -92(%ebp) + movl 
%ebx, -124(%ebp) + movl -180(%ebp), %eax # 4-byte Reload + movl %eax, -88(%ebp) + movl %eax, -120(%ebp) + movl %edx, -84(%ebp) + movl %edx, -116(%ebp) + movl %ecx, -80(%ebp) + movl %ecx, -112(%ebp) + movl -200(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB241_5 +# BB#6: + xorl %edi, %edi + jmp .LBB241_7 +.LBB241_5: + shrl $31, %ecx + movl %ecx, %edi +.LBB241_7: + leal -140(%ebp), %eax + movl %eax, 8(%esp) + leal -108(%ebp), %eax + movl %eax, 4(%esp) + leal -76(%ebp), %eax + movl %eax, (%esp) + movl -168(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -184(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre8Lbmi2@PLT + movl -164(%ebp), %eax # 4-byte Reload + addl -44(%ebp), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl -160(%ebp), %eax # 4-byte Reload + adcl -40(%ebp), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl -152(%ebp), %eax # 4-byte Reload + adcl -32(%ebp), %eax + movl %eax, -152(%ebp) # 4-byte Spill + movl -148(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -148(%ebp) # 4-byte Spill + movl -144(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -144(%ebp) # 4-byte Spill + movl -172(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl -176(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + adcl %edi, %esi + movl %esi, -168(%ebp) # 4-byte Spill + movl -76(%ebp), %ecx + movl 8(%ebp), %esi + subl (%esi), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + movl -72(%ebp), %edi + sbbl 4(%esi), %edi + movl -68(%ebp), %edx + sbbl 8(%esi), %edx + movl %edx, -184(%ebp) # 4-byte Spill + movl -64(%ebp), %edx + sbbl 12(%esi), %edx + movl %edx, -192(%ebp) # 4-byte Spill + movl -60(%ebp), %ebx + sbbl 16(%esi), %ebx + movl %eax, %ecx + movl -56(%ebp), %eax + sbbl 20(%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + movl -52(%ebp), %edx + sbbl 24(%esi), 
%edx + movl %edx, -188(%ebp) # 4-byte Spill + movl -48(%ebp), %edx + sbbl 28(%esi), %edx + movl 32(%esi), %eax + movl %eax, -200(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, -204(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 56(%esi), %eax + movl %eax, -224(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + movl 60(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + sbbl %eax, %ecx + movl %ecx, -176(%ebp) # 4-byte Spill + movl -168(%ebp), %eax # 4-byte Reload + sbbl $0, %eax + movl 64(%esi), %ecx + movl %ecx, -260(%ebp) # 4-byte Spill + subl %ecx, -180(%ebp) # 4-byte Folded Spill + movl 68(%esi), %ecx + movl %ecx, -264(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl 72(%esi), %ecx + movl %ecx, -268(%ebp) # 4-byte Spill + sbbl %ecx, -184(%ebp) # 4-byte Folded Spill + movl 76(%esi), %ecx + movl %ecx, -272(%ebp) # 4-byte Spill + sbbl %ecx, -192(%ebp) # 4-byte Folded Spill + movl 80(%esi), %ecx + movl %ecx, -276(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 84(%esi), %ecx + movl %ecx, -280(%ebp) # 4-byte Spill + sbbl %ecx, -196(%ebp) # 4-byte Folded Spill + movl 88(%esi), %ecx + movl %ecx, -284(%ebp) # 4-byte Spill + sbbl %ecx, -188(%ebp) # 4-byte Folded Spill + movl 92(%esi), %ecx + movl %ecx, -288(%ebp) # 4-byte Spill + sbbl %ecx, %edx + movl 96(%esi), %ecx + movl %ecx, -292(%ebp) # 4-byte Spill + sbbl %ecx, -164(%ebp) # 4-byte Folded Spill + movl 100(%esi), %ecx + movl %ecx, -232(%ebp) # 4-byte Spill + sbbl %ecx, -160(%ebp) # 4-byte Folded Spill 
+ movl 104(%esi), %ecx + movl %ecx, -236(%ebp) # 4-byte Spill + sbbl %ecx, -156(%ebp) # 4-byte Folded Spill + movl 108(%esi), %ecx + movl %ecx, -240(%ebp) # 4-byte Spill + sbbl %ecx, -152(%ebp) # 4-byte Folded Spill + movl 112(%esi), %ecx + movl %ecx, -244(%ebp) # 4-byte Spill + sbbl %ecx, -148(%ebp) # 4-byte Folded Spill + movl 116(%esi), %ecx + movl %ecx, -248(%ebp) # 4-byte Spill + sbbl %ecx, -144(%ebp) # 4-byte Folded Spill + movl 120(%esi), %ecx + movl %ecx, -252(%ebp) # 4-byte Spill + sbbl %ecx, -172(%ebp) # 4-byte Folded Spill + movl 124(%esi), %ecx + movl %ecx, -256(%ebp) # 4-byte Spill + sbbl %ecx, -176(%ebp) # 4-byte Folded Spill + sbbl $0, %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl -180(%ebp), %eax # 4-byte Reload + addl -200(%ebp), %eax # 4-byte Folded Reload + adcl -204(%ebp), %edi # 4-byte Folded Reload + movl %eax, 32(%esi) + movl -184(%ebp), %eax # 4-byte Reload + adcl -208(%ebp), %eax # 4-byte Folded Reload + movl %edi, 36(%esi) + movl -192(%ebp), %ecx # 4-byte Reload + adcl -212(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 40(%esi) + adcl -216(%ebp), %ebx # 4-byte Folded Reload + movl %ecx, 44(%esi) + movl -196(%ebp), %ecx # 4-byte Reload + adcl -220(%ebp), %ecx # 4-byte Folded Reload + movl %ebx, 48(%esi) + movl -188(%ebp), %eax # 4-byte Reload + adcl -224(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl %edx, %ecx + adcl -228(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 56(%esi) + movl -164(%ebp), %eax # 4-byte Reload + adcl -260(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 60(%esi) + movl -160(%ebp), %ecx # 4-byte Reload + adcl -264(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 64(%esi) + movl -156(%ebp), %eax # 4-byte Reload + adcl -268(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 68(%esi) + movl -152(%ebp), %ecx # 4-byte Reload + adcl -272(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 72(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl -276(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 76(%esi) + 
movl -144(%ebp), %ecx # 4-byte Reload + adcl -280(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 80(%esi) + movl -172(%ebp), %eax # 4-byte Reload + adcl -284(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 84(%esi) + movl -176(%ebp), %ecx # 4-byte Reload + adcl -288(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 88(%esi) + movl -168(%ebp), %eax # 4-byte Reload + adcl -292(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 92(%esi) + movl %eax, 96(%esi) + movl -232(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -236(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 104(%esi) + movl -240(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + movl -244(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 112(%esi) + movl -248(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 116(%esi) + movl -252(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 120(%esi) + movl -256(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 124(%esi) + addl $300, %esp # imm = 0x12C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end241: + .size mcl_fpDbl_sqrPre16Lbmi2, .Lfunc_end241-mcl_fpDbl_sqrPre16Lbmi2 + + .globl mcl_fp_mont16Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont16Lbmi2,@function +mcl_fp_mont16Lbmi2: # @mcl_fp_mont16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2428, %esp # imm = 0x97C + calll .L242$pb +.L242$pb: + popl %ebx +.Ltmp53: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp53-.L242$pb), %ebx + movl 2460(%esp), %eax + movl -4(%eax), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 2360(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 2360(%esp), %ebp + movl 2364(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2424(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 2420(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 2416(%esp), %ecx + 
movl %ecx, 116(%esp) # 4-byte Spill + movl 2412(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2408(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2404(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2400(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2396(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2392(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2388(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2384(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2380(%esp), %edi + movl 2376(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2372(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2368(%esp), %esi + movl %eax, (%esp) + leal 2288(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + addl 2288(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 2292(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 2296(%esp), %esi + movl %esi, %ebp + movl 100(%esp), %eax # 4-byte Reload + adcl 2300(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2304(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2308(%esp), %edi + movl %edi, %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 2312(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2320(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2328(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2340(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 
116(%esp), %eax # 4-byte Reload + adcl 2344(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2348(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2352(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 2456(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 2216(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + movl 112(%esp), %ecx # 4-byte Reload + addl 2216(%esp), %ecx + adcl 2220(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2224(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2228(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2232(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 2236(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 2240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2244(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2252(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2256(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2260(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2268(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2272(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2276(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 2280(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 
%ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2144(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + addl 2144(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 2148(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2152(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2156(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2160(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 2164(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 2168(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 2172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2176(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2180(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2184(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2188(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2192(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2196(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2200(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2204(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 2208(%esp), %esi + adcl $0, %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 2072(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 112(%esp), %ecx # 4-byte Reload + addl 2072(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 100(%esp), 
%eax # 4-byte Reload + adcl 2076(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2080(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2084(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2088(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 2092(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2096(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2100(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2104(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 2108(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 2112(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 2116(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 2120(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2124(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2128(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 2132(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2136(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2000(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %esi + movl %esi, %eax + movl 112(%esp), %ecx # 4-byte Reload + addl 2000(%esp), %ecx + movl 100(%esp), %ecx # 4-byte Reload + adcl 2004(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2008(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2012(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + 
movl 60(%esp), %ecx # 4-byte Reload + adcl 2016(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2020(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2024(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 2028(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 2032(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 2036(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2040(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 2044(%esp), %edi + movl 116(%esp), %ecx # 4-byte Reload + adcl 2048(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 2052(%esp), %ebp + movl 124(%esp), %esi # 4-byte Reload + adcl 2056(%esp), %esi + movl 108(%esp), %ecx # 4-byte Reload + adcl 2060(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 2064(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1928(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 100(%esp), %ecx # 4-byte Reload + addl 1928(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 1932(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1936(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1940(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1944(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1948(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1952(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 
1956(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1960(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1964(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1968(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 1972(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1976(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + adcl 1980(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1984(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1988(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1992(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1856(%esp), %ecx + movl 2460(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + andl $1, %ebp + movl %ebp, %eax + addl 1856(%esp), %esi + movl 88(%esp), %ecx # 4-byte Reload + adcl 1860(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1864(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1868(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1872(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1876(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1880(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1884(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1888(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 1892(%esp), %esi + adcl 1896(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 
116(%esp), %ebp # 4-byte Reload + adcl 1900(%esp), %ebp + movl 120(%esp), %ecx # 4-byte Reload + adcl 1904(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1908(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1912(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1916(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1920(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1784(%esp), %ecx + movl 2452(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + movl 88(%esp), %ecx # 4-byte Reload + addl 1784(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 1804(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1816(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1824(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1832(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1836(%esp), %eax + movl %eax, 108(%esp) # 
4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1840(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1844(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1848(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1712(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %esi + movl %esi, %ecx + addl 1712(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1716(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1720(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1732(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1740(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + movl 96(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1764(%esp), %ebp + movl 104(%esp), %esi # 4-byte Reload + adcl 1768(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 100(%esp) # 
4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1640(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 1640(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1664(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1668(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1680(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 1688(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + adcl 1692(%esp), %esi + movl %esi, %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 1696(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1700(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1704(%esp), %esi + sbbl %eax, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1568(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 80(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1568(%esp), %ebp + 
movl 60(%esp), %ecx # 4-byte Reload + adcl 1572(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1576(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1580(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1584(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1588(%esp), %ebp + movl 84(%esp), %ecx # 4-byte Reload + adcl 1592(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1596(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1600(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1604(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1608(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1612(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1616(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 1620(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1624(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1628(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 1632(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1496(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 1496(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1500(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1504(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl 
%eax, 68(%esp) # 4-byte Spill + adcl 1512(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1516(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1544(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1424(%esp), %ecx + movl 2460(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + andl $1, %ebp + movl 60(%esp), %eax # 4-byte Reload + addl 1424(%esp), %eax + movl 72(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1432(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1444(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1448(%esp), 
%eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1472(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1480(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1484(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1488(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ebp + movl 2456(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1352(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1352(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), 
%eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1396(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 1404(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1412(%esp), %esi + adcl 1416(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1280(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %ebp + movl %ebp, %eax + addl 1280(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1284(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1288(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1300(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1312(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1316(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1320(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1328(%esp), 
%ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1336(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 1340(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1344(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %edi + movl 2456(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 2452(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 1208(%esp), %ecx + adcl 1212(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1260(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 1264(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax 
# 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1272(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + movl %edi, %ecx + addl 1136(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %edi # 4-byte Reload + adcl 1164(%esp), %edi + movl 124(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1188(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1192(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 
1064(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 1064(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1088(%esp), %edi + movl %edi, 120(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl 1092(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1116(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl %edi, %eax + andl $1, %eax + addl 992(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + adcl 996(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 
1000(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1004(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 1008(%esp), %edi + movl 116(%esp), %ebp # 4-byte Reload + adcl 1012(%esp), %ebp + movl 120(%esp), %ecx # 4-byte Reload + adcl 1016(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + adcl 1020(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1024(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1028(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 1032(%esp), %esi + movl 100(%esp), %ecx # 4-byte Reload + adcl 1036(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1040(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1044(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1056(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 920(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 932(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + adcl 936(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte 
Reload + adcl 944(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 956(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 968(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %esi + movl %esi, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + movl %edi, %ecx + addl 848(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 856(%esp), %edi + movl 96(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %esi # 4-byte Reload + adcl 868(%esp), %esi + movl 124(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 
884(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 896(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 776(%esp), %ecx + adcl 780(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 784(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 792(%esp), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 800(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 
64(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 84(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 704(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 712(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 728(%esp), %esi + movl 104(%esp), %ebp # 4-byte Reload + adcl 732(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 752(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill 
+ movl 76(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 92(%esp), %ecx # 4-byte Reload + addl 632(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 652(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl 656(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 664(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 676(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 680(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 92(%esp), %ecx # 4-byte 
Reload + andl $1, %ecx + addl 560(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl 576(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 592(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 608(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 612(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 488(%esp), %ecx + movl 116(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 500(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 
504(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 508(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 516(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 520(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 536(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 96(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 416(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + adcl 436(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 440(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 448(%esp), %ebp + 
movl %ebp, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 116(%esp), %ecx # 4-byte Reload + addl 344(%esp), %ecx + movl 120(%esp), %ebp # 4-byte Reload + adcl 348(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 356(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 360(%esp), %edi + adcl 364(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 60(%esp) # 4-byte 
Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 116(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 272(%esp), %esi + adcl 276(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 288(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 296(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 308(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + 
movl 92(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 120(%esp), %ecx # 4-byte Reload + addl 200(%esp), %ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 212(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 220(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 232(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 244(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl 56(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 
128(%esp), %ecx + movl 2460(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + andl $1, %ebp + addl 128(%esp), %esi + movl 104(%esp), %ebx # 4-byte Reload + movl 124(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 140(%esp), %ebx + movl %ebx, 104(%esp) # 4-byte Spill + movl 112(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 112(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 148(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 152(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 156(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 160(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 168(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 172(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 176(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 180(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 184(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 188(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + adcl 192(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + adcl $0, %ebp + movl %eax, %edx + movl 2460(%esp), %edi + subl (%edi), %edx + movl %ecx, %eax + sbbl 4(%edi), %eax + movl %ebx, %ecx + sbbl 8(%edi), %ecx + movl 112(%esp), %ebx # 4-byte Reload + sbbl 12(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl 16(%edi), %ebx + movl %ebx, 16(%esp) # 
4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 20(%edi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + sbbl 24(%edi), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + sbbl 28(%edi), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + sbbl 32(%edi), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + sbbl 36(%edi), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + sbbl 40(%edi), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 44(%edi), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 48(%edi), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + sbbl 52(%edi), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + sbbl 56(%edi), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + sbbl 60(%edi), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 124(%esp), %edi # 4-byte Reload + sbbl $0, %ebp + andl $1, %ebp + movl %ebp, %ebx + jne .LBB242_2 +# BB#1: + movl %edx, %edi +.LBB242_2: + movl 2448(%esp), %edx + movl %edi, (%edx) + testb %bl, %bl + movl 108(%esp), %edi # 4-byte Reload + jne .LBB242_4 +# BB#3: + movl %eax, %edi +.LBB242_4: + movl %edi, 4(%edx) + jne .LBB242_6 +# BB#5: + movl %ecx, 104(%esp) # 4-byte Spill +.LBB242_6: + movl 104(%esp), %eax # 4-byte Reload + movl %eax, 8(%edx) + jne .LBB242_8 +# BB#7: + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 112(%esp) # 4-byte Spill +.LBB242_8: + movl 112(%esp), %eax # 4-byte Reload + movl %eax, 12(%edx) + movl 100(%esp), %eax # 4-byte Reload + jne .LBB242_10 +# BB#9: + movl 16(%esp), %eax # 4-byte Reload +.LBB242_10: + movl %eax, 16(%edx) + movl 88(%esp), %eax # 4-byte Reload + jne .LBB242_12 +# BB#11: + movl 20(%esp), %eax # 4-byte Reload +.LBB242_12: + movl %eax, 20(%edx) + jne .LBB242_14 +# BB#13: + movl 24(%esp), %eax # 
4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill +.LBB242_14: + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 24(%edx) + movl 72(%esp), %eax # 4-byte Reload + jne .LBB242_16 +# BB#15: + movl 28(%esp), %eax # 4-byte Reload +.LBB242_16: + movl %eax, 28(%edx) + movl 64(%esp), %eax # 4-byte Reload + jne .LBB242_18 +# BB#17: + movl 32(%esp), %eax # 4-byte Reload +.LBB242_18: + movl %eax, 32(%edx) + movl 60(%esp), %eax # 4-byte Reload + jne .LBB242_20 +# BB#19: + movl 36(%esp), %eax # 4-byte Reload +.LBB242_20: + movl %eax, 36(%edx) + movl 68(%esp), %eax # 4-byte Reload + jne .LBB242_22 +# BB#21: + movl 40(%esp), %eax # 4-byte Reload +.LBB242_22: + movl %eax, 40(%edx) + movl 76(%esp), %eax # 4-byte Reload + jne .LBB242_24 +# BB#23: + movl 44(%esp), %eax # 4-byte Reload +.LBB242_24: + movl %eax, 44(%edx) + movl 84(%esp), %eax # 4-byte Reload + jne .LBB242_26 +# BB#25: + movl 48(%esp), %eax # 4-byte Reload +.LBB242_26: + movl %eax, 48(%edx) + movl 92(%esp), %eax # 4-byte Reload + jne .LBB242_28 +# BB#27: + movl 52(%esp), %eax # 4-byte Reload +.LBB242_28: + movl %eax, 52(%edx) + movl 96(%esp), %eax # 4-byte Reload + jne .LBB242_30 +# BB#29: + movl 56(%esp), %eax # 4-byte Reload +.LBB242_30: + movl %eax, 56(%edx) + movl 116(%esp), %eax # 4-byte Reload + jne .LBB242_32 +# BB#31: + movl 120(%esp), %eax # 4-byte Reload +.LBB242_32: + movl %eax, 60(%edx) + addl $2428, %esp # imm = 0x97C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end242: + .size mcl_fp_mont16Lbmi2, .Lfunc_end242-mcl_fp_mont16Lbmi2 + + .globl mcl_fp_montNF16Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF16Lbmi2,@function +mcl_fp_montNF16Lbmi2: # @mcl_fp_montNF16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2412, %esp # imm = 0x96C + calll .L243$pb +.L243$pb: + popl %ebx +.Ltmp54: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp54-.L243$pb), %ebx + movl 2444(%esp), %eax + movl -4(%eax), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl (%eax), %eax + 
movl %eax, (%esp) + leal 2344(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 2344(%esp), %edi + movl 2348(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 2408(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2404(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2400(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2396(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2392(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2388(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2384(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 2380(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 2376(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 2372(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2368(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2364(%esp), %ebp + movl 2360(%esp), %esi + movl 2356(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2352(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 2272(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 2272(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 2276(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2280(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2284(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 2288(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 2292(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 2296(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2300(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 2304(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 2308(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 2312(%esp), %eax + movl %eax, 56(%esp) # 
4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2320(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2324(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 2328(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 2332(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2336(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 2200(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 2264(%esp), %edx + movl 108(%esp), %ecx # 4-byte Reload + addl 2200(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 2204(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2208(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 2216(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 2228(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 2232(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 2236(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 2240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2244(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2248(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 2252(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2256(%esp), %eax + 
movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 2260(%esp), %esi + adcl $0, %edx + movl %edx, 108(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2128(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 2128(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 2132(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2136(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2140(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2144(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2148(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2152(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 2156(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 2160(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 2164(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2168(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2172(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2176(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2180(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2184(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 2188(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 2192(%esp), %esi + movl 2440(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 2056(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 2120(%esp), %eax + movl 84(%esp), %edx # 4-byte Reload + addl 
2056(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + adcl 2060(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2064(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 2068(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2072(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 2076(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 2080(%esp), %edi + movl %edi, %ebp + movl 52(%esp), %ecx # 4-byte Reload + adcl 2084(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 2088(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2092(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2096(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 2100(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2104(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 2108(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 2112(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 2116(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1984(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1984(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 1988(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1992(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1996(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + 
adcl 2000(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 2004(%esp), %edi + adcl 2008(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2016(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2020(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2024(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2028(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2032(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 2036(%esp), %ebp + movl 100(%esp), %esi # 4-byte Reload + adcl 2040(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 2044(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2048(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1912(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1976(%esp), %eax + movl 76(%esp), %edx # 4-byte Reload + addl 1912(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + adcl 1916(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1920(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1924(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1928(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1932(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1936(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1940(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), 
%ecx # 4-byte Reload + adcl 1944(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 1948(%esp), %edi + movl 96(%esp), %ecx # 4-byte Reload + adcl 1952(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1956(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1960(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + adcl 1964(%esp), %esi + movl 108(%esp), %ecx # 4-byte Reload + adcl 1968(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1972(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1840(%esp), %ecx + movl 2444(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + addl 1840(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1844(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1848(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1852(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1856(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1864(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1868(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1872(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1876(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 1880(%esp), %edi + movl 92(%esp), %ebp # 4-byte Reload + adcl 1884(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 1888(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1892(%esp), %esi + 
movl %esi, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1896(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1900(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1904(%esp), %esi + movl 2440(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1768(%esp), %ecx + movl 2436(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + movl 1832(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + addl 1768(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1784(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1804(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + adcl 1808(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1824(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1828(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, 
%eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1696(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1696(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1700(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1704(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1708(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1712(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1716(%esp), %ebp + movl 56(%esp), %edi # 4-byte Reload + adcl 1720(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1732(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1740(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 1744(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1624(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1688(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + addl 1624(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte 
Reload + adcl 1632(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 1640(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 1644(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 1648(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1664(%esp), %esi + movl %esi, %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1680(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1552(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1552(%esp), %esi + movl 64(%esp), %esi # 4-byte Reload + adcl 1556(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1568(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1576(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + 
adcl 1580(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1584(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1592(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1600(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1608(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 1616(%esp), %edi + movl 2440(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1480(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1544(%esp), %eax + addl 1480(%esp), %esi + movl 60(%esp), %edx # 4-byte Reload + adcl 1484(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 1488(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 1492(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1496(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 1500(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1504(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1508(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 1512(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1516(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1520(%esp), %edx + movl %edx, 
100(%esp) # 4-byte Spill + adcl 1524(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1528(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 1532(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1536(%esp), %ebp + adcl 1540(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl %eax, %edi + adcl $0, %edi + movl %esi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1408(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1408(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 1416(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1432(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1444(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1464(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1468(%esp), %ebp 
+ adcl 1472(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1336(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1400(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + addl 1336(%esp), %ecx + adcl 1340(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 1344(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1348(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 1352(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1356(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1360(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 1364(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1368(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1372(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 1376(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 1380(%esp), %edi + movl 76(%esp), %esi # 4-byte Reload + adcl 1384(%esp), %esi + movl 80(%esp), %edx # 4-byte Reload + adcl 1388(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + adcl 1392(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 1396(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1264(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 
4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1308(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + adcl 1312(%esp), %esi + movl %esi, %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1324(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1192(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1256(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + addl 1192(%esp), %ecx + movl 52(%esp), %edx # 4-byte Reload + adcl 1196(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1200(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 1204(%esp), %esi + movl 88(%esp), %edx # 4-byte Reload + adcl 1208(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 
1212(%esp), %edi + movl 92(%esp), %edx # 4-byte Reload + adcl 1216(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1220(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1224(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 1228(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1232(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + adcl 1236(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 1240(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 1244(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 1248(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 1252(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1120(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1120(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1132(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1140(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1144(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 108(%esp) # 
4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1176(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 1048(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1112(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + addl 1048(%esp), %ecx + movl 56(%esp), %esi # 4-byte Reload + adcl 1052(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1068(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1076(%esp), %ebp + movl 108(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1100(%esp), %edi + 
movl %edi, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 976(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 976(%esp), %edi + adcl 980(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1000(%esp), %edi + adcl 1004(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1008(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1016(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 904(%esp), %ecx + movl 2436(%esp), %edx + calll 
.LmulPv512x32 + movl 968(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + addl 904(%esp), %eax + movl 72(%esp), %edx # 4-byte Reload + adcl 908(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 912(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 916(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 920(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + adcl 924(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 928(%esp), %edi + adcl 932(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 936(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + adcl 940(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 944(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 948(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 952(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 956(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 960(%esp), %ebp + movl 52(%esp), %edx # 4-byte Reload + adcl 964(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl %eax, %esi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 832(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 832(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 92(%esp) # 4-byte 
Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 856(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 872(%esp), %esi + movl 68(%esp), %edi # 4-byte Reload + adcl 876(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 888(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 892(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 824(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + addl 760(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 76(%esp) # 
4-byte Spill + adcl 796(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 800(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 808(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 816(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 688(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 688(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 716(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 732(%esp), %ebp + adcl 736(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte 
Reload + adcl 744(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 680(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + addl 616(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 624(%esp), %edi + movl 104(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 640(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 656(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 672(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 88(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 
2444(%esp), %edx + calll .LmulPv512x32 + addl 544(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 552(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 560(%esp), %edi + movl 108(%esp), %esi # 4-byte Reload + adcl 564(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 600(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 536(%esp), %edx + movl 96(%esp), %ecx # 4-byte Reload + addl 472(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 484(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + adcl 488(%esp), %esi + movl %esi, 
108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 496(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 400(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 400(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 412(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 420(%esp), %edi + adcl 424(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 
64(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 444(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 392(%esp), %edx + movl 92(%esp), %ecx # 4-byte Reload + addl 328(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 336(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 344(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 352(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 368(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 
72(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 92(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 256(%esp), %ebp + movl 104(%esp), %edi # 4-byte Reload + adcl 260(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 268(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 280(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 284(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 60(%eax), 
%eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 248(%esp), %edx + movl %edi, %ecx + addl 184(%esp), %ecx + movl 100(%esp), %edi # 4-byte Reload + adcl 188(%esp), %edi + adcl 192(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 196(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 208(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 112(%esp), %esi + movl %edi, %eax + adcl 116(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 120(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + adcl 124(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + adcl 128(%esp), %ecx + movl %ecx, 
76(%esp) # 4-byte Spill + movl %ecx, %ebx + movl 80(%esp), %ecx # 4-byte Reload + adcl 132(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 140(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 148(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 152(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 160(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 168(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 176(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl %eax, %edx + movl 2444(%esp), %esi + subl (%esi), %edx + sbbl 4(%esi), %edi + movl %ebp, %ecx + sbbl 8(%esi), %ecx + movl %ebx, %eax + sbbl 12(%esi), %eax + movl 80(%esp), %ebx # 4-byte Reload + sbbl 16(%esi), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + movl 68(%esp), %ebx # 4-byte Reload + sbbl 20(%esi), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + sbbl 24(%esi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 60(%esp), %ebx # 4-byte Reload + sbbl 28(%esi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 48(%esp), %ebx # 4-byte Reload + sbbl 32(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + sbbl 36(%esi), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload 
+ sbbl 40(%esi), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 72(%esp), %ebx # 4-byte Reload + sbbl 44(%esi), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 48(%esi), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 96(%esp), %ebx # 4-byte Reload + sbbl 52(%esi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 92(%esp), %ebx # 4-byte Reload + sbbl 56(%esi), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 104(%esp), %ebx # 4-byte Reload + sbbl 60(%esi), %ebx + movl %ebx, 84(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + testl %ebx, %ebx + js .LBB243_2 +# BB#1: + movl %edx, %esi +.LBB243_2: + movl 2432(%esp), %edx + movl %esi, (%edx) + movl 108(%esp), %esi # 4-byte Reload + js .LBB243_4 +# BB#3: + movl %edi, %esi +.LBB243_4: + movl %esi, 4(%edx) + js .LBB243_6 +# BB#5: + movl %ecx, %ebp +.LBB243_6: + movl %ebp, 8(%edx) + movl 76(%esp), %ecx # 4-byte Reload + js .LBB243_8 +# BB#7: + movl %eax, %ecx +.LBB243_8: + movl %ecx, 12(%edx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB243_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB243_10: + movl %eax, 16(%edx) + movl 68(%esp), %eax # 4-byte Reload + js .LBB243_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB243_12: + movl %eax, 20(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB243_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB243_14: + movl %eax, 24(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB243_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB243_16: + movl %eax, 28(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB243_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB243_18: + movl %eax, 32(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB243_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB243_20: + movl %eax, 36(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB243_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB243_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + 
js .LBB243_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB243_24: + movl %eax, 44(%edx) + movl 88(%esp), %eax # 4-byte Reload + js .LBB243_26 +# BB#25: + movl 36(%esp), %eax # 4-byte Reload +.LBB243_26: + movl %eax, 48(%edx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB243_28 +# BB#27: + movl 40(%esp), %eax # 4-byte Reload +.LBB243_28: + movl %eax, 52(%edx) + movl 92(%esp), %eax # 4-byte Reload + js .LBB243_30 +# BB#29: + movl 44(%esp), %eax # 4-byte Reload +.LBB243_30: + movl %eax, 56(%edx) + movl 104(%esp), %eax # 4-byte Reload + js .LBB243_32 +# BB#31: + movl 84(%esp), %eax # 4-byte Reload +.LBB243_32: + movl %eax, 60(%edx) + addl $2412, %esp # imm = 0x96C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end243: + .size mcl_fp_montNF16Lbmi2, .Lfunc_end243-mcl_fp_montNF16Lbmi2 + + .globl mcl_fp_montRed16Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed16Lbmi2,@function +mcl_fp_montRed16Lbmi2: # @mcl_fp_montRed16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1356, %esp # imm = 0x54C + calll .L244$pb +.L244$pb: + popl %eax +.Ltmp55: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp55-.L244$pb), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1384(%esp), %edx + movl -4(%edx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1380(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 112(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 120(%esp) # 4-byte Spill + imull %eax, %ebx + movl 124(%ecx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 112(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 108(%ecx), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl %esi, 152(%esp) # 4-byte Spill + movl 100(%ecx), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 96(%ecx), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 88(%ecx), 
%eax + movl %eax, 188(%esp) # 4-byte Spill + movl 84(%ecx), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 80(%ecx), %edi + movl %edi, 148(%esp) # 4-byte Spill + movl 76(%ecx), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 192(%esp) # 4-byte Spill + movl 68(%ecx), %edi + movl %edi, 204(%esp) # 4-byte Spill + movl 64(%ecx), %esi + movl %esi, 200(%esp) # 4-byte Spill + movl 60(%ecx), %edi + movl %edi, 180(%esp) # 4-byte Spill + movl 56(%ecx), %edi + movl %edi, 164(%esp) # 4-byte Spill + movl 52(%ecx), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 40(%ecx), %ebp + movl 36(%ecx), %edi + movl 32(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 20(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 12(%ecx), %esi + movl 8(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 60(%edx), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 56(%edx), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 52(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte 
Spill + movl 4(%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 1288(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + movl 112(%esp), %eax # 4-byte Reload + addl 1288(%esp), %eax + movl 120(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1300(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1324(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + adcl 1328(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + adcl $0, 204(%esp) # 4-byte Folded Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + movl 196(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + 
adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + sbbl %eax, %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + movl 112(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 1216(%esp), %esi + movl 76(%esp), %edx # 4-byte Reload + adcl 1220(%esp), %edx + movl 80(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %esi # 4-byte Reload + adcl 1260(%esp), %esi + movl 160(%esp), %eax # 4-byte Reload + adcl 1264(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl 
%eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 196(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 156(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 1144(%esp), %ebp + movl 80(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 
140(%esp) # 4-byte Spill + adcl 1184(%esp), %esi + movl %esi, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1208(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + movl 168(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 156(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 128(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 1072(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl 
%eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 168(%esp) # 4-byte Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 128(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 1000(%esp), %edi + movl 88(%esp), %ecx # 
4-byte Reload + adcl 1004(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + movl 188(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + movl 172(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded 
Spill + adcl $0, %esi + movl %esi, 124(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 928(%esp), %esi + movl 92(%esp), %ecx # 4-byte Reload + adcl 932(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 188(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl 
$0, 176(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 172(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + movl 144(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + movl 100(%esp), %ebp # 4-byte Reload + imull %ebp, %eax + movl %eax, (%esp) + leal 856(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 856(%esp), %edi + movl 96(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 148(%esp) # 
4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + movl 176(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 144(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull %ebp, %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 784(%esp), %esi + movl 104(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %ebp # 4-byte Reload + adcl 828(%esp), %ebp + movl 196(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + 
movl 148(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 176(%esp) # 4-byte Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 156(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 712(%esp), %edi + movl 108(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + adcl 752(%esp), %ebp + movl %ebp, 192(%esp) # 4-byte Spill + movl 196(%esp), %edi # 4-byte 
Reload + adcl 756(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 156(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 640(%esp), %esi + movl 120(%esp), %ecx # 4-byte Reload + adcl 644(%esp), %ecx + movl 140(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %esi # 4-byte Reload + adcl 668(%esp), %esi + movl 204(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + adcl 680(%esp), %edi + movl %edi, 
196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1384(%esp), %eax + movl %eax, %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 568(%esp), %ebp + movl 140(%esp), %ecx # 4-byte Reload + adcl 572(%esp), %ecx + movl 136(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %ebp # 4-byte Reload + adcl 588(%esp), %ebp + adcl 592(%esp), %esi + movl %esi, 200(%esp) # 4-byte Spill + movl 204(%esp), %esi # 4-byte Reload + adcl 596(%esp), %esi + movl 192(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 
608(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl 632(%esp), %edi + movl %edi, 152(%esp) # 4-byte Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 496(%esp), %edi + movl 136(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl 160(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %edi # 4-byte Reload + adcl 508(%esp), %edi + adcl 512(%esp), %ebp + movl %ebp, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + adcl 520(%esp), %esi + movl %esi, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 532(%esp), %ebp + movl 184(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 188(%esp) 
# 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 424(%esp), %esi + movl 160(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + adcl 432(%esp), %edi + movl %edi, 164(%esp) # 4-byte Spill + movl 180(%esp), %ecx # 4-byte Reload + adcl 436(%esp), %ecx + movl %ecx, 180(%esp) # 4-byte Spill + movl 200(%esp), %ecx # 4-byte Reload + adcl 440(%esp), %ecx + movl %ecx, 200(%esp) # 4-byte Spill + movl 204(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl %ecx, 204(%esp) # 4-byte Spill + movl 192(%esp), %ecx # 4-byte Reload + adcl 448(%esp), %ecx + movl %ecx, 192(%esp) # 4-byte Spill + movl 196(%esp), %ecx # 4-byte Reload + adcl 452(%esp), %ecx + movl %ecx, 196(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + movl 184(%esp), %ecx # 4-byte Reload + adcl 460(%esp), %ecx + movl %ecx, 184(%esp) # 4-byte Spill + movl 188(%esp), %ecx # 4-byte Reload + adcl 464(%esp), %ecx + movl %ecx, 188(%esp) # 4-byte Spill + movl 168(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 168(%esp) # 4-byte Spill + movl 176(%esp), %ecx # 4-byte Reload + adcl 472(%esp), %ecx + movl %ecx, 176(%esp) # 
4-byte Spill + movl 172(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 172(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 484(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 488(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %eax, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 352(%esp), %esi + movl 164(%esp), %esi # 4-byte Reload + adcl 356(%esp), %esi + movl 180(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl 380(%esp), %ebp + movl 184(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + 
movl 156(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl 416(%esp), %edi + movl %edi, 132(%esp) # 4-byte Spill + movl 128(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 280(%esp), %esi + movl 180(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl 200(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl 304(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %esi # 4-byte Reload + adcl 316(%esp), %esi + movl 176(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 132(%esp) # 4-byte 
Spill + adcl 344(%esp), %edi + movl %edi, 128(%esp) # 4-byte Spill + movl 124(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 112(%esp) # 4-byte Folded Spill + movl 100(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 208(%esp), %ebp + movl 200(%esp), %edx # 4-byte Reload + adcl 212(%esp), %edx + movl %edx, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %ecx # 4-byte Reload + adcl 220(%esp), %ecx + movl %ecx, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 228(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl %eax, %ebx + movl 188(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + adcl 240(%esp), %esi + movl %esi, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl 272(%esp), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %edx, 
%eax + subl 24(%esp), %edx # 4-byte Folded Reload + movl 204(%esp), %esi # 4-byte Reload + sbbl 12(%esp), %esi # 4-byte Folded Reload + sbbl 16(%esp), %ecx # 4-byte Folded Reload + movl 196(%esp), %eax # 4-byte Reload + sbbl 20(%esp), %eax # 4-byte Folded Reload + sbbl 28(%esp), %ebp # 4-byte Folded Reload + sbbl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 100(%esp) # 4-byte Spill + movl 188(%esp), %ebx # 4-byte Reload + sbbl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 104(%esp) # 4-byte Spill + movl 168(%esp), %ebx # 4-byte Reload + sbbl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 108(%esp) # 4-byte Spill + movl 176(%esp), %ebx # 4-byte Reload + sbbl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 112(%esp) # 4-byte Spill + movl 172(%esp), %ebx # 4-byte Reload + sbbl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 116(%esp) # 4-byte Spill + movl 152(%esp), %ebx # 4-byte Reload + sbbl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 120(%esp) # 4-byte Spill + movl 156(%esp), %ebx # 4-byte Reload + sbbl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 136(%esp) # 4-byte Spill + movl 144(%esp), %ebx # 4-byte Reload + sbbl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 140(%esp) # 4-byte Spill + movl 132(%esp), %ebx # 4-byte Reload + sbbl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 160(%esp) # 4-byte Spill + movl 128(%esp), %ebx # 4-byte Reload + sbbl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 164(%esp) # 4-byte Spill + movl 124(%esp), %ebx # 4-byte Reload + sbbl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 180(%esp) # 4-byte Spill + sbbl $0, %edi + andl $1, %edi + movl %edi, %ebx + jne .LBB244_2 +# BB#1: + movl %edx, 200(%esp) # 4-byte Spill +.LBB244_2: + movl 1376(%esp), %edx + movl 200(%esp), %edi # 4-byte Reload + movl %edi, (%edx) + testb %bl, %bl + jne .LBB244_4 +# BB#3: + movl %esi, 204(%esp) # 4-byte Spill +.LBB244_4: + movl 204(%esp), %esi # 4-byte Reload + movl %esi, 4(%edx) + movl 192(%esp), %esi # 
4-byte Reload + jne .LBB244_6 +# BB#5: + movl %ecx, %esi +.LBB244_6: + movl %esi, 8(%edx) + movl 196(%esp), %ecx # 4-byte Reload + jne .LBB244_8 +# BB#7: + movl %eax, %ecx +.LBB244_8: + movl %ecx, 12(%edx) + movl 128(%esp), %esi # 4-byte Reload + movl 148(%esp), %eax # 4-byte Reload + jne .LBB244_10 +# BB#9: + movl %ebp, %eax +.LBB244_10: + movl %eax, 16(%edx) + movl 124(%esp), %ecx # 4-byte Reload + movl 176(%esp), %eax # 4-byte Reload + movl 184(%esp), %ebp # 4-byte Reload + jne .LBB244_12 +# BB#11: + movl 100(%esp), %ebp # 4-byte Reload +.LBB244_12: + movl %ebp, 20(%edx) + movl 152(%esp), %ebp # 4-byte Reload + movl 188(%esp), %ebx # 4-byte Reload + jne .LBB244_14 +# BB#13: + movl 104(%esp), %ebx # 4-byte Reload +.LBB244_14: + movl %ebx, 24(%edx) + movl 156(%esp), %ebx # 4-byte Reload + movl 168(%esp), %edi # 4-byte Reload + jne .LBB244_16 +# BB#15: + movl 108(%esp), %edi # 4-byte Reload +.LBB244_16: + movl %edi, 28(%edx) + movl 144(%esp), %edi # 4-byte Reload + jne .LBB244_18 +# BB#17: + movl 112(%esp), %eax # 4-byte Reload +.LBB244_18: + movl %eax, 32(%edx) + jne .LBB244_20 +# BB#19: + movl 116(%esp), %eax # 4-byte Reload + movl %eax, 172(%esp) # 4-byte Spill +.LBB244_20: + movl 172(%esp), %eax # 4-byte Reload + movl %eax, 36(%edx) + jne .LBB244_22 +# BB#21: + movl 120(%esp), %ebp # 4-byte Reload +.LBB244_22: + movl %ebp, 40(%edx) + movl 132(%esp), %eax # 4-byte Reload + jne .LBB244_24 +# BB#23: + movl 136(%esp), %ebx # 4-byte Reload +.LBB244_24: + movl %ebx, 44(%edx) + jne .LBB244_26 +# BB#25: + movl 140(%esp), %edi # 4-byte Reload +.LBB244_26: + movl %edi, 48(%edx) + jne .LBB244_28 +# BB#27: + movl 160(%esp), %eax # 4-byte Reload +.LBB244_28: + movl %eax, 52(%edx) + jne .LBB244_30 +# BB#29: + movl 164(%esp), %esi # 4-byte Reload +.LBB244_30: + movl %esi, 56(%edx) + jne .LBB244_32 +# BB#31: + movl 180(%esp), %ecx # 4-byte Reload +.LBB244_32: + movl %ecx, 60(%edx) + addl $1356, %esp # imm = 0x54C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl 
+.Lfunc_end244: + .size mcl_fp_montRed16Lbmi2, .Lfunc_end244-mcl_fp_montRed16Lbmi2 + + .globl mcl_fp_addPre16Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre16Lbmi2,@function +mcl_fp_addPre16Lbmi2: # @mcl_fp_addPre16Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %ebx + adcl 8(%ecx), %ebx + movl 16(%esp), %edi + movl %edx, (%edi) + movl 12(%ecx), %edx + movl %esi, 4(%edi) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %ebx, 8(%edi) + movl 20(%eax), %ebx + movl %edx, 12(%edi) + movl 20(%ecx), %edx + adcl %ebx, %edx + movl 24(%eax), %ebx + movl %esi, 16(%edi) + movl 24(%ecx), %esi + adcl %ebx, %esi + movl 28(%eax), %ebx + movl %edx, 20(%edi) + movl 28(%ecx), %edx + adcl %ebx, %edx + movl 32(%eax), %ebx + movl %esi, 24(%edi) + movl 32(%ecx), %esi + adcl %ebx, %esi + movl 36(%eax), %ebx + movl %edx, 28(%edi) + movl 36(%ecx), %edx + adcl %ebx, %edx + movl 40(%eax), %ebx + movl %esi, 32(%edi) + movl 40(%ecx), %esi + adcl %ebx, %esi + movl 44(%eax), %ebx + movl %edx, 36(%edi) + movl 44(%ecx), %edx + adcl %ebx, %edx + movl 48(%eax), %ebx + movl %esi, 40(%edi) + movl 48(%ecx), %esi + adcl %ebx, %esi + movl 52(%eax), %ebx + movl %edx, 44(%edi) + movl 52(%ecx), %edx + adcl %ebx, %edx + movl 56(%eax), %ebx + movl %esi, 48(%edi) + movl 56(%ecx), %esi + adcl %ebx, %esi + movl %edx, 52(%edi) + movl %esi, 56(%edi) + movl 60(%eax), %eax + movl 60(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 60(%edi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end245: + .size mcl_fp_addPre16Lbmi2, .Lfunc_end245-mcl_fp_addPre16Lbmi2 + + .globl mcl_fp_subPre16Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre16Lbmi2,@function +mcl_fp_subPre16Lbmi2: # @mcl_fp_subPre16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl 
%eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebp + sbbl 8(%edx), %ebp + movl 20(%esp), %ebx + movl %esi, (%ebx) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebx) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebp, 8(%ebx) + movl 20(%edx), %ebp + movl %esi, 12(%ebx) + movl 20(%ecx), %esi + sbbl %ebp, %esi + movl 24(%edx), %ebp + movl %edi, 16(%ebx) + movl 24(%ecx), %edi + sbbl %ebp, %edi + movl 28(%edx), %ebp + movl %esi, 20(%ebx) + movl 28(%ecx), %esi + sbbl %ebp, %esi + movl 32(%edx), %ebp + movl %edi, 24(%ebx) + movl 32(%ecx), %edi + sbbl %ebp, %edi + movl 36(%edx), %ebp + movl %esi, 28(%ebx) + movl 36(%ecx), %esi + sbbl %ebp, %esi + movl 40(%edx), %ebp + movl %edi, 32(%ebx) + movl 40(%ecx), %edi + sbbl %ebp, %edi + movl 44(%edx), %ebp + movl %esi, 36(%ebx) + movl 44(%ecx), %esi + sbbl %ebp, %esi + movl 48(%edx), %ebp + movl %edi, 40(%ebx) + movl 48(%ecx), %edi + sbbl %ebp, %edi + movl 52(%edx), %ebp + movl %esi, 44(%ebx) + movl 52(%ecx), %esi + sbbl %ebp, %esi + movl 56(%edx), %ebp + movl %edi, 48(%ebx) + movl 56(%ecx), %edi + sbbl %ebp, %edi + movl %esi, 52(%ebx) + movl %edi, 56(%ebx) + movl 60(%edx), %edx + movl 60(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 60(%ebx) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end246: + .size mcl_fp_subPre16Lbmi2, .Lfunc_end246-mcl_fp_subPre16Lbmi2 + + .globl mcl_fp_shr1_16Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_16Lbmi2,@function +mcl_fp_shr1_16Lbmi2: # @mcl_fp_shr1_16Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl 
$1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 44(%ecx) + movl 52(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 48(%ecx) + movl 56(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 52(%ecx) + movl 60(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 56(%ecx) + shrl %eax + movl %eax, 60(%ecx) + popl %esi + retl +.Lfunc_end247: + .size mcl_fp_shr1_16Lbmi2, .Lfunc_end247-mcl_fp_shr1_16Lbmi2 + + .globl mcl_fp_add16Lbmi2 + .align 16, 0x90 + .type mcl_fp_add16Lbmi2,@function +mcl_fp_add16Lbmi2: # @mcl_fp_add16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 84(%esp), %edx + movl (%edx), %esi + movl 4(%edx), %ebp + movl 80(%esp), %ecx + addl (%ecx), %esi + movl %esi, %ebx + adcl 4(%ecx), %ebp + movl 8(%edx), %eax + adcl 8(%ecx), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 12(%ecx), %esi + movl 16(%ecx), %edi + adcl 12(%edx), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl 16(%edx), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 20(%ecx), %eax + adcl 20(%edx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%ecx), %eax + adcl 24(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%ecx), %eax + adcl 28(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%ecx), %eax + adcl 32(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%ecx), %eax + adcl 36(%edx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 40(%ecx), %eax + adcl 40(%edx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 44(%ecx), %eax + adcl 44(%edx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 48(%ecx), %eax + adcl 48(%edx), %eax + movl %eax, 16(%esp) # 
4-byte Spill + movl 52(%ecx), %eax + adcl 52(%edx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 56(%ecx), %esi + adcl 56(%edx), %esi + movl 60(%ecx), %ecx + adcl 60(%edx), %ecx + movl 76(%esp), %edx + movl %ebx, (%edx) + movl %ebx, %eax + movl %ebp, 4(%edx) + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%edx) + movl 48(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%edx) + movl %edi, 16(%edx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 20(%edx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 24(%edx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 28(%edx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 32(%edx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 36(%edx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 40(%edx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 44(%edx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 48(%edx) + movl 8(%esp), %edi # 4-byte Reload + movl %edi, 52(%edx) + movl %esi, 56(%edx) + movl %ecx, 60(%edx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 88(%esp), %edi + subl (%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 4(%edi), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 8(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + sbbl 16(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + sbbl 24(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl 
%eax, 24(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + sbbl 44(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 48(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, %ebp + sbbl 56(%edi), %esi + sbbl 60(%edi), %ecx + sbbl $0, %ebx + testb $1, %bl + jne .LBB248_2 +# BB#1: # %nocarry + movl 4(%esp), %edi # 4-byte Reload + movl %edi, (%edx) + movl (%esp), %edi # 4-byte Reload + movl %edi, 4(%edx) + movl 52(%esp), %edi # 4-byte Reload + movl %edi, 8(%edx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 12(%edx) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 16(%edx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 20(%edx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 24(%edx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 28(%edx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 32(%edx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 36(%edx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 40(%edx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 44(%edx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 48(%edx) + movl %ebp, 52(%edx) + movl %esi, 56(%edx) + movl %ecx, 60(%edx) +.LBB248_2: # %carry + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end248: + .size mcl_fp_add16Lbmi2, .Lfunc_end248-mcl_fp_add16Lbmi2 + + .globl mcl_fp_addNF16Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF16Lbmi2,@function +mcl_fp_addNF16Lbmi2: # @mcl_fp_addNF16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $124, %esp + movl 152(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %ecx + movl 148(%esp), %esi + addl (%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 4(%esi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%edx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 56(%edx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 52(%edx), %eax + movl %eax, 92(%esp) # 
4-byte Spill + movl 48(%edx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 44(%edx), %edi + movl 40(%edx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 36(%edx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 28(%edx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 24(%edx), %eax + movl 20(%edx), %ebp + movl 16(%edx), %ebx + movl 12(%edx), %ecx + movl 8(%edx), %edx + adcl 8(%esi), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 12(%esi), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 16(%esi), %ebx + movl %ebx, 68(%esp) # 4-byte Spill + adcl 20(%esi), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + adcl 24(%esi), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 28(%esi), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 32(%esi), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 36(%esi), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esi), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 44(%esi), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esi), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 52(%esi), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 56(%esi), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 60(%esi), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 156(%esp), %edi + movl 80(%esp), %esi # 4-byte Reload + subl (%edi), %esi + movl 84(%esp), %eax # 4-byte Reload + sbbl 4(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 8(%edi), %edx + movl %edx, 4(%esp) # 4-byte Spill + sbbl 12(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 16(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + sbbl 20(%edi), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 
88(%esp), %ebp # 4-byte Reload + sbbl 24(%edi), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 44(%edi), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + movl %ecx, %ebx + sbbl 56(%edi), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 112(%esp), %ebx # 4-byte Reload + sbbl 60(%edi), %ebx + movl 80(%esp), %edi # 4-byte Reload + movl %ebx, 56(%esp) # 4-byte Spill + testl %ebx, %ebx + js .LBB249_2 +# BB#1: + movl %esi, %edi +.LBB249_2: + movl 144(%esp), %ebx + movl %edi, (%ebx) + movl 84(%esp), %edx # 4-byte Reload + js .LBB249_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload +.LBB249_4: + movl %edx, 4(%ebx) + movl 68(%esp), %edx # 4-byte Reload + movl 60(%esp), %eax # 4-byte Reload + js .LBB249_6 +# BB#5: + movl 4(%esp), %eax # 4-byte Reload +.LBB249_6: + movl %eax, 8(%ebx) + movl 100(%esp), %eax # 4-byte Reload + movl 88(%esp), %ecx # 4-byte Reload + movl 64(%esp), %esi # 4-byte Reload + js .LBB249_8 +# BB#7: + movl 8(%esp), %esi # 4-byte Reload +.LBB249_8: + movl %esi, 12(%ebx) + movl 108(%esp), %esi # 4-byte Reload + js .LBB249_10 +# BB#9: + movl 12(%esp), %edx # 4-byte Reload +.LBB249_10: + movl %edx, 16(%ebx) + movl 112(%esp), %edi # 4-byte Reload + movl 104(%esp), %ebp # 4-byte Reload + js .LBB249_12 +# BB#11: + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 72(%esp) # 4-byte Spill +.LBB249_12: 
+ movl 72(%esp), %edx # 4-byte Reload + movl %edx, 20(%ebx) + js .LBB249_14 +# BB#13: + movl 20(%esp), %ecx # 4-byte Reload +.LBB249_14: + movl %ecx, 24(%ebx) + js .LBB249_16 +# BB#15: + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 116(%esp) # 4-byte Spill +.LBB249_16: + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%ebx) + js .LBB249_18 +# BB#17: + movl 28(%esp), %eax # 4-byte Reload +.LBB249_18: + movl %eax, 32(%ebx) + movl 96(%esp), %ecx # 4-byte Reload + js .LBB249_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 120(%esp) # 4-byte Spill +.LBB249_20: + movl 120(%esp), %eax # 4-byte Reload + movl %eax, 36(%ebx) + js .LBB249_22 +# BB#21: + movl 36(%esp), %ebp # 4-byte Reload +.LBB249_22: + movl %ebp, 40(%ebx) + movl 76(%esp), %eax # 4-byte Reload + js .LBB249_24 +# BB#23: + movl 40(%esp), %eax # 4-byte Reload +.LBB249_24: + movl %eax, 44(%ebx) + movl 92(%esp), %eax # 4-byte Reload + js .LBB249_26 +# BB#25: + movl 44(%esp), %esi # 4-byte Reload +.LBB249_26: + movl %esi, 48(%ebx) + js .LBB249_28 +# BB#27: + movl 48(%esp), %eax # 4-byte Reload +.LBB249_28: + movl %eax, 52(%ebx) + js .LBB249_30 +# BB#29: + movl 52(%esp), %ecx # 4-byte Reload +.LBB249_30: + movl %ecx, 56(%ebx) + js .LBB249_32 +# BB#31: + movl 56(%esp), %edi # 4-byte Reload +.LBB249_32: + movl %edi, 60(%ebx) + addl $124, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end249: + .size mcl_fp_addNF16Lbmi2, .Lfunc_end249-mcl_fp_addNF16Lbmi2 + + .globl mcl_fp_sub16Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub16Lbmi2,@function +mcl_fp_sub16Lbmi2: # @mcl_fp_sub16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 88(%esp), %edi + subl (%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 
12(%edi), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esi), %eax + sbbl 40(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 44(%esi), %edx + sbbl 44(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 48(%esi), %ecx + sbbl 48(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 52(%esi), %eax + sbbl 52(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 56(%esi), %ebp + sbbl 56(%edi), %ebp + movl 60(%esi), %esi + sbbl 60(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 80(%esp), %ebx + movl 52(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 56(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 36(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 40(%ebx) + movl %edx, 44(%ebx) + movl %ecx, 48(%ebx) + movl %eax, 52(%ebx) + movl %ebp, 56(%ebx) + movl %esi, 60(%ebx) + je .LBB250_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 92(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + 
movl %edx, 4(%ebx) + movl 44(%esp), %edi # 4-byte Reload + adcl 8(%esi), %edi + movl 12(%esi), %eax + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl 48(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%ebx) + movl 52(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%ebx) + movl %eax, 52(%ebx) + movl 56(%esi), %eax + adcl %ebp, %eax + movl %eax, 56(%ebx) + movl 60(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%ebx) +.LBB250_2: # %nocarry + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end250: + .size mcl_fp_sub16Lbmi2, .Lfunc_end250-mcl_fp_sub16Lbmi2 + + .globl mcl_fp_subNF16Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF16Lbmi2,@function +mcl_fp_subNF16Lbmi2: # @mcl_fp_subNF16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $104, %esp + movl 128(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 132(%esp), %edi + subl (%edi), %esi + movl %esi, 64(%esp) # 4-byte Spill + sbbl 4(%edi), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 60(%ecx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 56(%ecx), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 52(%ecx), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 
48(%ecx), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 36(%ecx), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 44(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 48(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + sbbl 36(%edi), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 56(%edi), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 60(%edi), %eax + movl %eax, 80(%esp) # 4-byte Spill + sarl $31, %eax + movl 136(%esp), %esi + movl 60(%esi), %ecx + andl %eax, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 56(%esi), %ecx + andl %eax, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esi), %ecx + andl %eax, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 48(%esi), %ecx + andl %eax, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%esi), %ecx + andl %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 40(%esi), %ecx + 
andl %eax, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 36(%esi), %ecx + andl %eax, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 32(%esi), %ecx + andl %eax, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 28(%esi), %ecx + andl %eax, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 24(%esi), %ecx + andl %eax, %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 20(%esi), %ebp + andl %eax, %ebp + movl 16(%esi), %ebx + andl %eax, %ebx + movl 12(%esi), %edi + andl %eax, %edi + movl 8(%esi), %edx + andl %eax, %edx + movl 4(%esi), %ecx + andl %eax, %ecx + andl (%esi), %eax + addl 64(%esp), %eax # 4-byte Folded Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl 124(%esp), %esi + movl %eax, (%esi) + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %ecx, 4(%esi) + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edx, 8(%esi) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %edi, 12(%esi) + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl %ebx, 16(%esi) + movl (%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %ebp, 20(%esi) + movl 4(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %eax, 24(%esi) + movl 8(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %ecx, 28(%esi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %eax, 32(%esi) + movl 16(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %ecx, 36(%esi) + movl 20(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %eax, 40(%esi) + movl 24(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %ecx, 44(%esi) + movl 28(%esp), %ecx # 4-byte Reload + adcl 92(%esp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl 36(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl %eax, 56(%esi) + movl 60(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 
4-byte Folded Reload + movl %eax, 60(%esi) + addl $104, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end251: + .size mcl_fp_subNF16Lbmi2, .Lfunc_end251-mcl_fp_subNF16Lbmi2 + + .globl mcl_fpDbl_add16Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add16Lbmi2,@function +mcl_fpDbl_add16Lbmi2: # @mcl_fpDbl_add16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $116, %esp + movl 144(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 140(%esp), %ebx + addl (%ebx), %esi + adcl 4(%ebx), %edx + movl 8(%ecx), %edi + adcl 8(%ebx), %edi + movl 12(%ebx), %ebp + movl 136(%esp), %eax + movl %esi, (%eax) + movl 16(%ebx), %esi + adcl 12(%ecx), %ebp + adcl 16(%ecx), %esi + movl %edx, 4(%eax) + movl 72(%ecx), %edx + movl %edx, 112(%esp) # 4-byte Spill + movl %edi, 8(%eax) + movl 20(%ecx), %edx + movl %ebp, 12(%eax) + movl 20(%ebx), %edi + adcl %edx, %edi + movl 24(%ecx), %edx + movl %esi, 16(%eax) + movl 24(%ebx), %esi + adcl %edx, %esi + movl 28(%ecx), %edx + movl %edi, 20(%eax) + movl 28(%ebx), %edi + adcl %edx, %edi + movl 32(%ecx), %edx + movl %esi, 24(%eax) + movl 32(%ebx), %esi + adcl %edx, %esi + movl 36(%ecx), %edx + movl %edi, 28(%eax) + movl 36(%ebx), %edi + adcl %edx, %edi + movl 40(%ecx), %edx + movl %esi, 32(%eax) + movl 40(%ebx), %esi + adcl %edx, %esi + movl 44(%ecx), %edx + movl %edi, 36(%eax) + movl 44(%ebx), %edi + adcl %edx, %edi + movl 48(%ecx), %edx + movl %esi, 40(%eax) + movl 48(%ebx), %esi + adcl %edx, %esi + movl 52(%ecx), %edx + movl %edi, 44(%eax) + movl 52(%ebx), %edi + adcl %edx, %edi + movl 56(%ecx), %edx + movl %esi, 48(%eax) + movl 56(%ebx), %esi + adcl %edx, %esi + movl 60(%ecx), %edx + movl %edi, 52(%eax) + movl 60(%ebx), %ebp + adcl %edx, %ebp + movl 64(%ecx), %edx + movl %esi, 56(%eax) + movl 64(%ebx), %esi + adcl %edx, %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 68(%ecx), %edx + movl %ebp, 60(%eax) + movl 68(%ebx), %eax + adcl %edx, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 72(%ebx), 
%eax + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%ecx), %ebp + movl 76(%ebx), %eax + adcl %ebp, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%ecx), %ebp + movl 80(%ebx), %eax + adcl %ebp, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 84(%ecx), %ebp + movl 84(%ebx), %eax + adcl %ebp, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 88(%ecx), %ebp + movl 88(%ebx), %eax + adcl %ebp, %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%ecx), %ebp + movl 92(%ebx), %eax + adcl %ebp, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%ecx), %ebp + movl 96(%ebx), %eax + adcl %ebp, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 100(%ecx), %ebp + movl 100(%ebx), %edx + adcl %ebp, %edx + movl %edx, 112(%esp) # 4-byte Spill + movl 104(%ecx), %ebp + movl 104(%ebx), %edx + adcl %ebp, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%ecx), %ebp + movl 108(%ebx), %edx + adcl %ebp, %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 112(%ecx), %edx + movl 112(%ebx), %ebp + adcl %edx, %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 116(%ecx), %edx + movl 116(%ebx), %esi + adcl %edx, %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 120(%ecx), %edx + movl 120(%ebx), %edi + adcl %edx, %edi + movl 124(%ecx), %ecx + movl 124(%ebx), %esi + adcl %ecx, %esi + sbbl %ecx, %ecx + andl $1, %ecx + movl 148(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + subl (%edx), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 76(%esp), %ebx # 4-byte Reload + sbbl 4(%edx), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + movl 80(%esp), %ebx # 4-byte Reload + sbbl 8(%edx), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + sbbl 12(%edx), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 104(%esp), %ebx # 4-byte Reload + sbbl 16(%edx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 20(%edx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 108(%esp), %ebx # 4-byte Reload + sbbl 
24(%edx), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 92(%esp), %ebx # 4-byte Reload + sbbl 28(%edx), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl %eax, %ebx + sbbl 32(%edx), %ebx + movl 112(%esp), %eax # 4-byte Reload + sbbl 36(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 40(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 44(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl 48(%edx), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + movl %eax, %ebp + sbbl 52(%edx), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl %edi, %ebp + sbbl 56(%edx), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl %esi, %ebp + sbbl 60(%edx), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + sbbl $0, %ecx + andl $1, %ecx + jne .LBB252_2 +# BB#1: + movl %ebx, 64(%esp) # 4-byte Spill +.LBB252_2: + testb %cl, %cl + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB252_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload +.LBB252_4: + movl 136(%esp), %ebx + movl %ecx, 64(%ebx) + movl %esi, %ebp + movl %edi, 72(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + movl 92(%esp), %ecx # 4-byte Reload + movl 88(%esp), %edx # 4-byte Reload + movl 76(%esp), %esi # 4-byte Reload + jne .LBB252_6 +# BB#5: + movl 4(%esp), %esi # 4-byte Reload +.LBB252_6: + movl %esi, 68(%ebx) + movl 84(%esp), %esi # 4-byte Reload + movl 80(%esp), %eax # 4-byte Reload + jne .LBB252_8 +# BB#7: + movl 8(%esp), %eax # 4-byte Reload +.LBB252_8: + movl %eax, 72(%ebx) + movl 60(%esp), %eax # 4-byte Reload + jne .LBB252_10 +# BB#9: + movl 12(%esp), %esi # 4-byte Reload +.LBB252_10: + movl %esi, 76(%ebx) + jne .LBB252_12 +# BB#11: + movl 16(%esp), %esi # 4-byte Reload + movl %esi, 104(%esp) # 4-byte Spill +.LBB252_12: + movl 104(%esp), %esi # 4-byte Reload + movl %esi, 80(%ebx) + jne .LBB252_14 +# BB#13: + movl 20(%esp), %edx # 4-byte Reload +.LBB252_14: + movl %edx, 84(%ebx) + jne .LBB252_16 +# 
BB#15: + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 108(%esp) # 4-byte Spill +.LBB252_16: + movl 108(%esp), %edx # 4-byte Reload + movl %edx, 88(%ebx) + jne .LBB252_18 +# BB#17: + movl 28(%esp), %ecx # 4-byte Reload +.LBB252_18: + movl %ecx, 92(%ebx) + movl 64(%esp), %ecx # 4-byte Reload + movl %ecx, 96(%ebx) + jne .LBB252_20 +# BB#19: + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 112(%esp) # 4-byte Spill +.LBB252_20: + movl 112(%esp), %ecx # 4-byte Reload + movl %ecx, 100(%ebx) + jne .LBB252_22 +# BB#21: + movl 36(%esp), %edi # 4-byte Reload +.LBB252_22: + movl %edi, 104(%ebx) + movl 100(%esp), %ecx # 4-byte Reload + jne .LBB252_24 +# BB#23: + movl 40(%esp), %ecx # 4-byte Reload +.LBB252_24: + movl %ecx, 108(%ebx) + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB252_26 +# BB#25: + movl 44(%esp), %eax # 4-byte Reload +.LBB252_26: + movl %eax, 112(%ebx) + movl 68(%esp), %eax # 4-byte Reload + jne .LBB252_28 +# BB#27: + movl 48(%esp), %eax # 4-byte Reload +.LBB252_28: + movl %eax, 116(%ebx) + jne .LBB252_30 +# BB#29: + movl 52(%esp), %ecx # 4-byte Reload +.LBB252_30: + movl %ecx, 120(%ebx) + jne .LBB252_32 +# BB#31: + movl 56(%esp), %ebp # 4-byte Reload +.LBB252_32: + movl %ebp, 124(%ebx) + addl $116, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end252: + .size mcl_fpDbl_add16Lbmi2, .Lfunc_end252-mcl_fpDbl_add16Lbmi2 + + .globl mcl_fpDbl_sub16Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub16Lbmi2,@function +mcl_fpDbl_sub16Lbmi2: # @mcl_fpDbl_sub16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + movl 132(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edi + movl 136(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%eax), %ebx + sbbl 8(%edx), %ebx + movl 128(%esp), %ecx + movl %esi, (%ecx) + movl 12(%eax), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ecx) + movl 16(%eax), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ecx) + movl 20(%edx), %ebx + movl %esi, 12(%ecx) + movl 20(%eax), %esi + 
sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ecx) + movl 24(%eax), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ecx) + movl 28(%eax), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ecx) + movl 32(%eax), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ecx) + movl 36(%eax), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ecx) + movl 40(%eax), %edi + sbbl %ebx, %edi + movl 44(%edx), %ebx + movl %esi, 36(%ecx) + movl 44(%eax), %esi + sbbl %ebx, %esi + movl 48(%edx), %ebx + movl %edi, 40(%ecx) + movl 48(%eax), %edi + sbbl %ebx, %edi + movl 52(%edx), %ebx + movl %esi, 44(%ecx) + movl 52(%eax), %esi + sbbl %ebx, %esi + movl 56(%edx), %ebx + movl %edi, 48(%ecx) + movl 56(%eax), %edi + sbbl %ebx, %edi + movl 60(%edx), %ebx + movl %esi, 52(%ecx) + movl 60(%eax), %esi + sbbl %ebx, %esi + movl 64(%edx), %ebx + movl %edi, 56(%ecx) + movl 64(%eax), %edi + sbbl %ebx, %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 68(%edx), %edi + movl %esi, 60(%ecx) + movl 68(%eax), %esi + sbbl %edi, %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 72(%edx), %esi + movl 72(%eax), %edi + sbbl %esi, %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 76(%edx), %esi + movl 76(%eax), %edi + sbbl %esi, %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 80(%edx), %esi + movl 80(%eax), %edi + sbbl %esi, %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 84(%edx), %esi + movl 84(%eax), %edi + sbbl %esi, %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 88(%edx), %esi + movl 88(%eax), %edi + sbbl %esi, %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 92(%edx), %esi + movl 92(%eax), %edi + sbbl %esi, %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 96(%edx), %esi + movl 96(%eax), %edi + sbbl %esi, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 100(%edx), %esi + movl 100(%eax), %edi + sbbl %esi, %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 104(%edx), %esi + movl 104(%eax), %edi + sbbl %esi, %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 
108(%edx), %esi + movl 108(%eax), %edi + sbbl %esi, %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 112(%edx), %esi + movl 112(%eax), %edi + sbbl %esi, %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 116(%edx), %esi + movl 116(%eax), %edi + sbbl %esi, %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 120(%edx), %esi + movl 120(%eax), %edi + sbbl %esi, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 124(%edx), %edx + movl 124(%eax), %eax + sbbl %edx, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 140(%esp), %ebx + jne .LBB253_1 +# BB#2: + movl $0, 68(%esp) # 4-byte Folded Spill + jmp .LBB253_3 +.LBB253_1: + movl 60(%ebx), %edx + movl %edx, 68(%esp) # 4-byte Spill +.LBB253_3: + testb %al, %al + jne .LBB253_4 +# BB#5: + movl $0, 24(%esp) # 4-byte Folded Spill + movl $0, %ebp + jmp .LBB253_6 +.LBB253_4: + movl (%ebx), %ebp + movl 4(%ebx), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB253_6: + jne .LBB253_7 +# BB#8: + movl $0, 36(%esp) # 4-byte Folded Spill + jmp .LBB253_9 +.LBB253_7: + movl 56(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill +.LBB253_9: + jne .LBB253_10 +# BB#11: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB253_12 +.LBB253_10: + movl 52(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB253_12: + jne .LBB253_13 +# BB#14: + movl $0, 28(%esp) # 4-byte Folded Spill + jmp .LBB253_15 +.LBB253_13: + movl 48(%ebx), %eax + movl %eax, 28(%esp) # 4-byte Spill +.LBB253_15: + jne .LBB253_16 +# BB#17: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB253_18 +.LBB253_16: + movl 44(%ebx), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB253_18: + jne .LBB253_19 +# BB#20: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB253_21 +.LBB253_19: + movl 40(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB253_21: + jne .LBB253_22 +# BB#23: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB253_24 +.LBB253_22: + movl 36(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB253_24: + jne .LBB253_25 +# 
BB#26: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB253_27 +.LBB253_25: + movl 32(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB253_27: + jne .LBB253_28 +# BB#29: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB253_30 +.LBB253_28: + movl 28(%ebx), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB253_30: + jne .LBB253_31 +# BB#32: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB253_33 +.LBB253_31: + movl 24(%ebx), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB253_33: + jne .LBB253_34 +# BB#35: + movl $0, %esi + jmp .LBB253_36 +.LBB253_34: + movl 20(%ebx), %esi +.LBB253_36: + jne .LBB253_37 +# BB#38: + movl $0, %edx + jmp .LBB253_39 +.LBB253_37: + movl 16(%ebx), %edx +.LBB253_39: + jne .LBB253_40 +# BB#41: + movl $0, %edi + jmp .LBB253_42 +.LBB253_40: + movl 12(%ebx), %edi +.LBB253_42: + jne .LBB253_43 +# BB#44: + xorl %ebx, %ebx + jmp .LBB253_45 +.LBB253_43: + movl 8(%ebx), %ebx +.LBB253_45: + addl 48(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %eax + movl 24(%esp), %ebp # 4-byte Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %eax, 64(%ecx) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 68(%ecx) + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %ebx, 72(%ecx) + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edi, 76(%ecx) + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %esi, 84(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %eax, 88(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 12(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 16(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl 20(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %eax, 104(%ecx) + movl 28(%esp), 
%eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %edx, 108(%ecx) + movl 32(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx # 4-byte Folded Reload + movl %eax, 112(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %edx, 116(%ecx) + movl %eax, 120(%ecx) + movl 68(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 124(%ecx) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end253: + .size mcl_fpDbl_sub16Lbmi2, .Lfunc_end253-mcl_fpDbl_sub16Lbmi2 + + .align 16, 0x90 + .type .LmulPv544x32,@function +.LmulPv544x32: # @mulPv544x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl %edx, %eax + movl 80(%esp), %esi + movl %esi, %edx + mulxl 4(%eax), %edi, %ebx + movl %esi, %edx + mulxl (%eax), %ebp, %edx + movl %ebp, 56(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 8(%eax), %edx, %edi + adcl %ebx, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 12(%eax), %edx, %ebx + adcl %edi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 16(%eax), %edx, %edi + adcl %ebx, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 20(%eax), %edx, %ebx + adcl %edi, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 24(%eax), %edx, %edi + adcl %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 28(%eax), %edx, %ebx + adcl %edi, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 32(%eax), %edx, %edi + adcl %ebx, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 36(%eax), %edx, %ebx + adcl %edi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 40(%eax), %edx, %edi + adcl %ebx, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 44(%eax), %edx, %ebx + adcl %edi, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 
%esi, %edx + mulxl 48(%eax), %edx, %edi + adcl %ebx, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 52(%eax), %ebx, %ebp + adcl %edi, %ebx + movl %esi, %edx + mulxl 56(%eax), %edi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebp, %edi + movl %esi, %edx + mulxl 60(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 44(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 36(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl 32(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%ecx) + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%ecx) + movl %ebx, 52(%ecx) + movl %edi, 56(%ecx) + movl %edx, 60(%ecx) + movl %esi, %edx + mulxl 64(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ecx) + adcl $0, %edx + movl %edx, 68(%ecx) + movl %ecx, %eax + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end254: + .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32 + + .globl mcl_fp_mulUnitPre17Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre17Lbmi2,@function +mcl_fp_mulUnitPre17Lbmi2: # @mcl_fp_mulUnitPre17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $140, %esp + calll .L255$pb +.L255$pb: + popl %ebx +.Ltmp56: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp56-.L255$pb), %ebx + movl 168(%esp), %eax + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 164(%esp), %edx + calll .LmulPv544x32 + movl 132(%esp), 
%eax + movl %eax, 60(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 120(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 108(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 104(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 84(%esp), %ebp + movl 80(%esp), %ebx + movl 76(%esp), %edi + movl 72(%esp), %esi + movl 64(%esp), %edx + movl 68(%esp), %ecx + movl 160(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 64(%eax) + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, 68(%eax) + addl $140, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end255: + .size mcl_fp_mulUnitPre17Lbmi2, .Lfunc_end255-mcl_fp_mulUnitPre17Lbmi2 + + .globl mcl_fpDbl_mulPre17Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre17Lbmi2,@function +mcl_fpDbl_mulPre17Lbmi2: # 
@mcl_fpDbl_mulPre17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1356, %esp # imm = 0x54C + calll .L256$pb +.L256$pb: + popl %edi +.Ltmp57: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp57-.L256$pb), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 1384(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1280(%esp), %ecx + movl 1380(%esp), %edx + movl %edx, %esi + movl %edi, %ebx + calll .LmulPv544x32 + movl 1348(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1344(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1340(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1328(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1320(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1316(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1312(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1308(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1304(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1300(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1296(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1292(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 1288(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 1280(%esp), %eax + movl 1284(%esp), %ebp + movl 1376(%esp), %ecx + movl %eax, (%ecx) + movl 1384(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl %esi, %edx + movl %edi, %ebx + calll .LmulPv544x32 + addl 1208(%esp), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1272(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1268(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1260(%esp), %eax + movl %eax, 
88(%esp) # 4-byte Spill + movl 1256(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 1252(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1248(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1244(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1236(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1232(%esp), %edi + movl 1228(%esp), %esi + movl 1224(%esp), %edx + movl 1220(%esp), %ecx + movl 1212(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1216(%esp), %eax + movl 1376(%esp), %ebp + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + movl 12(%esp), %ebp # 4-byte Reload + adcl %ebp, 120(%esp) # 4-byte Folded Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 64(%esp) # 4-byte Folded Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + 
adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 1136(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1204(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1200(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1196(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1192(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1188(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1180(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1168(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1164(%esp), %ebx + movl 1160(%esp), %edi + movl 1156(%esp), %esi + movl 1152(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1148(%esp), %edx + movl 1140(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1144(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + 
movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1064(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1064(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 1132(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1128(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1124(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1116(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1108(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1100(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1096(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1092(%esp), %ebx 
+ movl 1088(%esp), %edi + movl 1084(%esp), %esi + movl 1080(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1076(%esp), %edx + movl 1068(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1072(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 
16(%eax), %eax + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 992(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1040(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1036(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1020(%esp), %ebx + movl 1016(%esp), %edi + movl 1012(%esp), %esi + movl 1008(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1004(%esp), %edx + movl 996(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1000(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 
76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 88(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 920(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 976(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 972(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 968(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 964(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 956(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 952(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 948(%esp), %ebx + movl 944(%esp), %edi + movl 940(%esp), %esi + movl 936(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 932(%esp), %edx + movl 924(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 928(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 4(%esp), %eax # 4-byte Reload + 
adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 848(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 916(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 912(%esp), %eax + movl %eax, 
100(%esp) # 4-byte Spill + movl 908(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 904(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 900(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 892(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 888(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 876(%esp), %ebx + movl 872(%esp), %edi + movl 868(%esp), %esi + movl 864(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 860(%esp), %edx + movl 852(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 856(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte 
Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 776(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 844(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 840(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 836(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 804(%esp), %ebx + movl 800(%esp), %edi + movl 796(%esp), %esi + movl 792(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 788(%esp), %edx + movl 780(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 784(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 
4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 704(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 772(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 108(%esp) # 
4-byte Spill + movl 744(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 732(%esp), %ebx + movl 728(%esp), %edi + movl 724(%esp), %esi + movl 720(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 716(%esp), %edx + movl 708(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 712(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) 
# 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 632(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 696(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 676(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 656(%esp), %edi + movl 652(%esp), %esi + movl 648(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 644(%esp), %edx + movl 636(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 640(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl 
%eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 588(%esp), %ebx + movl 584(%esp), %edi + movl 580(%esp), %esi + movl 576(%esp), %eax + movl %eax, 24(%esp) # 
4-byte Spill + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), 
%ebx # 4-byte Reload + calll .LmulPv544x32 + addl 488(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 524(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 516(%esp), %ebx + movl 512(%esp), %edi + movl 508(%esp), %esi + movl 504(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 500(%esp), %edx + movl 492(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 496(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + 
adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 416(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 444(%esp), %ebx + movl 440(%esp), %edi + movl 436(%esp), %esi + movl 432(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 428(%esp), %edx + movl 420(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 424(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte 
Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 344(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 96(%esp) # 4-byte 
Spill + movl 400(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 396(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 392(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 372(%esp), %ebx + movl 368(%esp), %edi + movl 364(%esp), %esi + movl 360(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 356(%esp), %edx + movl 348(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 352(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte 
Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 1380(%esp), %eax + movl %eax, %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 272(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 340(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 336(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 320(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 316(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 300(%esp), %ebx + movl 296(%esp), %edi + movl 292(%esp), %edx + movl 288(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl 280(%esp), %ecx + movl 120(%esp), %esi # 4-byte Reload + movl 1376(%esp), %ebp + movl %esi, 56(%ebp) + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edx # 
4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 112(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1384(%esp), %ecx + movl %ecx, %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 200(%esp), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 248(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 
240(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 232(%esp), %edi + movl 228(%esp), %esi + movl 224(%esp), %edx + movl 220(%esp), %ecx + movl 216(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl 204(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 208(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + movl 1376(%esp), %ebx + movl %ebp, 60(%ebx) + movl 120(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 120(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 76(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + 
movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 64(%eax), %eax + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 128(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 192(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 184(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 156(%esp), %ebx + movl 152(%esp), %edi + movl 148(%esp), %esi + movl 144(%esp), %edx + movl 140(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 64(%eax) + movl 64(%esp), %ebp # 4-byte Reload + movl %ebp, 68(%eax) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %ebp # 4-byte Reload + movl %ebp, 72(%eax) + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%eax) + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %esi, 84(%eax) + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %edi, 88(%eax) + movl 20(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ebx, 92(%eax) + movl 32(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %edx, 
96(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 68(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx # 4-byte Folded Reload + movl %ecx, 108(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 116(%esp), %ecx # 4-byte Folded Reload + movl %edx, 112(%eax) + movl 100(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %ecx, 116(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %edx, 120(%eax) + movl %ecx, 124(%eax) + movl 112(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 128(%eax) + movl 124(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 132(%eax) + addl $1356, %esp # imm = 0x54C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end256: + .size mcl_fpDbl_mulPre17Lbmi2, .Lfunc_end256-mcl_fpDbl_mulPre17Lbmi2 + + .globl mcl_fpDbl_sqrPre17Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre17Lbmi2,@function +mcl_fpDbl_sqrPre17Lbmi2: # @mcl_fpDbl_sqrPre17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1356, %esp # imm = 0x54C + calll .L257$pb +.L257$pb: + popl %ebx +.Ltmp58: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp58-.L257$pb), %ebx + movl %ebx, 124(%esp) # 4-byte Spill + movl 1380(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 1280(%esp), %ecx + movl %edx, %edi + movl %ebx, %esi + calll .LmulPv544x32 + movl 1348(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1344(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1340(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1328(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 
1320(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1316(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1312(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1308(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1304(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1300(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1296(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1292(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 1288(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 1280(%esp), %eax + movl 1284(%esp), %ebp + movl 1376(%esp), %ecx + movl %eax, (%ecx) + movl %edi, %edx + movl 4(%edx), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl %esi, %ebx + calll .LmulPv544x32 + addl 1208(%esp), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1272(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1268(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1260(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1256(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 1252(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1248(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1244(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1236(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1232(%esp), %edi + movl 1228(%esp), %esi + movl 1224(%esp), %edx + movl 1220(%esp), %ecx + movl 1212(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1216(%esp), %eax + movl 1376(%esp), %ebp + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + movl 12(%esp), %ebp # 4-byte Reload + adcl %ebp, 120(%esp) # 4-byte Folded Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill 
+ adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 64(%esp) # 4-byte Folded Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 1136(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1204(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1200(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1196(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1192(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1188(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1180(%esp), %eax + movl %eax, 
116(%esp) # 4-byte Spill + movl 1176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1168(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1164(%esp), %ebx + movl 1160(%esp), %edi + movl 1156(%esp), %esi + movl 1152(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1148(%esp), %edx + movl 1140(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1144(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + 
movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 1064(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1064(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 1132(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1128(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1124(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1116(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1108(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1100(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1096(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1092(%esp), %ebx + movl 1088(%esp), %edi + movl 1084(%esp), %esi + movl 1080(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1076(%esp), %edx + movl 1068(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1072(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded 
Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 992(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1040(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1036(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1020(%esp), %ebx + movl 1016(%esp), %edi + movl 1012(%esp), %esi + movl 1008(%esp), %eax + movl %eax, 32(%esp) # 
4-byte Spill + movl 1004(%esp), %edx + movl 996(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1000(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload 
+ calll .LmulPv544x32 + addl 920(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 976(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 972(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 968(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 964(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 956(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 952(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 948(%esp), %ebx + movl 944(%esp), %edi + movl 940(%esp), %esi + movl 936(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 932(%esp), %edx + movl 924(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 928(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax 
# 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 848(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 916(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 912(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 908(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 904(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 900(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 892(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 888(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 876(%esp), %ebx + movl 872(%esp), %edi + movl 868(%esp), %esi + movl 864(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 860(%esp), %edx + movl 852(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 856(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte 
Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 776(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 844(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 840(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 836(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 
80(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 804(%esp), %ebx + movl 800(%esp), %edi + movl 796(%esp), %esi + movl 792(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 788(%esp), %edx + movl 780(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 784(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 
88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 704(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 772(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 744(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 732(%esp), %ebx + movl 728(%esp), %edi + movl 724(%esp), %esi + movl 720(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 716(%esp), %edx + movl 708(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 712(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 
4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 632(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 696(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 676(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 
656(%esp), %edi + movl 652(%esp), %esi + movl 648(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 644(%esp), %edx + movl 636(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 640(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 40(%edx), %eax 
+ movl %eax, (%esp) + leal 560(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 588(%esp), %ebx + movl 584(%esp), %edi + movl 580(%esp), %esi + movl 576(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), 
%eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 44(%edx), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 488(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 524(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 516(%esp), %ebx + movl 512(%esp), %edi + movl 508(%esp), %esi + movl 504(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 500(%esp), %edx + movl 492(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 496(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte 
Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 48(%edx), %eax + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 416(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl 
%eax, 100(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 444(%esp), %ebx + movl 440(%esp), %edi + movl 436(%esp), %esi + movl 432(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 428(%esp), %edx + movl 420(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 424(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl 
%eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 52(%edx), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 344(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 400(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 396(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 392(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 372(%esp), %ebx + movl 368(%esp), %edi + movl 364(%esp), %esi + movl 360(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 356(%esp), %edx + movl 348(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 352(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + movl 4(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 
4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 56(%edx), %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 272(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 340(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 336(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 320(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 316(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 
60(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 300(%esp), %ebx + movl 296(%esp), %edi + movl 292(%esp), %edx + movl 288(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl 280(%esp), %ecx + movl 120(%esp), %esi # 4-byte Reload + movl 1376(%esp), %ebp + movl %esi, 56(%ebp) + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded 
Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 60(%edx), %eax + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 200(%esp), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 248(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 240(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 232(%esp), %edi + movl 228(%esp), %esi + movl 224(%esp), %edx + movl 220(%esp), %ecx + movl 216(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl 204(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 208(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + movl 1376(%esp), %ebx + movl %ebp, 60(%ebx) + movl 120(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 120(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 76(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + 
movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 64(%edx), %eax + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 128(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 192(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 184(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 156(%esp), %ebx + movl 152(%esp), %edi + movl 148(%esp), %esi + movl 144(%esp), %edx + movl 140(%esp), %ecx + movl 1376(%esp), %eax 
+ movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 64(%eax) + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 68(%eax) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %ebp # 4-byte Reload + movl %ebp, 72(%eax) + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%eax) + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %esi, 84(%eax) + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %edi, 88(%eax) + movl 20(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ebx, 92(%eax) + movl 32(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %edx, 96(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 64(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx # 4-byte Folded Reload + movl %ecx, 108(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 112(%esp), %ecx # 4-byte Folded Reload + movl %edx, 112(%eax) + movl 100(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %ecx, 116(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %edx, 120(%eax) + movl %ecx, 124(%eax) + movl 116(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 128(%eax) + movl 124(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 132(%eax) + addl $1356, %esp # imm = 0x54C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end257: + .size mcl_fpDbl_sqrPre17Lbmi2, .Lfunc_end257-mcl_fpDbl_sqrPre17Lbmi2 + + .globl mcl_fp_mont17Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont17Lbmi2,@function +mcl_fp_mont17Lbmi2: # @mcl_fp_mont17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2588, %esp # imm = 0xA1C + calll .L258$pb +.L258$pb: + popl %ebx +.Ltmp59: 
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp59-.L258$pb), %ebx + movl 2620(%esp), %eax + movl -4(%eax), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 2512(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 2512(%esp), %ebp + movl 2516(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2580(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 2576(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 2572(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 2568(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2564(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2560(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2556(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2552(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2548(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2544(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2540(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2536(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2532(%esp), %edi + movl 2528(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2524(%esp), %esi + movl 2520(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 2440(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + addl 2440(%esp), %ebp + movl 120(%esp), %eax # 4-byte Reload + adcl 2444(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 2452(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2456(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2460(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 2464(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2468(%esp), %eax + movl %eax, 76(%esp) # 4-byte 
Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2472(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2476(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2480(%esp), %eax + movl %eax, %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 2484(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2488(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2492(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 2496(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2500(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 2504(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 2508(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl 2616(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 2368(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + andl $1, %ebp + movl 120(%esp), %ecx # 4-byte Reload + addl 2368(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 2372(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 2376(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2380(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2384(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 2392(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 2396(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2400(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 2404(%esp), %esi + movl %esi, 72(%esp) # 4-byte 
Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 2408(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 2412(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2416(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 2420(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2424(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 2428(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 2432(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 2436(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2296(%esp), %ecx + movl 2620(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + movl 116(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 2296(%esp), %ebp + movl 100(%esp), %ecx # 4-byte Reload + adcl 2300(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 2304(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2308(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2312(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2316(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 2320(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 2324(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2328(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2332(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 2336(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx 
# 4-byte Reload + adcl 2340(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 2344(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 2348(%esp), %esi + movl 124(%esp), %ecx # 4-byte Reload + adcl 2352(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 2356(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 2360(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 2364(%esp), %ebp + adcl $0, %eax + movl %eax, %edi + movl 2616(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 2224(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 100(%esp), %ecx # 4-byte Reload + addl 2224(%esp), %ecx + movl 112(%esp), %eax # 4-byte Reload + adcl 2228(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2232(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2236(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2240(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2244(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2252(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2256(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2260(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2268(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 2272(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + 
movl 124(%esp), %eax # 4-byte Reload + adcl 2276(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 2280(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 2284(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 2288(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + adcl 2292(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2152(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 2152(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 2156(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2164(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2168(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2172(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2176(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2180(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2184(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2188(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2192(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2196(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 2200(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl 2204(%esp), %ebp + movl 128(%esp), %edi # 4-byte Reload + adcl 2208(%esp), %edi + movl 
132(%esp), %esi # 4-byte Reload + adcl 2212(%esp), %esi + movl 120(%esp), %eax # 4-byte Reload + adcl 2216(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2220(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 2080(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 112(%esp), %ecx # 4-byte Reload + addl 2080(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 2084(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2088(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2092(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2096(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2100(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2104(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2108(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2112(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2116(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2120(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 2124(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 2128(%esp), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + adcl 2132(%esp), %edi + movl %edi, 128(%esp) # 4-byte Spill + adcl 2136(%esp), %esi + movl %esi, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2140(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2144(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 
100(%esp), %esi # 4-byte Reload + adcl 2148(%esp), %esi + sbbl %ebp, %ebp + movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2008(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl %ebp, %eax + andl $1, %eax + addl 2008(%esp), %edi + movl 88(%esp), %ecx # 4-byte Reload + adcl 2012(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2016(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2020(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 2024(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 2028(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2032(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2036(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 2040(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 2044(%esp), %edi + movl 104(%esp), %ecx # 4-byte Reload + adcl 2048(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 2052(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 2056(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 2060(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 2064(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 2068(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 2072(%esp), %ebp + adcl 2076(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 16(%eax), %eax + movl 
%eax, (%esp) + leal 1936(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 88(%esp), %ecx # 4-byte Reload + addl 1936(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 1940(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1944(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1948(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1952(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 1956(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1964(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1968(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1972(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1976(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1980(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1984(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1988(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1992(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 1996(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2000(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 2004(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1864(%esp), %ecx + movl 2620(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + andl $1, %ebp + movl %ebp, %ecx + addl 1864(%esp), %edi + 
movl 80(%esp), %eax # 4-byte Reload + adcl 1868(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1872(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1876(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 1880(%esp), %edi + adcl 1884(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1888(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1892(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + adcl 1896(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 1900(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1904(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1908(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1912(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl 1916(%esp), %ebp + movl 120(%esp), %eax # 4-byte Reload + adcl 1920(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1924(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1928(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1932(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1792(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 1792(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + 
movl %eax, 76(%esp) # 4-byte Spill + adcl 1804(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1820(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1824(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1832(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1836(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl 1840(%esp), %ebp + movl %ebp, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1844(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 1848(%esp), %edi + movl 100(%esp), %ebp # 4-byte Reload + adcl 1852(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 1856(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1860(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1720(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %eax + movl 80(%esp), %ecx # 4-byte Reload + addl 1720(%esp), %ecx + movl 92(%esp), %ecx # 4-byte Reload + adcl 1724(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1728(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1732(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1736(%esp), %ecx + movl %ecx, 64(%esp) # 
4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1740(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1744(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1748(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1752(%esp), %esi + movl 108(%esp), %ecx # 4-byte Reload + adcl 1756(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1760(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1764(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1768(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1772(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + adcl 1776(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + adcl 1780(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1784(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 1788(%esp), %ebp + adcl $0, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1648(%esp), %ecx + movl 2612(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + movl 92(%esp), %eax # 4-byte Reload + addl 1648(%esp), %eax + movl 76(%esp), %edi # 4-byte Reload + adcl 1652(%esp), %edi + movl 68(%esp), %ecx # 4-byte Reload + adcl 1656(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1660(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1664(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1668(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1672(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 
1676(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1680(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1684(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1688(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1692(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1696(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1700(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1704(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1708(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl 1712(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1716(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %eax, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1576(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 1576(%esp), %ebp + adcl 1580(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1584(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 1588(%esp), %ebp + movl 72(%esp), %edi # 4-byte Reload + adcl 1592(%esp), %edi + movl 84(%esp), %esi # 4-byte Reload + adcl 1596(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1600(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1608(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 124(%esp) # 4-byte 
Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1620(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1632(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1640(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1504(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 1504(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1512(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + adcl 1516(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1520(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %edi # 4-byte Reload + adcl 1540(%esp), %edi + movl 132(%esp), %eax # 4-byte Reload + adcl 1544(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 120(%esp) # 
4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1568(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1432(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 76(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1432(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 1436(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1440(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1448(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1452(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1460(%esp), %ebp + movl 124(%esp), %ecx # 4-byte Reload + adcl 1464(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl 1468(%esp), %edi + movl %edi, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1472(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %edi # 4-byte Reload + adcl 1476(%esp), %edi + movl 116(%esp), %ecx # 4-byte Reload + adcl 1480(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte 
Reload + adcl 1484(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1488(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1492(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 1496(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1500(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1360(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 1360(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1384(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 1400(%esp), %edi + movl %edi, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1408(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl 
%eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1288(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 68(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1288(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1300(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1312(%esp), %ebp + movl 124(%esp), %ecx # 4-byte Reload + adcl 1316(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1320(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1328(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl 1336(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl 1340(%esp), %edi + movl 88(%esp), %esi # 4-byte Reload + adcl 1344(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1348(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1352(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 
1356(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 1216(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1236(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl 1240(%esp), %ebp + movl 128(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1260(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1264(%esp), %edi + movl %edi, 112(%esp) # 4-byte Spill + adcl 1268(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1144(%esp), %ecx + 
movl 2620(%esp), %edx + calll .LmulPv544x32 + movl %edi, %eax + andl $1, %eax + addl 1144(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1152(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1156(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1160(%esp), %edi + movl 108(%esp), %ecx # 4-byte Reload + adcl 1164(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 1168(%esp), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1172(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1176(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %esi # 4-byte Reload + adcl 1180(%esp), %esi + movl 116(%esp), %ecx # 4-byte Reload + adcl 1184(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1188(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1192(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1196(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1200(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1204(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1208(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1212(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1072(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 1076(%esp), 
%eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 1080(%esp), %ebp + adcl 1084(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 1104(%esp), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1112(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 72(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1000(%esp), %edi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1004(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1008(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + 
adcl 1012(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1016(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1020(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1024(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl 1028(%esp), %ebp + movl 120(%esp), %ecx # 4-byte Reload + adcl 1032(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 1036(%esp), %edi + adcl 1040(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1044(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 1056(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1060(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1064(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 928(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + 
adcl 948(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl 952(%esp), %ebp + movl %ebp, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 960(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 980(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 984(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 856(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 856(%esp), %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + 
movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 888(%esp), %ebp + movl 100(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 896(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 912(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2616(%esp), %ecx + movl %ecx, %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 2612(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 784(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 812(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 820(%esp), %esi + movl %esi, 112(%esp) 
# 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 828(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 712(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %esi # 4-byte Reload + adcl 728(%esp), %esi + movl 132(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 736(%esp), %ebp + movl 116(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 756(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 
760(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 104(%esp), %ecx # 4-byte Reload + addl 640(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 652(%esp), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 660(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 680(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 696(%esp), %ebp + movl 
72(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 104(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 568(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 600(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 608(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 616(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 624(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl 
%eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 108(%esp), %ecx # 4-byte Reload + addl 496(%esp), %ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 524(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 528(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 540(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 544(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 108(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, 
%esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 108(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 424(%esp), %esi + movl 124(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %esi # 4-byte Reload + adcl 440(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 472(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 480(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 124(%esp), %ecx # 4-byte Reload + addl 352(%esp), %ecx 
+ movl 128(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 364(%esp), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 372(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 404(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 280(%esp), %ebp + movl 128(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %esi # 4-byte Reload + adcl 288(%esp), %esi + movl 120(%esp), %eax # 
4-byte Reload + adcl 292(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 296(%esp), %ebp + adcl 300(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 308(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 64(%eax), %eax + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 128(%esp), %ecx # 4-byte Reload + addl 208(%esp), %ecx + adcl 212(%esp), %esi + movl %esi, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 220(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 224(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 232(%esp), 
%edi + movl %edi, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 60(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 2620(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + andl $1, %edi + addl 136(%esp), %esi + movl 116(%esp), %edx # 4-byte Reload + movl 132(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 148(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + adcl 152(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl 80(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 
92(%esp), %esi # 4-byte Reload + adcl 168(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 176(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 180(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 184(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 188(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 192(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 196(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 200(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 204(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl $0, %edi + movl 132(%esp), %ecx # 4-byte Reload + movl 2620(%esp), %ebx + subl (%ebx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + sbbl 4(%ebx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + sbbl 8(%ebx), %edx + movl %edx, 20(%esp) # 4-byte Spill + sbbl 12(%ebx), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + movl %eax, %edx + sbbl 16(%ebx), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + sbbl 20(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 24(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 28(%ebx), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 32(%ebx), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + sbbl 36(%ebx), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + sbbl 40(%ebx), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + sbbl 
44(%ebx), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 48(%ebx), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + sbbl 52(%ebx), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + sbbl 56(%ebx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl %ebx, %ebp + movl 108(%esp), %ebx # 4-byte Reload + sbbl 60(%ebp), %ebx + movl 124(%esp), %esi # 4-byte Reload + sbbl 64(%ebp), %esi + movl %esi, %ebp + sbbl $0, %edi + andl $1, %edi + jne .LBB258_2 +# BB#1: + movl %ebx, 108(%esp) # 4-byte Spill +.LBB258_2: + movl %edi, %ebx + testb %bl, %bl + movl 132(%esp), %ebx # 4-byte Reload + jne .LBB258_4 +# BB#3: + movl 12(%esp), %ebx # 4-byte Reload +.LBB258_4: + movl 2608(%esp), %eax + movl %ebx, (%eax) + movl 120(%esp), %ebx # 4-byte Reload + jne .LBB258_6 +# BB#5: + movl 16(%esp), %ebx # 4-byte Reload +.LBB258_6: + movl %ebx, 4(%eax) + jne .LBB258_8 +# BB#7: + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 116(%esp) # 4-byte Spill +.LBB258_8: + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + jne .LBB258_10 +# BB#9: + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 100(%esp) # 4-byte Spill +.LBB258_10: + movl 100(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 112(%esp), %esi # 4-byte Reload + jne .LBB258_12 +# BB#11: + movl 28(%esp), %esi # 4-byte Reload +.LBB258_12: + movl %esi, 16(%eax) + movl 80(%esp), %ecx # 4-byte Reload + jne .LBB258_14 +# BB#13: + movl 32(%esp), %edx # 4-byte Reload +.LBB258_14: + movl %edx, 20(%eax) + jne .LBB258_16 +# BB#15: + movl 36(%esp), %ecx # 4-byte Reload +.LBB258_16: + movl %ecx, 24(%eax) + movl 92(%esp), %ecx # 4-byte Reload + jne .LBB258_18 +# BB#17: + movl 40(%esp), %ecx # 4-byte Reload +.LBB258_18: + movl %ecx, 28(%eax) + movl 76(%esp), %ecx # 4-byte Reload + jne .LBB258_20 +# BB#19: + movl 44(%esp), %ecx # 4-byte Reload +.LBB258_20: + movl %ecx, 32(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB258_22 +# 
BB#21: + movl 48(%esp), %ecx # 4-byte Reload +.LBB258_22: + movl %ecx, 36(%eax) + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB258_24 +# BB#23: + movl 52(%esp), %ecx # 4-byte Reload +.LBB258_24: + movl %ecx, 40(%eax) + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB258_26 +# BB#25: + movl 56(%esp), %ecx # 4-byte Reload +.LBB258_26: + movl %ecx, 44(%eax) + movl 84(%esp), %ecx # 4-byte Reload + jne .LBB258_28 +# BB#27: + movl 60(%esp), %ecx # 4-byte Reload +.LBB258_28: + movl %ecx, 48(%eax) + movl 96(%esp), %ecx # 4-byte Reload + jne .LBB258_30 +# BB#29: + movl 88(%esp), %ecx # 4-byte Reload +.LBB258_30: + movl %ecx, 52(%eax) + movl 104(%esp), %ecx # 4-byte Reload + jne .LBB258_32 +# BB#31: + movl 128(%esp), %ecx # 4-byte Reload +.LBB258_32: + movl %ecx, 56(%eax) + movl 108(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 124(%esp), %ecx # 4-byte Reload + jne .LBB258_34 +# BB#33: + movl %ebp, %ecx +.LBB258_34: + movl %ecx, 64(%eax) + addl $2588, %esp # imm = 0xA1C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end258: + .size mcl_fp_mont17Lbmi2, .Lfunc_end258-mcl_fp_mont17Lbmi2 + + .globl mcl_fp_montNF17Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF17Lbmi2,@function +mcl_fp_montNF17Lbmi2: # @mcl_fp_montNF17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2572, %esp # imm = 0xA0C + calll .L259$pb +.L259$pb: + popl %ebx +.Ltmp60: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp60-.L259$pb), %ebx + movl 2604(%esp), %eax + movl -4(%eax), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 2496(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2496(%esp), %edi + movl 2500(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 2564(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 2560(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2556(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2552(%esp), 
%ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2548(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2544(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2540(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2536(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2532(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 2528(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 2524(%esp), %ebp + movl 2520(%esp), %esi + movl 2516(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2512(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2508(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2504(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 2424(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 2424(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 2428(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2432(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2436(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2440(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2444(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 2448(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 2452(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 2456(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 2460(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2464(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2468(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 2472(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 2476(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + 
adcl 2480(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 2484(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2488(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2492(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 2352(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2420(%esp), %ecx + movl 112(%esp), %edx # 4-byte Reload + addl 2352(%esp), %edx + movl 92(%esp), %eax # 4-byte Reload + adcl 2356(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2360(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2368(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2372(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2376(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 2380(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2384(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2388(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2392(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 2396(%esp), %esi + movl %esi, %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 2400(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2404(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 2408(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2412(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2416(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 
112(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2280(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 2280(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 2284(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2288(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2292(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2296(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2300(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2304(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2308(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2312(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 2316(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 2320(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 2324(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2332(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 2336(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 2340(%esp), %ebp + movl 116(%esp), %edi # 4-byte Reload + adcl 2344(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 2348(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 2208(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2276(%esp), %eax + movl 92(%esp), %edx # 4-byte Reload + addl 2208(%esp), %edx + movl 104(%esp), %ecx # 4-byte Reload + adcl 
2212(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 2216(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 2220(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2224(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 2228(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 2232(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 2236(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 2240(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2244(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2248(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2252(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 2256(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 2260(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 2264(%esp), %ebp + adcl 2268(%esp), %edi + movl %edi, %esi + movl 112(%esp), %ecx # 4-byte Reload + adcl 2272(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2136(%esp), %ecx + movl 2604(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + addl 2136(%esp), %edi + movl 104(%esp), %eax # 4-byte Reload + adcl 2140(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2144(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2148(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2152(%esp), 
%eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2156(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2160(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2164(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2168(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2172(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2176(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2180(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2184(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 2188(%esp), %edi + adcl 2192(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + adcl 2196(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 2200(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 2204(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 2064(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2132(%esp), %eax + movl 104(%esp), %edx # 4-byte Reload + addl 2064(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + adcl 2068(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 2072(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2076(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 2080(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 2084(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 2088(%esp), %ebp + movl 68(%esp), %ecx # 4-byte 
Reload + adcl 2092(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2096(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2100(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2104(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 2108(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 2112(%esp), %edi + movl 100(%esp), %ecx # 4-byte Reload + adcl 2116(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 2120(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl 2124(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2128(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1992(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1992(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 1996(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2000(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2004(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2008(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 2016(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2020(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2024(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2028(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 
2032(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2036(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 2040(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 2044(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 2048(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 2052(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 2056(%esp), %edi + movl 104(%esp), %eax # 4-byte Reload + adcl 2060(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1920(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1988(%esp), %eax + movl 76(%esp), %edx # 4-byte Reload + addl 1920(%esp), %edx + movl 84(%esp), %ecx # 4-byte Reload + adcl 1924(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1928(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1932(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1936(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 1940(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1944(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1948(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1952(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1956(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1960(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1964(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 1968(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 116(%esp), 
%ecx # 4-byte Reload + adcl 1972(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 1976(%esp), %esi + adcl 1980(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1984(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1848(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1848(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1856(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1860(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1864(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1868(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1872(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1876(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1880(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1884(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1888(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1892(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1896(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1900(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1904(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1908(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 1912(%esp), %eax 
+ movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1916(%esp), %eax + movl %eax, %edi + movl 2600(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1776(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1844(%esp), %eax + movl 84(%esp), %edx # 4-byte Reload + addl 1776(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + adcl 1780(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1784(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1788(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1792(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1796(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1800(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1804(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1808(%esp), %esi + movl 96(%esp), %ecx # 4-byte Reload + adcl 1812(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1816(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1820(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1824(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1828(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl 1832(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1836(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1840(%esp), %edi + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1704(%esp), %ecx + movl 2604(%esp), %edx + calll 
.LmulPv544x32 + addl 1704(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1708(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1712(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1716(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1720(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1732(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1736(%esp), %esi + movl %esi, %ebp + movl 96(%esp), %esi # 4-byte Reload + adcl 1740(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 1744(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1768(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1632(%esp), %ecx + movl 2596(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + movl 1700(%esp), %eax + movl 80(%esp), %edx # 4-byte Reload + addl 1632(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + adcl 1636(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1640(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill 
+ movl 56(%esp), %ecx # 4-byte Reload + adcl 1644(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1648(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1652(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1656(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1660(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl 1664(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1668(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1672(%esp), %esi + movl 116(%esp), %ecx # 4-byte Reload + adcl 1676(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1680(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1684(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1688(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1692(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1696(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1560(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1560(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1568(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1576(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1580(%esp), %edi + movl 64(%esp), %ebp # 4-byte Reload + adcl 
1584(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1592(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1600(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 1608(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1620(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1488(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1556(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + addl 1488(%esp), %ecx + movl 52(%esp), %edx # 4-byte Reload + adcl 1492(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1496(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 1500(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 1504(%esp), %edi + adcl 1508(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1512(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1516(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 1520(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1524(%esp), 
%edx + movl %edx, 100(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + adcl 1528(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + adcl 1532(%esp), %esi + movl %esi, %ebp + movl 92(%esp), %edx # 4-byte Reload + adcl 1536(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1540(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 1544(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1548(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 1552(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1416(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1416(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1428(%esp), %esi + adcl 1432(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1436(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1444(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1448(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1460(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax 
# 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1472(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 1484(%esp), %ebp + movl 2600(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1344(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1412(%esp), %eax + movl 52(%esp), %edx # 4-byte Reload + addl 1344(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + adcl 1348(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 1352(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1356(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 1360(%esp), %edi + movl 88(%esp), %ecx # 4-byte Reload + adcl 1364(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1368(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1372(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1376(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1380(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1384(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1388(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1392(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1396(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1400(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1404(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte 
Spill + adcl 1408(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1272(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1272(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1288(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1304(%esp), %ebp + movl 116(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1324(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 1200(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + 
movl 1268(%esp), %eax + movl 56(%esp), %ecx # 4-byte Reload + addl 1200(%esp), %ecx + movl 68(%esp), %edx # 4-byte Reload + adcl 1204(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 1208(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + adcl 1212(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1216(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1220(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1224(%esp), %esi + adcl 1228(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 1232(%esp), %edi + movl 112(%esp), %edx # 4-byte Reload + adcl 1236(%esp), %edx + movl %edx, 112(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 1240(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1244(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 1248(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1252(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 1256(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 1260(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 1264(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1128(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1128(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 
4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1152(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1160(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 1172(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1180(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 1188(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 1056(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1124(%esp), %edx + movl 68(%esp), %eax # 4-byte Reload + addl 1056(%esp), %eax + movl 72(%esp), %ecx # 4-byte Reload + adcl 1060(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1064(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1072(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill 
+ movl 100(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1084(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1088(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1092(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1096(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1100(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 1104(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1108(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 1112(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 1116(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 1120(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 984(%esp), %esi + movl 72(%esp), %esi # 4-byte Reload + adcl 988(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 996(%esp), %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + 
movl 104(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1044(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 912(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 980(%esp), %eax + addl 912(%esp), %esi + movl 64(%esp), %edx # 4-byte Reload + adcl 916(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 920(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 924(%esp), %edi + movl 108(%esp), %edx # 4-byte Reload + adcl 928(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 932(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + adcl 936(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + movl 112(%esp), %edx # 4-byte Reload + adcl 940(%esp), %edx + movl %edx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 944(%esp), %ebp + movl 104(%esp), %edx # 4-byte Reload + adcl 948(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 952(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 956(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 960(%esp), %edx + movl %edx, 80(%esp) # 
4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 964(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 968(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 972(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 976(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 840(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 840(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 852(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 864(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 872(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 888(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 904(%esp), 
%eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 768(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 836(%esp), %edx + movl 64(%esp), %ecx # 4-byte Reload + addl 768(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 788(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl 792(%esp), %edi + adcl 796(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 812(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 828(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 696(%esp), %esi + 
movl 88(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 716(%esp), %esi + adcl 720(%esp), %edi + movl %edi, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 732(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 756(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 692(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + addl 624(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 636(%esp), 
%ebp + adcl 640(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 656(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 660(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 88(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 552(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 564(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 576(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax 
+ movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 588(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 600(%esp), %ebp + movl 56(%esp), %edi # 4-byte Reload + adcl 604(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 480(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 548(%esp), %edx + movl 96(%esp), %ecx # 4-byte Reload + addl 480(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 500(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), 
%eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 524(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 528(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 408(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 420(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 432(%esp), %ebp + movl 76(%esp), %edi # 4-byte Reload + adcl 436(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 68(%esp) # 
4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 336(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 404(%esp), %edx + movl 108(%esp), %ecx # 4-byte Reload + addl 336(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 344(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 356(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + adcl 360(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 364(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 372(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 96(%esp) # 
4-byte Spill + adcl $0, %edx + movl %edx, 108(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 264(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 280(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 300(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 308(%esp), %edi + movl 68(%esp), %ebp # 4-byte Reload + adcl 312(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 64(%eax), %eax + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 260(%esp), %edx + movl 100(%esp), %ecx # 4-byte Reload + addl 192(%esp), %ecx + movl 116(%esp), %eax # 
4-byte Reload + adcl 196(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 204(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 232(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + adcl 236(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 120(%esp), %esi + movl 92(%esp), %esi # 4-byte Reload + movl 116(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 128(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + adcl 132(%esp), %esi + movl 104(%esp), %edx # 4-byte Reload + adcl 136(%esp), 
%edx + movl %edx, 104(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 140(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 148(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 152(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 156(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 160(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 168(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 172(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 176(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 180(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 184(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 188(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl %eax, %edx + movl 2604(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %ebp + movl %esi, %ebx + sbbl 8(%edi), %ebx + movl 104(%esp), %ecx # 4-byte Reload + sbbl 12(%edi), %ecx + movl 76(%esp), %eax # 4-byte Reload + sbbl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 24(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 
56(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 44(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 48(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 56(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 60(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 64(%edi), %eax + movl %eax, 92(%esp) # 4-byte Spill + sarl $31, %eax + testl %eax, %eax + movl 116(%esp), %edi # 4-byte Reload + js .LBB259_2 +# BB#1: + movl %edx, %edi +.LBB259_2: + movl 2592(%esp), %edx + movl %edi, (%edx) + movl 112(%esp), %edi # 4-byte Reload + js .LBB259_4 +# BB#3: + movl %ebp, %edi +.LBB259_4: + movl %edi, 4(%edx) + js .LBB259_6 +# BB#5: + movl %ebx, %esi +.LBB259_6: + movl %esi, 8(%edx) + movl 104(%esp), %esi # 4-byte Reload + js .LBB259_8 +# BB#7: + movl %ecx, %esi +.LBB259_8: + movl %esi, 12(%edx) + movl 76(%esp), %ecx # 4-byte Reload + js .LBB259_10 +# BB#9: + movl 4(%esp), %ecx # 4-byte Reload +.LBB259_10: + movl %ecx, 16(%edx) + movl 84(%esp), %eax # 4-byte Reload + js .LBB259_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB259_12: + movl %eax, 20(%edx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB259_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB259_14: + movl %eax, 24(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB259_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB259_16: + movl %eax, 28(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB259_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB259_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB259_20 +# BB#19: + 
movl 24(%esp), %eax # 4-byte Reload +.LBB259_20: + movl %eax, 36(%edx) + movl 68(%esp), %eax # 4-byte Reload + js .LBB259_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB259_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB259_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB259_24: + movl %eax, 44(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB259_26 +# BB#25: + movl 36(%esp), %eax # 4-byte Reload +.LBB259_26: + movl %eax, 48(%edx) + movl 88(%esp), %eax # 4-byte Reload + js .LBB259_28 +# BB#27: + movl 40(%esp), %eax # 4-byte Reload +.LBB259_28: + movl %eax, 52(%edx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB259_30 +# BB#29: + movl 44(%esp), %eax # 4-byte Reload +.LBB259_30: + movl %eax, 56(%edx) + movl 108(%esp), %eax # 4-byte Reload + js .LBB259_32 +# BB#31: + movl 48(%esp), %eax # 4-byte Reload +.LBB259_32: + movl %eax, 60(%edx) + movl 100(%esp), %eax # 4-byte Reload + js .LBB259_34 +# BB#33: + movl 92(%esp), %eax # 4-byte Reload +.LBB259_34: + movl %eax, 64(%edx) + addl $2572, %esp # imm = 0xA0C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end259: + .size mcl_fp_montNF17Lbmi2, .Lfunc_end259-mcl_fp_montNF17Lbmi2 + + .globl mcl_fp_montRed17Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed17Lbmi2,@function +mcl_fp_montRed17Lbmi2: # @mcl_fp_montRed17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1436, %esp # imm = 0x59C + calll .L260$pb +.L260$pb: + popl %eax +.Ltmp61: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp61-.L260$pb), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1464(%esp), %edx + movl -4(%edx), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 1460(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 80(%esp) # 4-byte Spill + imull %esi, %ebx + movl 132(%ecx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 128(%ecx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 124(%ecx), %eax + movl %eax, 132(%esp) # 4-byte 
Spill + movl 120(%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 112(%ecx), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 108(%ecx), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 100(%ecx), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 96(%ecx), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl %esi, 180(%esp) # 4-byte Spill + movl 80(%ecx), %edi + movl %edi, 196(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 192(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 204(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 64(%ecx), %ebp + movl %ebp, 176(%esp) # 4-byte Spill + movl 60(%ecx), %ebp + movl %ebp, 164(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 52(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %esi + movl 12(%ecx), %edi + movl 8(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 64(%edx), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 
44(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 1360(%esp), %ecx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 76(%esp), %eax # 4-byte Reload + addl 1360(%esp), %eax + movl 80(%esp), %ecx # 4-byte Reload + adcl 1364(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1372(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1376(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl 1380(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 
1416(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + adcl $0, 204(%esp) # 4-byte Folded Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + movl 128(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1288(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + andl $1, %edi + movl %edi, %ecx + addl 1288(%esp), %esi + movl 100(%esp), %edx # 4-byte Reload + adcl 1292(%esp), %edx + movl 72(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 92(%esp) # 4-byte 
Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1324(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %edi # 4-byte Reload + adcl 1336(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + movl 184(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %esi + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 128(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1216(%esp), %ebp + movl 72(%esp), %ecx # 4-byte Reload + adcl 
1220(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl 1260(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + movl 148(%esp), %edi # 4-byte Reload + adcl 1264(%esp), %edi + movl 164(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 184(%esp) # 4-byte Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 144(%esp), %ebp # 4-byte Reload + adcl $0, %ebp 
+ adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1144(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl 1188(%esp), %edi + movl %edi, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1208(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), 
%eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + movl 188(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1072(%esp), %esi + movl 80(%esp), %esi # 4-byte Reload + adcl 1076(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 164(%esp) # 
4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 188(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + movl 172(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + movl 152(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1000(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1004(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 
140(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 172(%esp) # 4-byte Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 152(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 928(%esp), %esi + movl 88(%esp), %esi # 4-byte Reload + adcl 932(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + 
movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + movl 160(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 
100(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 856(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 856(%esp), %esi + movl 92(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + adcl 924(%esp), %ebp + movl %ebp, 168(%esp) # 4-byte Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 160(%esp) # 
4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 124(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + movl 96(%esp), %ebp # 4-byte Reload + imull %ebp, %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 784(%esp), %edi + movl 108(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl 112(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + 
movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 124(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull %ebp, %eax + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 712(%esp), %esi + movl 112(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %ebp # 4-byte Reload + adcl 760(%esp), %ebp + movl 184(%esp), %eax 
# 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl 780(%esp), %edi + movl %edi, 156(%esp) # 4-byte Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 640(%esp), %esi + movl 120(%esp), %ecx # 4-byte Reload + adcl 644(%esp), %ecx + movl 140(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %edi # 4-byte Reload + adcl 672(%esp), %edi + movl 192(%esp), %esi # 4-byte Reload + adcl 676(%esp), %esi + movl 196(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl 684(%esp), %ebp + movl %ebp, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte 
Reload + adcl 688(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1464(%esp), %eax + movl %eax, %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 568(%esp), %ebp + movl 140(%esp), %ecx # 4-byte Reload + adcl 572(%esp), %ecx + movl 136(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %ebp # 4-byte Reload + adcl 588(%esp), %ebp + movl 200(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + adcl 596(%esp), %edi + movl %edi, 204(%esp) # 4-byte Spill + adcl 600(%esp), %esi + movl %esi, 192(%esp) # 4-byte Spill + movl 196(%esp), %esi # 4-byte Reload + adcl 604(%esp), %esi + movl 180(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl 
%eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 496(%esp), %edi + movl 136(%esp), %edi # 4-byte Reload + adcl 500(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + adcl 512(%esp), %ebp + movl %ebp, 176(%esp) # 4-byte Spill + movl 200(%esp), %ebp # 4-byte Reload + adcl 516(%esp), %ebp + movl 204(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + adcl 528(%esp), %esi + movl %esi, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte 
Reload + adcl 540(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 100(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %edi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 424(%esp), %edi + movl 148(%esp), %ecx # 4-byte Reload + adcl 428(%esp), %ecx + movl 164(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + movl 176(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + adcl 440(%esp), %ebp + movl 204(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + 
adcl 468(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 100(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 352(%esp), %esi + movl %edi, %ecx + adcl 356(%esp), %ecx + movl 176(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + adcl 364(%esp), %ebp + movl %ebp, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %edi # 4-byte Reload + adcl 384(%esp), %edi + movl 188(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 396(%esp), 
%eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 100(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 280(%esp), %ebp + movl 176(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl 200(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl 308(%esp), %edi + movl %edi, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 324(%esp), 
%eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, %esi + movl 96(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 208(%esp), %ebp + movl 200(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %edx # 4-byte Reload + adcl 216(%esp), %edx + movl %edx, 204(%esp) # 4-byte Spill + movl 192(%esp), %ecx # 4-byte Reload + adcl 220(%esp), %ecx + movl %ecx, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl %eax, %ebp + movl 184(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + 
adcl 252(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %ebx # 4-byte Reload + adcl 264(%esp), %ebx + movl %ebx, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 276(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 200(%esp), %edi # 4-byte Reload + subl 16(%esp), %edi # 4-byte Folded Reload + sbbl 4(%esp), %edx # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + movl 196(%esp), %eax # 4-byte Reload + sbbl 12(%esp), %eax # 4-byte Folded Reload + sbbl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 88(%esp) # 4-byte Spill + movl 184(%esp), %esi # 4-byte Reload + sbbl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 92(%esp) # 4-byte Spill + movl 188(%esp), %esi # 4-byte Reload + sbbl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 96(%esp) # 4-byte Spill + movl 168(%esp), %esi # 4-byte Reload + sbbl 32(%esp), %esi # 4-byte Folded Reload + movl 172(%esp), %ebp # 4-byte Reload + sbbl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 104(%esp) # 4-byte Spill + movl 156(%esp), %ebp # 4-byte Reload + sbbl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 108(%esp) # 4-byte Spill + movl 160(%esp), %ebp # 4-byte Reload + sbbl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 112(%esp) # 4-byte Spill + movl 152(%esp), %ebp # 4-byte Reload + sbbl 48(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 120(%esp) # 4-byte Spill + movl 144(%esp), %ebp # 4-byte Reload + sbbl 52(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 136(%esp) # 4-byte Spill + sbbl 56(%esp), %ebx # 4-byte Folded Reload + movl 
%ebx, 140(%esp) # 4-byte Spill + movl 132(%esp), %ebx # 4-byte Reload + sbbl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 148(%esp) # 4-byte Spill + movl 124(%esp), %ebx # 4-byte Reload + sbbl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 164(%esp) # 4-byte Spill + movl 116(%esp), %ebx # 4-byte Reload + sbbl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 176(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB260_2 +# BB#1: + movl %esi, 168(%esp) # 4-byte Spill +.LBB260_2: + testb %bl, %bl + movl 200(%esp), %esi # 4-byte Reload + jne .LBB260_4 +# BB#3: + movl %edi, %esi +.LBB260_4: + movl 1456(%esp), %edi + movl %esi, (%edi) + movl 156(%esp), %esi # 4-byte Reload + movl 204(%esp), %ebx # 4-byte Reload + jne .LBB260_6 +# BB#5: + movl %edx, %ebx +.LBB260_6: + movl %ebx, 4(%edi) + movl 144(%esp), %ebx # 4-byte Reload + movl 192(%esp), %edx # 4-byte Reload + jne .LBB260_8 +# BB#7: + movl %ecx, %edx +.LBB260_8: + movl %edx, 8(%edi) + movl 132(%esp), %edx # 4-byte Reload + movl 196(%esp), %ecx # 4-byte Reload + jne .LBB260_10 +# BB#9: + movl %eax, %ecx +.LBB260_10: + movl %ecx, 12(%edi) + movl 124(%esp), %ecx # 4-byte Reload + movl 180(%esp), %eax # 4-byte Reload + jne .LBB260_12 +# BB#11: + movl 88(%esp), %eax # 4-byte Reload +.LBB260_12: + movl %eax, 16(%edi) + movl 188(%esp), %eax # 4-byte Reload + movl 184(%esp), %ebp # 4-byte Reload + jne .LBB260_14 +# BB#13: + movl 92(%esp), %ebp # 4-byte Reload +.LBB260_14: + movl %ebp, 20(%edi) + movl 152(%esp), %ebp # 4-byte Reload + jne .LBB260_16 +# BB#15: + movl 96(%esp), %eax # 4-byte Reload +.LBB260_16: + movl %eax, 24(%edi) + movl 168(%esp), %eax # 4-byte Reload + movl %eax, 28(%edi) + jne .LBB260_18 +# BB#17: + movl 104(%esp), %eax # 4-byte Reload + movl %eax, 172(%esp) # 4-byte Spill +.LBB260_18: + movl 172(%esp), %eax # 4-byte Reload + movl %eax, 32(%edi) + jne .LBB260_20 +# BB#19: + movl 108(%esp), %esi # 4-byte Reload +.LBB260_20: + movl %esi, 36(%edi) + 
jne .LBB260_22 +# BB#21: + movl 112(%esp), %eax # 4-byte Reload + movl %eax, 160(%esp) # 4-byte Spill +.LBB260_22: + movl 160(%esp), %esi # 4-byte Reload + movl %esi, 40(%edi) + movl 128(%esp), %eax # 4-byte Reload + jne .LBB260_24 +# BB#23: + movl 120(%esp), %ebp # 4-byte Reload +.LBB260_24: + movl %ebp, 44(%edi) + jne .LBB260_26 +# BB#25: + movl 136(%esp), %ebx # 4-byte Reload +.LBB260_26: + movl %ebx, 48(%edi) + jne .LBB260_28 +# BB#27: + movl 140(%esp), %eax # 4-byte Reload +.LBB260_28: + movl %eax, 52(%edi) + jne .LBB260_30 +# BB#29: + movl 148(%esp), %edx # 4-byte Reload +.LBB260_30: + movl %edx, 56(%edi) + movl 116(%esp), %eax # 4-byte Reload + jne .LBB260_32 +# BB#31: + movl 164(%esp), %ecx # 4-byte Reload +.LBB260_32: + movl %ecx, 60(%edi) + jne .LBB260_34 +# BB#33: + movl 176(%esp), %eax # 4-byte Reload +.LBB260_34: + movl %eax, 64(%edi) + addl $1436, %esp # imm = 0x59C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end260: + .size mcl_fp_montRed17Lbmi2, .Lfunc_end260-mcl_fp_montRed17Lbmi2 + + .globl mcl_fp_addPre17Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre17Lbmi2,@function +mcl_fp_addPre17Lbmi2: # @mcl_fp_addPre17Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %ebx + adcl 8(%ecx), %ebx + movl 16(%esp), %edi + movl %edx, (%edi) + movl 12(%ecx), %edx + movl %esi, 4(%edi) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %ebx, 8(%edi) + movl 20(%eax), %ebx + movl %edx, 12(%edi) + movl 20(%ecx), %edx + adcl %ebx, %edx + movl 24(%eax), %ebx + movl %esi, 16(%edi) + movl 24(%ecx), %esi + adcl %ebx, %esi + movl 28(%eax), %ebx + movl %edx, 20(%edi) + movl 28(%ecx), %edx + adcl %ebx, %edx + movl 32(%eax), %ebx + movl %esi, 24(%edi) + movl 32(%ecx), %esi + adcl %ebx, %esi + movl 36(%eax), %ebx + movl %edx, 28(%edi) + movl 36(%ecx), %edx + adcl %ebx, %edx + movl 40(%eax), %ebx + 
movl %esi, 32(%edi) + movl 40(%ecx), %esi + adcl %ebx, %esi + movl 44(%eax), %ebx + movl %edx, 36(%edi) + movl 44(%ecx), %edx + adcl %ebx, %edx + movl 48(%eax), %ebx + movl %esi, 40(%edi) + movl 48(%ecx), %esi + adcl %ebx, %esi + movl 52(%eax), %ebx + movl %edx, 44(%edi) + movl 52(%ecx), %edx + adcl %ebx, %edx + movl 56(%eax), %ebx + movl %esi, 48(%edi) + movl 56(%ecx), %esi + adcl %ebx, %esi + movl 60(%eax), %ebx + movl %edx, 52(%edi) + movl 60(%ecx), %edx + adcl %ebx, %edx + movl %esi, 56(%edi) + movl %edx, 60(%edi) + movl 64(%eax), %eax + movl 64(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 64(%edi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end261: + .size mcl_fp_addPre17Lbmi2, .Lfunc_end261-mcl_fp_addPre17Lbmi2 + + .globl mcl_fp_subPre17Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre17Lbmi2,@function +mcl_fp_subPre17Lbmi2: # @mcl_fp_subPre17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebp + sbbl 8(%edx), %ebp + movl 20(%esp), %ebx + movl %esi, (%ebx) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebx) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebp, 8(%ebx) + movl 20(%edx), %ebp + movl %esi, 12(%ebx) + movl 20(%ecx), %esi + sbbl %ebp, %esi + movl 24(%edx), %ebp + movl %edi, 16(%ebx) + movl 24(%ecx), %edi + sbbl %ebp, %edi + movl 28(%edx), %ebp + movl %esi, 20(%ebx) + movl 28(%ecx), %esi + sbbl %ebp, %esi + movl 32(%edx), %ebp + movl %edi, 24(%ebx) + movl 32(%ecx), %edi + sbbl %ebp, %edi + movl 36(%edx), %ebp + movl %esi, 28(%ebx) + movl 36(%ecx), %esi + sbbl %ebp, %esi + movl 40(%edx), %ebp + movl %edi, 32(%ebx) + movl 40(%ecx), %edi + sbbl %ebp, %edi + movl 44(%edx), %ebp + movl %esi, 36(%ebx) + movl 44(%ecx), %esi + sbbl %ebp, %esi + movl 48(%edx), %ebp + movl %edi, 40(%ebx) + movl 48(%ecx), %edi + sbbl %ebp, %edi + movl 
52(%edx), %ebp + movl %esi, 44(%ebx) + movl 52(%ecx), %esi + sbbl %ebp, %esi + movl 56(%edx), %ebp + movl %edi, 48(%ebx) + movl 56(%ecx), %edi + sbbl %ebp, %edi + movl 60(%edx), %ebp + movl %esi, 52(%ebx) + movl 60(%ecx), %esi + sbbl %ebp, %esi + movl %edi, 56(%ebx) + movl %esi, 60(%ebx) + movl 64(%edx), %edx + movl 64(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 64(%ebx) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end262: + .size mcl_fp_subPre17Lbmi2, .Lfunc_end262-mcl_fp_subPre17Lbmi2 + + .globl mcl_fp_shr1_17Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_17Lbmi2,@function +mcl_fp_shr1_17Lbmi2: # @mcl_fp_shr1_17Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 44(%ecx) + movl 52(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 48(%ecx) + movl 56(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 52(%ecx) + movl 60(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 56(%ecx) + movl 64(%eax), %eax + shrdl $1, %eax, %esi + movl %esi, 60(%ecx) + shrl %eax + movl %eax, 64(%ecx) + popl %esi + retl +.Lfunc_end263: + .size mcl_fp_shr1_17Lbmi2, .Lfunc_end263-mcl_fp_shr1_17Lbmi2 + + .globl mcl_fp_add17Lbmi2 + .align 16, 
0x90 + .type mcl_fp_add17Lbmi2,@function +mcl_fp_add17Lbmi2: # @mcl_fp_add17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 88(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + movl 84(%esp), %edx + addl (%edx), %ecx + movl %ecx, %ebx + adcl 4(%edx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 8(%esi), %eax + adcl 8(%edx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl 16(%edx), %edi + adcl 12(%esi), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 20(%edx), %eax + adcl 20(%esi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 24(%edx), %eax + adcl 24(%esi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 28(%edx), %eax + adcl 28(%esi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%edx), %eax + adcl 32(%esi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 36(%edx), %eax + adcl 36(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 40(%edx), %eax + adcl 40(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%edx), %eax + adcl 44(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 48(%edx), %eax + adcl 48(%esi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 52(%edx), %eax + adcl 52(%esi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 56(%edx), %eax + adcl 56(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 60(%edx), %ebp + adcl 60(%esi), %ebp + movl 64(%edx), %edx + adcl 64(%esi), %edx + movl 80(%esp), %esi + movl %ebx, (%esi) + movl %ebx, %eax + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%esi) + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%esi) + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%esi) + movl %edi, 16(%esi) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 20(%esi) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 24(%esi) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 28(%esi) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 32(%esi) + movl 32(%esp), %edi 
# 4-byte Reload + movl %edi, 36(%esi) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 40(%esi) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 44(%esi) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 48(%esi) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 52(%esi) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 56(%esi) + movl %ebp, 60(%esi) + movl %edx, 64(%esi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 92(%esp), %edi + subl (%edi), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + sbbl 8(%edi), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 4(%esp), %eax # 4-byte Reload + sbbl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + sbbl 24(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + sbbl 44(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + sbbl 48(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + sbbl 56(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 60(%edi), %ebp + sbbl 64(%edi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB264_2 +# BB#1: # %nocarry + movl (%esp), %edi # 4-byte Reload + movl %edi, (%esi) + movl 8(%esp), %edi # 
4-byte Reload + movl %edi, 4(%esi) + movl 56(%esp), %edi # 4-byte Reload + movl %edi, 8(%esi) + movl 52(%esp), %edi # 4-byte Reload + movl %edi, 12(%esi) + movl 4(%esp), %edi # 4-byte Reload + movl %edi, 16(%esi) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 20(%esi) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 24(%esi) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 28(%esi) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 32(%esi) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 36(%esi) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 40(%esi) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 44(%esi) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 48(%esi) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%esi) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%esi) + movl %ebp, 60(%esi) + movl %edx, 64(%esi) +.LBB264_2: # %carry + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end264: + .size mcl_fp_add17Lbmi2, .Lfunc_end264-mcl_fp_add17Lbmi2 + + .globl mcl_fp_addNF17Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF17Lbmi2,@function +mcl_fp_addNF17Lbmi2: # @mcl_fp_addNF17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $132, %esp + movl 160(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 156(%esp), %esi + addl (%esi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 4(%esi), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 64(%eax), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 60(%eax), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 56(%eax), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 52(%eax), %ebp + movl 48(%eax), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 44(%eax), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 40(%eax), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 36(%eax), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 32(%eax), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 28(%eax), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + 
movl 24(%eax), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 20(%eax), %ebx + movl 16(%eax), %edi + movl 12(%eax), %edx + movl 8(%eax), %ecx + adcl 8(%esi), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 12(%esi), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 20(%esi), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 24(%esi), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 28(%esi), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 32(%esi), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esi), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 40(%esi), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esi), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esi), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 52(%esi), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 56(%esi), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 60(%esi), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 64(%esi), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 164(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + subl (%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 4(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 8(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%esi), %edx + sbbl 16(%esi), %edi + movl %edi, 12(%esp) # 4-byte Spill + sbbl 20(%esi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 120(%esp), %ebx # 4-byte Reload + sbbl 24(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 28(%esi), %eax + movl 
%eax, 24(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + sbbl 32(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 36(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + sbbl 40(%esi), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + sbbl 44(%esi), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + sbbl 48(%esi), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 52(%esi), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + movl %eax, %ecx + movl %eax, %ebp + sbbl 56(%esi), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 60(%esi), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + movl %eax, %ebx + sbbl 64(%esi), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + movl %ebx, %esi + sarl $31, %esi + testl %esi, %esi + movl 84(%esp), %esi # 4-byte Reload + js .LBB265_2 +# BB#1: + movl (%esp), %esi # 4-byte Reload +.LBB265_2: + movl 152(%esp), %ebx + movl %esi, (%ebx) + movl 88(%esp), %eax # 4-byte Reload + js .LBB265_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB265_4: + movl %eax, 4(%ebx) + movl 108(%esp), %eax # 4-byte Reload + movl 76(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + js .LBB265_6 +# BB#5: + movl 8(%esp), %edi # 4-byte Reload +.LBB265_6: + movl %edi, 8(%ebx) + movl 116(%esp), %edi # 4-byte Reload + movl 68(%esp), %ecx # 4-byte Reload + js .LBB265_8 +# BB#7: + movl %edx, %ecx +.LBB265_8: + movl %ecx, 12(%ebx) + movl 104(%esp), %ecx # 4-byte Reload + movl 72(%esp), %edx # 4-byte Reload + js .LBB265_10 +# BB#9: + movl 12(%esp), %edx # 4-byte Reload +.LBB265_10: + movl %edx, 16(%ebx) + movl %ebp, %edx + js .LBB265_12 +# BB#11: + movl 16(%esp), %esi # 4-byte Reload +.LBB265_12: + movl %esi, 20(%ebx) + movl 
112(%esp), %ebp # 4-byte Reload + js .LBB265_14 +# BB#13: + movl 20(%esp), %esi # 4-byte Reload + movl %esi, 120(%esp) # 4-byte Spill +.LBB265_14: + movl 120(%esp), %esi # 4-byte Reload + movl %esi, 24(%ebx) + js .LBB265_16 +# BB#15: + movl 24(%esp), %ecx # 4-byte Reload +.LBB265_16: + movl %ecx, 28(%ebx) + js .LBB265_18 +# BB#17: + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 124(%esp) # 4-byte Spill +.LBB265_18: + movl 124(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%ebx) + js .LBB265_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB265_20: + movl %eax, 36(%ebx) + movl 100(%esp), %ecx # 4-byte Reload + js .LBB265_22 +# BB#21: + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 128(%esp) # 4-byte Spill +.LBB265_22: + movl 128(%esp), %eax # 4-byte Reload + movl %eax, 40(%ebx) + js .LBB265_24 +# BB#23: + movl 40(%esp), %ebp # 4-byte Reload +.LBB265_24: + movl %ebp, 44(%ebx) + js .LBB265_26 +# BB#25: + movl 44(%esp), %edi # 4-byte Reload +.LBB265_26: + movl %edi, 48(%ebx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB265_28 +# BB#27: + movl 48(%esp), %eax # 4-byte Reload +.LBB265_28: + movl %eax, 52(%ebx) + js .LBB265_30 +# BB#29: + movl 52(%esp), %edx # 4-byte Reload +.LBB265_30: + movl %edx, 56(%ebx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB265_32 +# BB#31: + movl 56(%esp), %eax # 4-byte Reload +.LBB265_32: + movl %eax, 60(%ebx) + js .LBB265_34 +# BB#33: + movl 60(%esp), %ecx # 4-byte Reload +.LBB265_34: + movl %ecx, 64(%ebx) + addl $132, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end265: + .size mcl_fp_addNF17Lbmi2, .Lfunc_end265-mcl_fp_addNF17Lbmi2 + + .globl mcl_fp_sub17Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub17Lbmi2,@function +mcl_fp_sub17Lbmi2: # @mcl_fp_sub17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $64, %esp + movl 88(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 92(%esp), %edi + subl (%edi), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl 4(%edi), 
%ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 40(%esi), %eax + sbbl 40(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 44(%esi), %eax + sbbl 44(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 48(%esi), %edx + sbbl 48(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 52(%esi), %ecx + sbbl 52(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 56(%esi), %eax + sbbl 56(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 60(%esi), %ebp + sbbl 60(%edi), %ebp + movl 64(%esi), %esi + sbbl 64(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 84(%esp), %ebx + movl 56(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 60(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 52(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 36(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 40(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 44(%ebx) + movl %edx, 48(%ebx) + movl %ecx, 52(%ebx) + movl 
%eax, 56(%ebx) + movl %ebp, 60(%ebx) + movl %esi, 64(%ebx) + je .LBB266_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 96(%esp), %esi + movl 56(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 48(%esp), %edi # 4-byte Reload + adcl 8(%esi), %edi + movl 12(%esi), %eax + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl 48(%esi), %ecx + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%ebx) + movl 52(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%ebx) + movl 56(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 52(%ebx) + movl %ecx, 56(%ebx) + movl 60(%esi), %eax + adcl %ebp, %eax + movl %eax, 60(%ebx) + movl 64(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ebx) +.LBB266_2: # %nocarry + addl $64, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end266: + .size mcl_fp_sub17Lbmi2, .Lfunc_end266-mcl_fp_sub17Lbmi2 + + .globl mcl_fp_subNF17Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF17Lbmi2,@function +mcl_fp_subNF17Lbmi2: # @mcl_fp_subNF17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $116, %esp + movl 
140(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 144(%esp), %edi + subl (%edi), %esi + movl %esi, 72(%esp) # 4-byte Spill + sbbl 4(%edi), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 60(%ecx), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 56(%ecx), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 52(%ecx), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 40(%ecx), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 36(%ecx), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 56(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 60(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 64(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 36(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + sbbl 40(%edi), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + sbbl 56(%edi), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + sbbl 60(%edi), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + 
movl 88(%esp), %ecx # 4-byte Reload + sbbl 64(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl %ecx, %eax + sarl $31, %eax + movl %eax, %edx + shldl $1, %ecx, %edx + movl 148(%esp), %ebx + movl 28(%ebx), %ecx + andl %edx, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + andl %edx, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 4(%ebx), %ecx + andl %edx, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + andl (%ebx), %edx + movl 64(%ebx), %edi + movl %eax, %ecx + andl %ecx, %edi + movl %edi, 44(%esp) # 4-byte Spill + rorxl $31, %ecx, %eax + andl 60(%ebx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 56(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 48(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 40(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 36(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 32(%ebx), %ecx + andl %eax, %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 24(%ebx), %ebp + andl %eax, %ebp + movl 20(%ebx), %edi + andl %eax, %edi + movl 16(%ebx), %esi + andl %eax, %esi + andl 8(%ebx), %eax + addl 72(%esp), %edx # 4-byte Folded Reload + movl 8(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl 136(%esp), %ebx + movl %edx, (%ebx) + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %ecx, 4(%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %eax, 8(%ebx) + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %edx, 12(%ebx) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %esi, 16(%ebx) + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl %edi, 20(%ebx) + movl 32(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %ebp, 24(%ebx) + movl (%esp), %ecx # 4-byte Reload + adcl 80(%esp), 
%ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 4(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 12(%esp), %ecx # 4-byte Reload + adcl 112(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 20(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl 24(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%ebx) + movl 28(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%ebx) + movl 36(%esp), %ecx # 4-byte Reload + adcl 104(%esp), %ecx # 4-byte Folded Reload + movl %eax, 52(%ebx) + movl 40(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %ecx, 56(%ebx) + movl %eax, 60(%ebx) + movl 44(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ebx) + addl $116, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end267: + .size mcl_fp_subNF17Lbmi2, .Lfunc_end267-mcl_fp_subNF17Lbmi2 + + .globl mcl_fpDbl_add17Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add17Lbmi2,@function +mcl_fpDbl_add17Lbmi2: # @mcl_fpDbl_add17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $128, %esp + movl 156(%esp), %ecx + movl 152(%esp), %edx + movl 12(%edx), %edi + movl 16(%edx), %esi + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%edx), %ebp + movl 148(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%edx), %ebp + adcl 8(%edx), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %esi + movl %ebp, 4(%eax) + movl 76(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%edx), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %esi, 16(%eax) + movl 24(%edx), %esi + adcl %ebx, %esi + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%edx), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %esi, 24(%eax) + movl 32(%edx), %esi + adcl %ebx, %esi + movl 36(%ecx), 
%ebx + movl %edi, 28(%eax) + movl 36(%edx), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %esi, 32(%eax) + movl 40(%edx), %esi + adcl %ebx, %esi + movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%edx), %edi + adcl %ebx, %edi + movl 48(%ecx), %ebx + movl %esi, 40(%eax) + movl 48(%edx), %esi + adcl %ebx, %esi + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%edx), %edi + adcl %ebx, %edi + movl 56(%ecx), %ebx + movl %esi, 48(%eax) + movl 56(%edx), %esi + adcl %ebx, %esi + movl 60(%ecx), %ebx + movl %edi, 52(%eax) + movl 60(%edx), %edi + adcl %ebx, %edi + movl 64(%ecx), %ebx + movl %esi, 56(%eax) + movl 64(%edx), %esi + adcl %ebx, %esi + movl 68(%ecx), %ebx + movl %edi, 60(%eax) + movl 68(%edx), %edi + adcl %ebx, %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 72(%ecx), %edi + movl %esi, 64(%eax) + movl 72(%edx), %eax + adcl %edi, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 76(%edx), %eax + adcl %ebp, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl 80(%edx), %eax + adcl %esi, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl 84(%edx), %eax + adcl %esi, %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 88(%ecx), %esi + movl 88(%edx), %eax + adcl %esi, %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%ecx), %esi + movl 92(%edx), %eax + adcl %esi, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 96(%ecx), %esi + movl 96(%edx), %eax + adcl %esi, %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 100(%ecx), %esi + movl 100(%edx), %eax + adcl %esi, %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl 104(%edx), %eax + adcl %esi, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%ecx), %esi + movl 108(%edx), %eax + adcl %esi, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 112(%ecx), %esi + movl 112(%edx), %eax + adcl %esi, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 116(%ecx), %esi + movl 116(%edx), %eax + adcl %esi, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 120(%ecx), 
%edi + movl 120(%edx), %esi + adcl %edi, %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 124(%ecx), %ebx + movl 124(%edx), %edi + adcl %ebx, %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 128(%ecx), %ebx + movl 128(%edx), %ebp + adcl %ebx, %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 132(%ecx), %ecx + movl 132(%edx), %edx + adcl %ecx, %edx + sbbl %ecx, %ecx + andl $1, %ecx + movl 160(%esp), %ebx + movl 92(%esp), %eax # 4-byte Reload + subl (%ebx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 4(%ebx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 8(%ebx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 12(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 16(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + sbbl 20(%ebx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + sbbl 24(%ebx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 28(%ebx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + sbbl 32(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 36(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 40(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + sbbl 44(%ebx), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 48(%ebx), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 52(%ebx), %esi + movl %esi, 52(%esp) # 4-byte Spill + sbbl 56(%ebx), %edi + movl %edi, 56(%esp) # 4-byte Spill + sbbl 60(%ebx), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl %edx, %ebp + sbbl 64(%ebx), %ebp + sbbl $0, %ecx + andl $1, %ecx + jne .LBB268_2 +# BB#1: + movl %ebp, %edx +.LBB268_2: + testb %cl, %cl + movl 
92(%esp), %eax # 4-byte Reload + movl 88(%esp), %esi # 4-byte Reload + movl 84(%esp), %edi # 4-byte Reload + movl 80(%esp), %ebx # 4-byte Reload + movl 76(%esp), %ebp # 4-byte Reload + jne .LBB268_4 +# BB#3: + movl (%esp), %esi # 4-byte Reload + movl 4(%esp), %edi # 4-byte Reload + movl 8(%esp), %ebx # 4-byte Reload + movl 12(%esp), %ebp # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 124(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 120(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload +.LBB268_4: + movl 148(%esp), %ecx + movl %eax, 68(%ecx) + movl %ecx, %eax + movl 96(%esp), %ecx # 4-byte Reload + movl %ecx, 72(%eax) + movl 100(%esp), %ecx # 4-byte Reload + movl %ecx, 76(%eax) + movl 104(%esp), %ecx # 4-byte Reload + movl %ecx, 80(%eax) + movl 108(%esp), %ecx # 4-byte Reload + movl %ecx, 84(%eax) + movl 112(%esp), %ecx # 4-byte Reload + movl %ecx, 88(%eax) + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 92(%eax) + movl 120(%esp), %ecx # 4-byte Reload + movl %ecx, 96(%eax) + movl 124(%esp), %ecx # 4-byte Reload + movl %ecx, 100(%eax) + movl %ebp, 104(%eax) + movl %ebx, 108(%eax) + movl %edi, 112(%eax) + movl %esi, 116(%eax) + movl 72(%esp), %ecx # 4-byte Reload + movl 64(%esp), %esi # 4-byte Reload + jne .LBB268_6 +# BB#5: + movl 52(%esp), %esi # 4-byte Reload +.LBB268_6: + movl %esi, 120(%eax) + movl 68(%esp), %esi # 4-byte Reload + jne .LBB268_8 +# BB#7: + movl 56(%esp), %esi # 4-byte Reload +.LBB268_8: + movl %esi, 124(%eax) + jne .LBB268_10 +# BB#9: + movl 
60(%esp), %ecx # 4-byte Reload +.LBB268_10: + movl %ecx, 128(%eax) + movl %edx, 132(%eax) + addl $128, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end268: + .size mcl_fpDbl_add17Lbmi2, .Lfunc_end268-mcl_fpDbl_add17Lbmi2 + + .globl mcl_fpDbl_sub17Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub17Lbmi2,@function +mcl_fpDbl_sub17Lbmi2: # @mcl_fpDbl_sub17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $116, %esp + movl 140(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %edi + movl 144(%esp), %esi + subl (%esi), %eax + sbbl 4(%esi), %edi + movl 8(%edx), %ebx + sbbl 8(%esi), %ebx + movl 136(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edx), %eax + sbbl 12(%esi), %eax + movl %edi, 4(%ecx) + movl 16(%edx), %edi + sbbl 16(%esi), %edi + movl %ebx, 8(%ecx) + movl 20(%esi), %ebx + movl %eax, 12(%ecx) + movl 20(%edx), %eax + sbbl %ebx, %eax + movl 24(%esi), %ebx + movl %edi, 16(%ecx) + movl 24(%edx), %edi + sbbl %ebx, %edi + movl 28(%esi), %ebx + movl %eax, 20(%ecx) + movl 28(%edx), %eax + sbbl %ebx, %eax + movl 32(%esi), %ebx + movl %edi, 24(%ecx) + movl 32(%edx), %edi + sbbl %ebx, %edi + movl 36(%esi), %ebx + movl %eax, 28(%ecx) + movl 36(%edx), %eax + sbbl %ebx, %eax + movl 40(%esi), %ebx + movl %edi, 32(%ecx) + movl 40(%edx), %edi + sbbl %ebx, %edi + movl 44(%esi), %ebx + movl %eax, 36(%ecx) + movl 44(%edx), %eax + sbbl %ebx, %eax + movl 48(%esi), %ebx + movl %edi, 40(%ecx) + movl 48(%edx), %edi + sbbl %ebx, %edi + movl 52(%esi), %ebx + movl %eax, 44(%ecx) + movl 52(%edx), %eax + sbbl %ebx, %eax + movl 56(%esi), %ebx + movl %edi, 48(%ecx) + movl 56(%edx), %edi + sbbl %ebx, %edi + movl 60(%esi), %ebx + movl %eax, 52(%ecx) + movl 60(%edx), %eax + sbbl %ebx, %eax + movl 64(%esi), %ebx + movl %edi, 56(%ecx) + movl 64(%edx), %edi + sbbl %ebx, %edi + movl 68(%esi), %ebx + movl %eax, 60(%ecx) + movl 68(%edx), %eax + sbbl %ebx, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 72(%esi), %eax + movl %edi, 64(%ecx) + movl 72(%edx), %edi + 
sbbl %eax, %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 76(%esi), %eax + movl 76(%edx), %edi + sbbl %eax, %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 80(%esi), %eax + movl 80(%edx), %edi + sbbl %eax, %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 84(%esi), %eax + movl 84(%edx), %edi + sbbl %eax, %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 88(%esi), %eax + movl 88(%edx), %edi + sbbl %eax, %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 92(%esi), %eax + movl 92(%edx), %edi + sbbl %eax, %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 96(%esi), %eax + movl 96(%edx), %edi + sbbl %eax, %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 100(%esi), %eax + movl 100(%edx), %edi + sbbl %eax, %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 104(%esi), %eax + movl 104(%edx), %edi + sbbl %eax, %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 108(%esi), %eax + movl 108(%edx), %edi + sbbl %eax, %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 112(%esi), %eax + movl 112(%edx), %edi + sbbl %eax, %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 116(%esi), %eax + movl 116(%edx), %edi + sbbl %eax, %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 120(%esi), %eax + movl 120(%edx), %edi + sbbl %eax, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 124(%esi), %eax + movl 124(%edx), %edi + sbbl %eax, %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 128(%esi), %eax + movl 128(%edx), %edi + sbbl %eax, %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 132(%esi), %eax + movl 132(%edx), %edx + sbbl %eax, %edx + movl %edx, 112(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 148(%esp), %ebp + jne .LBB269_1 +# BB#2: + movl $0, 76(%esp) # 4-byte Folded Spill + jmp .LBB269_3 +.LBB269_1: + movl 64(%ebp), %edx + movl %edx, 76(%esp) # 4-byte Spill +.LBB269_3: + testb %al, %al + jne .LBB269_4 +# BB#5: + movl $0, 28(%esp) # 4-byte Folded Spill + movl $0, %esi + jmp .LBB269_6 +.LBB269_4: + movl (%ebp), %esi + movl 4(%ebp), %eax + movl %eax, 28(%esp) # 4-byte 
Spill +.LBB269_6: + jne .LBB269_7 +# BB#8: + movl $0, 40(%esp) # 4-byte Folded Spill + jmp .LBB269_9 +.LBB269_7: + movl 60(%ebp), %eax + movl %eax, 40(%esp) # 4-byte Spill +.LBB269_9: + jne .LBB269_10 +# BB#11: + movl $0, 36(%esp) # 4-byte Folded Spill + jmp .LBB269_12 +.LBB269_10: + movl 56(%ebp), %eax + movl %eax, 36(%esp) # 4-byte Spill +.LBB269_12: + jne .LBB269_13 +# BB#14: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB269_15 +.LBB269_13: + movl 52(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB269_15: + jne .LBB269_16 +# BB#17: + movl $0, 24(%esp) # 4-byte Folded Spill + jmp .LBB269_18 +.LBB269_16: + movl 48(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB269_18: + jne .LBB269_19 +# BB#20: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB269_21 +.LBB269_19: + movl 44(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB269_21: + jne .LBB269_22 +# BB#23: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB269_24 +.LBB269_22: + movl 40(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB269_24: + jne .LBB269_25 +# BB#26: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB269_27 +.LBB269_25: + movl 36(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB269_27: + jne .LBB269_28 +# BB#29: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB269_30 +.LBB269_28: + movl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB269_30: + jne .LBB269_31 +# BB#32: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB269_33 +.LBB269_31: + movl 28(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB269_33: + jne .LBB269_34 +# BB#35: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB269_36 +.LBB269_34: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB269_36: + jne .LBB269_37 +# BB#38: + movl $0, %ebx + jmp .LBB269_39 +.LBB269_37: + movl 20(%ebp), %ebx +.LBB269_39: + jne .LBB269_40 +# BB#41: + movl $0, %edi + jmp .LBB269_42 +.LBB269_40: + movl 16(%ebp), %edi +.LBB269_42: + jne .LBB269_43 +# BB#44: + movl %ebp, %eax + movl $0, %ebp + jmp 
.LBB269_45 +.LBB269_43: + movl %ebp, %eax + movl 12(%eax), %ebp +.LBB269_45: + jne .LBB269_46 +# BB#47: + xorl %eax, %eax + jmp .LBB269_48 +.LBB269_46: + movl 8(%eax), %eax +.LBB269_48: + addl 52(%esp), %esi # 4-byte Folded Reload + movl 28(%esp), %edx # 4-byte Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %esi, 68(%ecx) + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %edx, 72(%ecx) + adcl 56(%esp), %ebp # 4-byte Folded Reload + movl %eax, 76(%ecx) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %ebp, 80(%ecx) + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %edi, 84(%ecx) + movl (%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %ebx, 88(%ecx) + movl 4(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 8(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl 16(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %eax, 104(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %edx, 108(%ecx) + movl 24(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx # 4-byte Folded Reload + movl %eax, 112(%ecx) + movl 32(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %edx, 116(%ecx) + movl 36(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx # 4-byte Folded Reload + movl %eax, 120(%ecx) + movl 40(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %edx, 124(%ecx) + movl %eax, 128(%ecx) + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 132(%ecx) + addl $116, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end269: + .size mcl_fpDbl_sub17Lbmi2, .Lfunc_end269-mcl_fpDbl_sub17Lbmi2 + + + .section ".note.GNU-stack","",@progbits |