Source release 15.2.0

John W. Bruce
2019-06-28 16:02:52 -07:00
parent 2b26dee09c
commit 2990f23065
1236 changed files with 166886 additions and 142315 deletions


@@ -556,6 +556,12 @@ L$handle_ctr32_2:
.p2align 5
_aesni_gcm_encrypt:
#ifndef NDEBUG
#ifndef BORINGSSL_FIPS
movb $1,_BORINGSSL_function_hit+2(%rip)
#endif
#endif
xorq %r10,%r10
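
The only change in this hunk is the new movb into _BORINGSSL_function_hit, emitted when the build is neither NDEBUG nor FIPS: it records that the AES-NI GCM entry point was actually executed, so dispatch tests can verify which assembly path ran. Below is a minimal C sketch of how such a flag array might be consumed; the guard macros, array length, and slot index are assumptions read off the assembly above, not copied from a BoringSSL header.

/* Sketch only: array length and slot numbering are assumptions inferred
 * from the generated assembly (slot 2 is set by _aesni_gcm_encrypt). */
#include <stdint.h>
#include <string.h>

#if !defined(NDEBUG) && !defined(BORINGSSL_FIPS)
extern uint8_t BORINGSSL_function_hit[7];

/* Test-side helper: clear the flags, run one operation, and report whether
 * the AES-NI GCM assembly path was taken. */
static int aesni_gcm_path_was_hit(void (*op)(void)) {
  memset(BORINGSSL_function_hit, 0, sizeof(BORINGSSL_function_hit));
  op();
  return BORINGSSL_function_hit[2] == 1;
}
#endif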

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,426 @@
# This file is generated from a similarly-named Perl script in the BoringSSL
# source tree. Do not edit by hand.
#if defined(__has_feature)
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif
#endif
#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM)
#if defined(BORINGSSL_PREFIX)
#include <boringssl_prefix_symbols_asm.h>
#endif
.text
.globl _gcm_gmult_ssse3
.private_extern _gcm_gmult_ssse3
.p2align 4
_gcm_gmult_ssse3:
L$gmult_seh_begin:
movdqu (%rdi),%xmm0
movdqa L$reverse_bytes(%rip),%xmm10
movdqa L$low4_mask(%rip),%xmm2
.byte 102,65,15,56,0,194
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
movq $5,%rax
L$oop_row_1:
movdqa (%rsi),%xmm4
leaq 16(%rsi),%rsi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subq $1,%rax
jnz L$oop_row_1
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movq $5,%rax
L$oop_row_2:
movdqa (%rsi),%xmm4
leaq 16(%rsi),%rsi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subq $1,%rax
jnz L$oop_row_2
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movq $6,%rax
L$oop_row_3:
movdqa (%rsi),%xmm4
leaq 16(%rsi),%rsi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subq $1,%rax
jnz L$oop_row_3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
.byte 102,65,15,56,0,210
movdqu %xmm2,(%rdi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
.byte 0xf3,0xc3
L$gmult_seh_end:
.globl _gcm_ghash_ssse3
.private_extern _gcm_ghash_ssse3
.p2align 4
_gcm_ghash_ssse3:
L$ghash_seh_begin:
movdqu (%rdi),%xmm0
movdqa L$reverse_bytes(%rip),%xmm10
movdqa L$low4_mask(%rip),%xmm11
andq $-16,%rcx
.byte 102,65,15,56,0,194
pxor %xmm3,%xmm3
L$oop_ghash:
movdqu (%rdx),%xmm1
.byte 102,65,15,56,0,202
pxor %xmm1,%xmm0
movdqa %xmm11,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm11,%xmm0
pxor %xmm2,%xmm2
movq $5,%rax
L$oop_row_4:
movdqa (%rsi),%xmm4
leaq 16(%rsi),%rsi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subq $1,%rax
jnz L$oop_row_4
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movq $5,%rax
L$oop_row_5:
movdqa (%rsi),%xmm4
leaq 16(%rsi),%rsi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subq $1,%rax
jnz L$oop_row_5
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movq $6,%rax
L$oop_row_6:
movdqa (%rsi),%xmm4
leaq 16(%rsi),%rsi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subq $1,%rax
jnz L$oop_row_6
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movdqa %xmm2,%xmm0
leaq -256(%rsi),%rsi
leaq 16(%rdx),%rdx
subq $16,%rcx
jnz L$oop_ghash
.byte 102,65,15,56,0,194
movdqu %xmm0,(%rdi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
.byte 0xf3,0xc3
L$ghash_seh_end:
.p2align 4
L$reverse_bytes:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
L$low4_mask:
.quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
#endif
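
The new file above is a constant-time, table-driven SSSE3 GHASH implementation generated from its Perl source. For orientation, here is a hedged C sketch of the declarations a caller would rely on; the prototypes are inferred from the SysV registers used above (rdi = Xi, rsi = Htable, rdx = input, rcx = length) rather than copied from a header.

/* Sketch only: the Htable element type and exact parameter types are
 * assumptions based on the 256-byte table walked by the assembly. */
#include <stdint.h>
#include <stddef.h>

typedef struct { uint64_t hi, lo; } u128;

/* Multiply the running hash value Xi by H using the precomputed table. */
void gcm_gmult_ssse3(uint64_t Xi[2], const u128 Htable[16]);

/* Fold |len| bytes of |in| into Xi; the assembly masks the length with
 * andq $-16, so callers pass whole 16-byte blocks. */
void gcm_ghash_ssse3(uint64_t Xi[2], const u128 Htable[16],
                     const uint8_t *in, size_t len);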


@@ -709,6 +709,7 @@ L$ghash_epilogue:
.p2align 4
_gcm_init_clmul:
L$_init_clmul:
movdqu (%rsi),%xmm2
pshufd $78,%xmm2,%xmm2
@@ -861,11 +862,13 @@ L$_init_clmul:
movdqu %xmm4,80(%rdi)
.byte 0xf3,0xc3
.globl _gcm_gmult_clmul
.private_extern _gcm_gmult_clmul
.p2align 4
_gcm_gmult_clmul:
L$_gmult_clmul:
movdqu (%rdi),%xmm0
movdqa L$bswap_mask(%rip),%xmm5
@@ -913,11 +916,13 @@ L$_gmult_clmul:
movdqu %xmm0,(%rdi)
.byte 0xf3,0xc3
.globl _gcm_ghash_clmul
.private_extern _gcm_ghash_clmul
.p2align 5
_gcm_ghash_clmul:
L$_ghash_clmul:
movdqa L$bswap_mask(%rip),%xmm10
@@ -1298,11 +1303,13 @@ L$done:
movdqu %xmm0,(%rdi)
.byte 0xf3,0xc3
.globl _gcm_init_avx
.private_extern _gcm_init_avx
.p2align 5
_gcm_init_avx:
vzeroupper
vmovdqu (%rsi),%xmm2
@@ -1406,18 +1413,22 @@ L$init_start_avx:
vzeroupper
.byte 0xf3,0xc3
.globl _gcm_gmult_avx
.private_extern _gcm_gmult_avx
.p2align 5
_gcm_gmult_avx:
jmp L$_gmult_clmul
.globl _gcm_ghash_avx
.private_extern _gcm_ghash_avx
.p2align 5
_gcm_ghash_avx:
vzeroupper
vmovdqu (%rdi),%xmm10
@@ -1790,6 +1801,7 @@ L$tail_no_xor_avx:
vzeroupper
.byte 0xf3,0xc3
.p2align 6
L$bswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0


@@ -23,12 +23,10 @@
_CRYPTO_rdrand:
xorq %rax,%rax
.byte 0x48, 0x0f, 0xc7, 0xf1
.byte 72,15,199,242
adcq %rax,%rax
movq %rcx,0(%rdi)
movq %rdx,0(%rdi)
.byte 0xf3,0xc3
@@ -36,6 +34,7 @@ _CRYPTO_rdrand:
.globl _CRYPTO_rdrand_multiple8_buf
.private_extern _CRYPTO_rdrand_multiple8_buf
@@ -46,9 +45,7 @@ _CRYPTO_rdrand_multiple8_buf:
jz L$out
movq $8,%rdx
L$loop:
.byte 0x48, 0x0f, 0xc7, 0xf1
.byte 72,15,199,241
jnc L$err
movq %rcx,0(%rdi)
addq %rdx,%rdi
@@ -61,4 +58,5 @@ L$err:
xorq %rax,%rax
.byte 0xf3,0xc3
#endif
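
This hunk swaps the hand-assembled RDRAND encodings (the .byte sequences) and the register that receives the random word. A hedged sketch of the assumed C-level wrappers follows; the return convention is read off the assembly, where the carry flag from RDRAND is folded into %rax with adcq.

/* Sketch only: prototypes are assumptions based on the assembly, which
 * writes one 64-bit word per RDRAND and returns the carry flag in %rax. */
#include <stdint.h>
#include <stddef.h>

/* Writes 8 bytes of hardware randomness to |out|; returns 1 on success,
 * 0 if RDRAND reported failure. */
int CRYPTO_rdrand(uint8_t out[8]);

/* Fills |len| bytes of |buf|, where |len| is a multiple of 8; returns 1 on
 * success and 0 if any RDRAND invocation failed. */
int CRYPTO_rdrand_multiple8_buf(uint8_t *buf, size_t len);

/* Callers are expected to check the return value and fall back to another
 * entropy source rather than consuming |out| unconditionally. */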


@@ -1228,6 +1228,7 @@ L$mul_1024_epilogue:
.p2align 5
_rsaz_1024_red2norm_avx2:
subq $-128,%rsi
xorq %rax,%rax
movq -128(%rsi),%r8
@@ -1421,11 +1422,13 @@ _rsaz_1024_red2norm_avx2:
.byte 0xf3,0xc3
.globl _rsaz_1024_norm2red_avx2
.private_extern _rsaz_1024_norm2red_avx2
.p2align 5
_rsaz_1024_norm2red_avx2:
subq $-128,%rdi
movq (%rsi),%r8
movl $0x1fffffff,%eax
@@ -1579,11 +1582,13 @@ _rsaz_1024_norm2red_avx2:
movq %r8,184(%rdi)
.byte 0xf3,0xc3
.globl _rsaz_1024_scatter5_avx2
.private_extern _rsaz_1024_scatter5_avx2
.p2align 5
_rsaz_1024_scatter5_avx2:
vzeroupper
vmovdqu L$scatter_permd(%rip),%ymm5
shll $4,%edx
@@ -1605,6 +1610,7 @@ L$oop_scatter_1024:
.byte 0xf3,0xc3
.globl _rsaz_1024_gather5_avx2
.private_extern _rsaz_1024_gather5_avx2
@@ -1727,24 +1733,6 @@ L$oop_gather_1024:
L$SEH_end_rsaz_1024_gather5:
.globl _rsaz_avx2_eligible
.private_extern _rsaz_avx2_eligible
.p2align 5
_rsaz_avx2_eligible:
leaq _OPENSSL_ia32cap_P(%rip),%rax
movl 8(%rax),%eax
movl $524544,%ecx
movl $0,%edx
andl %eax,%ecx
cmpl $524544,%ecx
cmovel %edx,%eax
andl $32,%eax
shrl $5,%eax
.byte 0xf3,0xc3
.p2align 6
L$and_mask:
.quad 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff


@@ -121,6 +121,181 @@ L$enc_entry:
.p2align 4
_vpaes_encrypt_core_2x:
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa L$k_ipt(%rip),%xmm2
movdqa %xmm2,%xmm8
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
movdqu (%r9),%xmm5
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,208
.byte 102,68,15,56,0,198
movdqa L$k_ipt+16(%rip),%xmm0
movdqa %xmm0,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,247
pxor %xmm5,%xmm2
pxor %xmm5,%xmm8
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
leaq L$k_mc_backward(%rip),%r10
jmp L$enc2x_entry
.p2align 4
L$enc2x_loop:
movdqa L$k_sb1(%rip),%xmm4
movdqa L$k_sb1+16(%rip),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
movdqa L$k_sb2(%rip),%xmm5
movdqa %xmm5,%xmm13
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
.byte 102,69,15,56,0,232
movdqa (%r11,%r10,1),%xmm4
movdqa L$k_sb2+16(%rip),%xmm2
movdqa %xmm2,%xmm8
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm0,%xmm3
movdqa %xmm6,%xmm11
pxor %xmm5,%xmm2
pxor %xmm13,%xmm8
.byte 102,15,56,0,193
.byte 102,15,56,0,241
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
.byte 102,15,56,0,220
.byte 102,68,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
pxor %xmm6,%xmm11
.byte 102,15,56,0,193
.byte 102,15,56,0,241
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
pxor %xmm11,%xmm6
L$enc2x_entry:
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa L$k_inv+16(%rip),%xmm5
movdqa %xmm5,%xmm13
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,232
.byte 102,68,15,56,0,238
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm1,%xmm0
pxor %xmm7,%xmm6
.byte 102,15,56,0,217
.byte 102,68,15,56,0,223
movdqa %xmm10,%xmm4
movdqa %xmm10,%xmm12
pxor %xmm5,%xmm3
pxor %xmm13,%xmm11
.byte 102,15,56,0,224
.byte 102,68,15,56,0,230
movdqa %xmm10,%xmm2
movdqa %xmm10,%xmm8
pxor %xmm5,%xmm4
pxor %xmm13,%xmm12
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm0,%xmm2
pxor %xmm6,%xmm8
.byte 102,15,56,0,220
.byte 102,69,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
pxor %xmm7,%xmm11
jnz L$enc2x_loop
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
.byte 0xf3,0xc3
.p2align 4
_vpaes_decrypt_core:
@@ -637,6 +812,13 @@ L$schedule_mangle_both:
.p2align 4
_vpaes_set_encrypt_key:
#ifndef NDEBUG
#ifndef BORINGSSL_FIPS
movb $1,_BORINGSSL_function_hit+5(%rip)
#endif
#endif
movl %esi,%eax
shrl $5,%eax
addl $5,%eax
@@ -680,6 +862,12 @@ _vpaes_set_decrypt_key:
.p2align 4
_vpaes_encrypt:
#ifndef NDEBUG
#ifndef BORINGSSL_FIPS
movb $1,_BORINGSSL_function_hit+4(%rip)
#endif
#endif
movdqu (%rdi),%xmm0
call _vpaes_preheat
call _vpaes_encrypt_core
@@ -744,6 +932,69 @@ L$cbc_abort:
.byte 0xf3,0xc3
.globl _vpaes_ctr32_encrypt_blocks
.private_extern _vpaes_ctr32_encrypt_blocks
.p2align 4
_vpaes_ctr32_encrypt_blocks:
xchgq %rcx,%rdx
testq %rcx,%rcx
jz L$ctr32_abort
movdqu (%r8),%xmm0
movdqa L$ctr_add_one(%rip),%xmm8
subq %rdi,%rsi
call _vpaes_preheat
movdqa %xmm0,%xmm6
pshufb L$rev_ctr(%rip),%xmm6
testq $1,%rcx
jz L$ctr32_prep_loop
movdqu (%rdi),%xmm7
call _vpaes_encrypt_core
pxor %xmm7,%xmm0
paddd %xmm8,%xmm6
movdqu %xmm0,(%rsi,%rdi,1)
subq $1,%rcx
leaq 16(%rdi),%rdi
jz L$ctr32_done
L$ctr32_prep_loop:
movdqa %xmm6,%xmm14
movdqa %xmm6,%xmm15
paddd %xmm8,%xmm15
L$ctr32_loop:
movdqa L$rev_ctr(%rip),%xmm1
movdqa %xmm14,%xmm0
movdqa %xmm15,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
call _vpaes_encrypt_core_2x
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa L$ctr_add_two(%rip),%xmm3
pxor %xmm1,%xmm0
pxor %xmm2,%xmm6
paddd %xmm3,%xmm14
paddd %xmm3,%xmm15
movdqu %xmm0,(%rsi,%rdi,1)
movdqu %xmm6,16(%rsi,%rdi,1)
subq $2,%rcx
leaq 32(%rdi),%rdi
jnz L$ctr32_loop
L$ctr32_done:
L$ctr32_abort:
.byte 0xf3,0xc3
@@ -866,6 +1117,17 @@ L$k_dsbe:
L$k_dsbo:
.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
L$rev_ctr:
.quad 0x0706050403020100, 0x0c0d0e0f0b0a0908
L$ctr_add_one:
.quad 0x0000000000000000, 0x0000000100000000
L$ctr_add_two:
.quad 0x0000000000000000, 0x0000000200000000
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.p2align 6
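
The vpaes additions above introduce a two-block encryption core (_vpaes_encrypt_core_2x) and a counter-mode entry point (_vpaes_ctr32_encrypt_blocks) that processes pairs of blocks per iteration. Here is a hedged sketch of the assumed prototype, inferred from the argument registers used in the assembly (in, out, block count, key, IV) and the xchgq %rcx,%rdx at entry; it is not copied from a header.

/* Sketch only: the prototype is inferred from the SysV argument registers
 * used by _vpaes_ctr32_encrypt_blocks above. */
#include <stdint.h>
#include <stddef.h>
#include <openssl/aes.h>

/* Encrypts |blocks| full 16-byte blocks in CTR mode, incrementing only the
 * low (big-endian) 32 bits of the counter in |ivec| between blocks. */
void vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
                                size_t blocks, const AES_KEY *key,
                                const uint8_t ivec[16]);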


@@ -565,6 +565,7 @@ L$mul4x_epilogue:
.p2align 5
mul4x_internal:
shlq $5,%r9
movd 8(%rax),%xmm5
leaq L$inc(%rip),%rax
@@ -1087,6 +1088,7 @@ L$inner4x:
movq 24(%rbp),%r15
jmp L$sqr4x_sub_entry
.globl _bn_power5
.private_extern _bn_power5
@@ -1302,6 +1304,7 @@ __bn_sqr8x_internal:
leaq 32(%r10),%rbp
@@ -2007,8 +2010,10 @@ L$8x_no_tail:
.byte 0xf3,0xc3
.p2align 5
__bn_post4x_internal:
movq 0(%rbp),%r12
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
@@ -2060,11 +2065,13 @@ L$sqr4x_sub_entry:
negq %r9
.byte 0xf3,0xc3
.globl _bn_from_montgomery
.private_extern _bn_from_montgomery
.p2align 5
_bn_from_montgomery:
testl $7,%r9d
jz bn_from_mont8x
xorl %eax,%eax
@@ -2072,6 +2079,7 @@ _bn_from_montgomery:
.p2align 5
bn_from_mont8x:
@@ -2353,6 +2361,7 @@ L$mulx4x_epilogue:
.p2align 5
mulx4x_internal:
movq %r9,8(%rsp)
movq %r9,%r10
negq %r9
@@ -2773,6 +2782,7 @@ L$mulx4x_inner:
jmp L$sqrx4x_sub_entry
.p2align 5
bn_powerx5:
@@ -3529,7 +3539,9 @@ L$sqrx8x_no_tail:
.p2align 5
__bn_postx4x_internal:
movq 0(%rbp),%r12
movq %rcx,%r10
movq %rcx,%r9
@@ -3578,11 +3590,13 @@ L$sqrx4x_sub_entry:
.byte 0xf3,0xc3
.globl _bn_scatter5
.private_extern _bn_scatter5
.p2align 4
_bn_scatter5:
cmpl $0,%esi
jz L$scatter_epilogue
leaq (%rdx,%rcx,8),%rdx
@@ -3597,14 +3611,17 @@ L$scatter_epilogue:
.byte 0xf3,0xc3
.globl _bn_gather5
.private_extern _bn_gather5
.p2align 5
_bn_gather5:
L$SEH_begin_bn_gather5:
.byte 0x4c,0x8d,0x14,0x24
.byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00
leaq L$inc(%rip),%rax
andq $-16,%rsp
@@ -3758,9 +3775,11 @@ L$gather:
jnz L$gather
leaq (%r10),%rsp
.byte 0xf3,0xc3
L$SEH_end_bn_gather5:
.p2align 6
L$inc:
.long 0,0, 1,1


@@ -25,7 +25,7 @@
.private_extern _abi_test_trampoline
.p2align 4
_abi_test_trampoline:
L$abi_test_trampoline_begin:
L$abi_test_trampoline_seh_begin:
@@ -38,27 +38,27 @@ L$abi_test_trampoline_begin:
subq $120,%rsp
L$abi_test_trampoline_prolog_alloc:
L$abi_test_trampoline_seh_prolog_alloc:
movq %r8,48(%rsp)
movq %rbx,64(%rsp)
L$abi_test_trampoline_prolog_rbx:
L$abi_test_trampoline_seh_prolog_rbx:
movq %rbp,72(%rsp)
L$abi_test_trampoline_prolog_rbp:
L$abi_test_trampoline_seh_prolog_rbp:
movq %r12,80(%rsp)
L$abi_test_trampoline_prolog_r12:
L$abi_test_trampoline_seh_prolog_r12:
movq %r13,88(%rsp)
L$abi_test_trampoline_prolog_r13:
L$abi_test_trampoline_seh_prolog_r13:
movq %r14,96(%rsp)
L$abi_test_trampoline_prolog_r14:
L$abi_test_trampoline_seh_prolog_r14:
movq %r15,104(%rsp)
L$abi_test_trampoline_prolog_r15:
L$abi_test_trampoline_prolog_end:
L$abi_test_trampoline_seh_prolog_r15:
L$abi_test_trampoline_seh_prolog_end:
movq 0(%rsi),%rbx
movq 8(%rsi),%rbp
movq 16(%rsi),%r12
@@ -182,7 +182,7 @@ L$call_done:
.byte 0xf3,0xc3
L$abi_test_trampoline_end:
L$abi_test_trampoline_seh_end:
.globl _abi_test_clobber_rax
@@ -441,11 +441,18 @@ _abi_test_clobber_xmm15:
.p2align 4
_abi_test_bad_unwind_wrong_register:
L$abi_test_bad_unwind_wrong_register_seh_begin:
pushq %r12
L$abi_test_bad_unwind_wrong_register_seh_push_r13:
nop
popq %r12
.byte 0xf3,0xc3
L$abi_test_bad_unwind_wrong_register_seh_end:
@@ -458,20 +465,24 @@ _abi_test_bad_unwind_wrong_register:
.p2align 4
_abi_test_bad_unwind_temporary:
L$abi_test_bad_unwind_temporary_seh_begin:
pushq %r12
L$abi_test_bad_unwind_temporary_seh_push_r12:
movq %r12,%rax
incq %rax
movq %rax,(%rsp)
incq %r12
movq %r12,(%rsp)
decq %r12
movq %r12,(%rsp)
popq %r12
.byte 0xf3,0xc3
L$abi_test_bad_unwind_temporary_seh_end:
@@ -485,7 +496,7 @@ _abi_test_get_and_clear_direction_flag:
pushfq
popq %rax
andq $0x400,%rax
shlq $10,%rax
shrq $10,%rax
cld
.byte 0xf3,0xc3

File diff suppressed because it is too large.