libbb/sha1: in unrolled x86-64 code, pass initial W[] in registers, not on stack
This can be faster on some CPUs. On Skylake, evidently load latency
from L1 (or store-to-load forwarding in LSU) is fast enough to
completely hide memory reference latencies here.

function                                             old     new   delta
sha1_process_block64                                3495    3514     +19

Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
commit 205042c07a (parent 99e22d230d)
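
The scheme is easy to model in C: after the bswapq+rolq prologue each
64-bit GPR (rsi, then r8..r14) holds a byteswapped pair W[n+1]:W[n];
an even-numbered round consumes the low 32 bits (a single LEA folds
RCONST and W[n] into e), and a shrq $32 then exposes the odd word for
the following round. A minimal C sketch of rounds 0..15 under that
convention (illustrative, hypothetical names, not the generated code):

#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned n)
{
	return (x << n) | (x >> (32 - n));
}

/* Rounds 0..15 of SHA-1 with the message schedule kept as eight
 * 64-bit pairs: w64[k] = W[2k+1]:W[2k], the same layout rsi,r8..r14
 * have after the bswapq+rolq sequence in this commit. */
static void sha1_rd1_rounds(uint32_t h[5], uint64_t w64[8])
{
	uint32_t a = h[0], b = h[1], c = h[2], d = h[3], e = h[4];
	int n;

	for (n = 0; n < 16; n++) {
		/* even n: low half ("leal RCONST(%re,%rN)"); the
		 * "shrq $32" then brings W[n+1] down for the odd round */
		uint32_t wn = (n & 1) ? (uint32_t)(w64[n / 2] >> 32)
		                      : (uint32_t)w64[n / 2];
		uint32_t t = rotl32(a, 5) + (((c ^ d) & b) ^ d)
		           + e + 0x5A827999 + wn;
		e = d;
		d = c;
		c = rotl32(b, 30);	/* the asm's "rorl $2" */
		b = a;
		a = t;
	}
	h[0] = a; h[1] = b; h[2] = c; h[3] = d; h[4] = e;
}

The unrolled assembly renames registers instead of rotating a..e, but
the per-round data flow is the same.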
--- a/libbb/hash_md5_sha_x86-64.S
+++ b/libbb/hash_md5_sha_x86-64.S
@@ -1,7 +1,7 @@
 ### Generated by hash_md5_sha_x86-64.S.sh ###
 
 #if CONFIG_SHA1_SMALL == 0 && defined(__GNUC__) && defined(__x86_64__)
-.section .text.sha1_process_block64,"ax",@progbits
+.section .text.sha1_process_block64, "ax", @progbits
 .globl sha1_process_block64
 .hidden sha1_process_block64
 .type sha1_process_block64, @function
@@ -10,7 +10,7 @@
 sha1_process_block64:
 	pushq	%rbp	# 1 byte insn
 	pushq	%rbx	# 1 byte insn
-	pushq	%r15	# 2 byte insn
+#	pushq	%r15	# 2 byte insn
 	pushq	%r14	# 2 byte insn
 	pushq	%r13	# 2 byte insn
 	pushq	%r12	# 2 byte insn
@@ -19,7 +19,8 @@ sha1_process_block64:
 #Register and stack use:
 # eax..edx: a..d
 # ebp: e
-# esi,edi: temps
+# esi,edi,r8..r14: temps
+# r15: unused
 # xmm0..xmm3: W[]
 # xmm4,xmm5: temps
 # xmm6: current round constant
@@ -33,147 +34,148 @@ sha1_process_block64:
 
 	movaps	rconst0x5A827999(%rip), %xmm6
 
-	# For round 1, steps 0 and 8..15, we pass W[0,8..15] in esi,r8..r15
-	# instead of spilling them to stack.
-	# (We lose parallelized addition of RCONST, but LEA
-	# can do two additions at once, so...)
+	# Load W[] to xmm registers, byteswapping on the fly.
+	#
+	# For iterations 0..15, we pass W[] in rsi,r8..r14
+	# for use in RD1A's instead of spilling them to stack.
+	# We lose parallelized addition of RCONST, but LEA
+	# can do two additions at once, so it's probably a wash.
+	# (We use rsi instead of rN because this makes two
+	# LEAs in two first RD1A's shorter by one byte).
 	movq	4*0(%rdi), %rsi
-	movq	4*2(%rdi), %r10
+	movq	4*2(%rdi), %r8
 	bswapq	%rsi
-	bswapq	%r10
+	bswapq	%r8
 	rolq	$32, %rsi	# rsi = W[1]:W[0]
-	rolq	$32, %r10
+	rolq	$32, %r8	# r8 = W[3]:W[2]
 	movq	%rsi, %xmm0
-	movq	%r10, %xmm4
-	punpcklqdq %xmm4, %xmm0	# xmm0 = r10:rsi = (W[0],W[1],W[2],W[3])
-	movaps	%xmm0, %xmm4
-	paddd	%xmm6, %xmm4
-	movups	%xmm4, -64+4*0(%rsp)
+	movq	%r8, %xmm4
+	punpcklqdq %xmm4, %xmm0	# xmm0 = r8:rsi = (W[0],W[1],W[2],W[3])
+#	movaps	%xmm0, %xmm4	# add RCONST, spill to stack
+#	paddd	%xmm6, %xmm4
+#	movups	%xmm4, -64+16*0(%rsp)
 
-	movq	4*4(%rdi), %r8
+	movq	4*4(%rdi), %r9
 	movq	4*6(%rdi), %r10
-	bswapq	%r8
+	bswapq	%r9
 	bswapq	%r10
-	rolq	$32, %r8
-	rolq	$32, %r10
-	movq	%r8, %xmm1
+	rolq	$32, %r9	# r9 = W[5]:W[4]
+	rolq	$32, %r10	# r10 = W[7]:W[6]
+	movq	%r9, %xmm1
 	movq	%r10, %xmm4
-	punpcklqdq %xmm4, %xmm1	# xmm1 = r10:r8 = (W[4],W[5],W[6],W[7])
-	movaps	%xmm1, %xmm4
-	paddd	%xmm6, %xmm4
-	movups	%xmm4, -64+4*4(%rsp)
+	punpcklqdq %xmm4, %xmm1	# xmm1 = r10:r9 = (W[4],W[5],W[6],W[7])
 
-	movq	4*8(%rdi), %r8
-	movq	4*10(%rdi), %r10
-	bswapq	%r8
-	bswapq	%r10
-	movl	%r8d, %r9d	# r9d = W[9]
-	rolq	$32, %r8	# r8 = W[9]:W[8]
-	movl	%r10d, %r11d	# r11d = W[11]
-	rolq	$32, %r10	# r10 = W[11]:W[10]
-	movq	%r8, %xmm2
-	movq	%r10, %xmm4
-	punpcklqdq %xmm4, %xmm2	# xmm2 = r10:r8 = (W[8],W[9],W[10],W[11])
+	movq	4*8(%rdi), %r11
+	movq	4*10(%rdi), %r12
+	bswapq	%r11
+	bswapq	%r12
+	rolq	$32, %r11	# r11 = W[9]:W[8]
+	rolq	$32, %r12	# r12 = W[11]:W[10]
+	movq	%r11, %xmm2
+	movq	%r12, %xmm4
+	punpcklqdq %xmm4, %xmm2	# xmm2 = r12:r11 = (W[8],W[9],W[10],W[11])
 
-	movq	4*12(%rdi), %r12
+	movq	4*12(%rdi), %r13
 	movq	4*14(%rdi), %r14
-	bswapq	%r12
+	bswapq	%r13
 	bswapq	%r14
-	movl	%r12d, %r13d	# r13d = W[13]
-	rolq	$32, %r12	# r12 = W[13]:W[12]
-	movl	%r14d, %r15d	# r15d = W[15]
+	rolq	$32, %r13	# r13 = W[13]:W[12]
 	rolq	$32, %r14	# r14 = W[15]:W[14]
-	movq	%r12, %xmm3
+	movq	%r13, %xmm3
 	movq	%r14, %xmm4
-	punpcklqdq %xmm4, %xmm3	# xmm3 = r14:r12 = (W[12],W[13],W[14],W[15])
+	punpcklqdq %xmm4, %xmm3	# xmm3 = r14:r13 = (W[12],W[13],W[14],W[15])
 
 # 0
 	leal	0x5A827999(%rbp,%rsi), %ebp	# e += RCONST + W[n]
+	shrq	$32, %rsi
 	movl	%ecx, %edi		# c
 	xorl	%edx, %edi		# ^d
 	andl	%ebx, %edi		# &b
 	xorl	%edx, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebp		# e += (((c ^ d) & b) ^ d)
-	movl	%eax, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %ebp		# e += rotl32(a,5)
+	movl	%eax, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %ebp		# e += rotl32(a,5)
 	rorl	$2, %ebx		# b = rotl32(b,30)
 # 1
-	addl	-64+4*1(%rsp), %edx	# e += RCONST + W[n]
+	leal	0x5A827999(%rdx,%rsi), %edx	# e += RCONST + W[n]
 	movl	%ebx, %edi		# c
 	xorl	%ecx, %edi		# ^d
 	andl	%eax, %edi		# &b
 	xorl	%ecx, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %edx		# e += (((c ^ d) & b) ^ d)
-	movl	%ebp, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %edx		# e += rotl32(a,5)
+	movl	%ebp, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %edx		# e += rotl32(a,5)
 	rorl	$2, %eax		# b = rotl32(b,30)
 # 2
-	addl	-64+4*2(%rsp), %ecx	# e += RCONST + W[n]
+	leal	0x5A827999(%rcx,%r8), %ecx	# e += RCONST + W[n]
+	shrq	$32, %r8
 	movl	%eax, %edi		# c
 	xorl	%ebx, %edi		# ^d
 	andl	%ebp, %edi		# &b
 	xorl	%ebx, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %ecx		# e += (((c ^ d) & b) ^ d)
-	movl	%edx, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %ecx		# e += rotl32(a,5)
+	movl	%edx, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %ecx		# e += rotl32(a,5)
 	rorl	$2, %ebp		# b = rotl32(b,30)
 # 3
-	addl	-64+4*3(%rsp), %ebx	# e += RCONST + W[n]
+	leal	0x5A827999(%rbx,%r8), %ebx	# e += RCONST + W[n]
 	movl	%ebp, %edi		# c
 	xorl	%eax, %edi		# ^d
 	andl	%edx, %edi		# &b
 	xorl	%eax, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebx		# e += (((c ^ d) & b) ^ d)
-	movl	%ecx, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %ebx		# e += rotl32(a,5)
+	movl	%ecx, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %ebx		# e += rotl32(a,5)
 	rorl	$2, %edx		# b = rotl32(b,30)
 # 4
-	addl	-64+4*4(%rsp), %eax	# e += RCONST + W[n]
+	leal	0x5A827999(%rax,%r9), %eax	# e += RCONST + W[n]
+	shrq	$32, %r9
 	movl	%edx, %edi		# c
 	xorl	%ebp, %edi		# ^d
 	andl	%ecx, %edi		# &b
 	xorl	%ebp, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %eax		# e += (((c ^ d) & b) ^ d)
-	movl	%ebx, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %eax		# e += rotl32(a,5)
+	movl	%ebx, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %eax		# e += rotl32(a,5)
 	rorl	$2, %ecx		# b = rotl32(b,30)
 # 5
-	addl	-64+4*5(%rsp), %ebp	# e += RCONST + W[n]
+	leal	0x5A827999(%rbp,%r9), %ebp	# e += RCONST + W[n]
 	movl	%ecx, %edi		# c
 	xorl	%edx, %edi		# ^d
 	andl	%ebx, %edi		# &b
 	xorl	%edx, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebp		# e += (((c ^ d) & b) ^ d)
-	movl	%eax, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %ebp		# e += rotl32(a,5)
+	movl	%eax, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %ebp		# e += rotl32(a,5)
 	rorl	$2, %ebx		# b = rotl32(b,30)
 # 6
-	addl	-64+4*6(%rsp), %edx	# e += RCONST + W[n]
+	leal	0x5A827999(%rdx,%r10), %edx	# e += RCONST + W[n]
+	shrq	$32, %r10
 	movl	%ebx, %edi		# c
 	xorl	%ecx, %edi		# ^d
 	andl	%eax, %edi		# &b
 	xorl	%ecx, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %edx		# e += (((c ^ d) & b) ^ d)
-	movl	%ebp, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %edx		# e += rotl32(a,5)
+	movl	%ebp, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %edx		# e += rotl32(a,5)
 	rorl	$2, %eax		# b = rotl32(b,30)
 # 7
-	addl	-64+4*7(%rsp), %ecx	# e += RCONST + W[n]
+	leal	0x5A827999(%rcx,%r10), %ecx	# e += RCONST + W[n]
 	movl	%eax, %edi		# c
 	xorl	%ebx, %edi		# ^d
 	andl	%ebp, %edi		# &b
 	xorl	%ebx, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %ecx		# e += (((c ^ d) & b) ^ d)
-	movl	%edx, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %ecx		# e += rotl32(a,5)
+	movl	%edx, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %ecx		# e += rotl32(a,5)
 	rorl	$2, %ebp		# b = rotl32(b,30)
 # PREP %xmm0 %xmm1 %xmm2 %xmm3 -64+16*0(%rsp)
 	movaps	%xmm3, %xmm4
@@ -186,9 +188,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm0, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm0, %xmm0	# shift left by 1
 	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -201,48 +203,50 @@ sha1_process_block64:
 	paddd	%xmm6, %xmm5
 	movups	%xmm5, -64+16*0(%rsp)
 # 8
-	leal	0x5A827999(%rbx,%r8), %ebx	# e += RCONST + W[n]
+	leal	0x5A827999(%rbx,%r11), %ebx	# e += RCONST + W[n]
+	shrq	$32, %r11
 	movl	%ebp, %edi		# c
 	xorl	%eax, %edi		# ^d
 	andl	%edx, %edi		# &b
 	xorl	%eax, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebx		# e += (((c ^ d) & b) ^ d)
-	movl	%ecx, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %ebx		# e += rotl32(a,5)
+	movl	%ecx, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %ebx		# e += rotl32(a,5)
 	rorl	$2, %edx		# b = rotl32(b,30)
 # 9
-	leal	0x5A827999(%rax,%r9), %eax	# e += RCONST + W[n]
+	leal	0x5A827999(%rax,%r11), %eax	# e += RCONST + W[n]
 	movl	%edx, %edi		# c
 	xorl	%ebp, %edi		# ^d
 	andl	%ecx, %edi		# &b
 	xorl	%ebp, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %eax		# e += (((c ^ d) & b) ^ d)
-	movl	%ebx, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %eax		# e += rotl32(a,5)
+	movl	%ebx, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %eax		# e += rotl32(a,5)
 	rorl	$2, %ecx		# b = rotl32(b,30)
 # 10
-	leal	0x5A827999(%rbp,%r10), %ebp	# e += RCONST + W[n]
+	leal	0x5A827999(%rbp,%r12), %ebp	# e += RCONST + W[n]
+	shrq	$32, %r12
 	movl	%ecx, %edi		# c
 	xorl	%edx, %edi		# ^d
 	andl	%ebx, %edi		# &b
 	xorl	%edx, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebp		# e += (((c ^ d) & b) ^ d)
-	movl	%eax, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %ebp		# e += rotl32(a,5)
+	movl	%eax, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %ebp		# e += rotl32(a,5)
 	rorl	$2, %ebx		# b = rotl32(b,30)
 # 11
-	leal	0x5A827999(%rdx,%r11), %edx	# e += RCONST + W[n]
+	leal	0x5A827999(%rdx,%r12), %edx	# e += RCONST + W[n]
 	movl	%ebx, %edi		# c
 	xorl	%ecx, %edi		# ^d
 	andl	%eax, %edi		# &b
 	xorl	%ecx, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %edx		# e += (((c ^ d) & b) ^ d)
-	movl	%ebp, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %edx		# e += rotl32(a,5)
+	movl	%ebp, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %edx		# e += rotl32(a,5)
 	rorl	$2, %eax		# b = rotl32(b,30)
 	movaps	rconst0x6ED9EBA1(%rip), %xmm6
 # PREP %xmm1 %xmm2 %xmm3 %xmm0 -64+16*1(%rsp)
@@ -256,9 +260,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm1, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm1, %xmm1	# shift left by 1
 	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -271,15 +275,16 @@ sha1_process_block64:
 	paddd	%xmm6, %xmm5
 	movups	%xmm5, -64+16*1(%rsp)
 # 12
-	leal	0x5A827999(%rcx,%r12), %ecx	# e += RCONST + W[n]
+	leal	0x5A827999(%rcx,%r13), %ecx	# e += RCONST + W[n]
+	shrq	$32, %r13
 	movl	%eax, %edi		# c
 	xorl	%ebx, %edi		# ^d
 	andl	%ebp, %edi		# &b
 	xorl	%ebx, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %ecx		# e += (((c ^ d) & b) ^ d)
-	movl	%edx, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %ecx		# e += rotl32(a,5)
+	movl	%edx, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %ecx		# e += rotl32(a,5)
 	rorl	$2, %ebp		# b = rotl32(b,30)
 # 13
 	leal	0x5A827999(%rbx,%r13), %ebx	# e += RCONST + W[n]
@@ -288,31 +293,32 @@ sha1_process_block64:
 	andl	%edx, %edi		# &b
 	xorl	%eax, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebx		# e += (((c ^ d) & b) ^ d)
-	movl	%ecx, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %ebx		# e += rotl32(a,5)
+	movl	%ecx, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %ebx		# e += rotl32(a,5)
 	rorl	$2, %edx		# b = rotl32(b,30)
 # 14
 	leal	0x5A827999(%rax,%r14), %eax	# e += RCONST + W[n]
+	shrq	$32, %r14
 	movl	%edx, %edi		# c
 	xorl	%ebp, %edi		# ^d
 	andl	%ecx, %edi		# &b
 	xorl	%ebp, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %eax		# e += (((c ^ d) & b) ^ d)
-	movl	%ebx, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %eax		# e += rotl32(a,5)
+	movl	%ebx, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %eax		# e += rotl32(a,5)
 	rorl	$2, %ecx		# b = rotl32(b,30)
 # 15
-	leal	0x5A827999(%rbp,%r15), %ebp	# e += RCONST + W[n]
+	leal	0x5A827999(%rbp,%r14), %ebp	# e += RCONST + W[n]
 	movl	%ecx, %edi		# c
 	xorl	%edx, %edi		# ^d
 	andl	%ebx, %edi		# &b
 	xorl	%edx, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebp		# e += (((c ^ d) & b) ^ d)
-	movl	%eax, %esi		#
-	roll	$5, %esi		# rotl32(a,5)
-	addl	%esi, %ebp		# e += rotl32(a,5)
+	movl	%eax, %edi		#
+	roll	$5, %edi		# rotl32(a,5)
+	addl	%edi, %ebp		# e += rotl32(a,5)
 	rorl	$2, %ebx		# b = rotl32(b,30)
 # PREP %xmm2 %xmm3 %xmm0 %xmm1 -64+16*2(%rsp)
 	movaps	%xmm1, %xmm4
@@ -325,9 +331,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm2, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm2, %xmm2	# shift left by 1
 	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -394,9 +400,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm3, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm3, %xmm3	# shift left by 1
 	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -459,9 +465,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm0, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm0, %xmm0	# shift left by 1
 	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -524,9 +530,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm1, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm1, %xmm1	# shift left by 1
 	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -590,9 +596,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm2, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm2, %xmm2	# shift left by 1
 	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -655,9 +661,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm3, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm3, %xmm3	# shift left by 1
 	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -720,9 +726,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm0, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm0, %xmm0	# shift left by 1
 	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -797,9 +803,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm1, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm1, %xmm1	# shift left by 1
 	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -874,9 +880,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm2, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm2, %xmm2	# shift left by 1
 	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -952,9 +958,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm3, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm3, %xmm3	# shift left by 1
 	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -1029,9 +1035,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm0, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm0, %xmm0	# shift left by 1
 	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -1106,9 +1112,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm1, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm1, %xmm1	# shift left by 1
 	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -1171,9 +1177,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm2, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm2, %xmm2	# shift left by 1
 	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -1236,9 +1242,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm3, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
 	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	%xmm3, %xmm3	# shift left by 1
 	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -1378,7 +1384,7 @@ sha1_process_block64:
 	addl	%ebx, 84(%rdi)	# ctx->hash[1] += b
 	popq	%r14		#
 	addl	%ecx, 88(%rdi)	# ctx->hash[2] += c
-	popq	%r15		#
+#	popq	%r15		#
 	addl	%edx, 92(%rdi)	# ctx->hash[3] += d
 	popq	%rbx		#
 	addl	%ebp, 96(%rdi)	# ctx->hash[4] += e
--- a/libbb/hash_md5_sha_x86-64.S.sh
+++ b/libbb/hash_md5_sha_x86-64.S.sh
@@ -102,7 +102,7 @@ echo \
 "### Generated by hash_md5_sha_x86-64.S.sh ###
 
 #if CONFIG_SHA1_SMALL == 0 && defined(__GNUC__) && defined(__x86_64__)
-.section .text.sha1_process_block64,\"ax\",@progbits
+.section .text.sha1_process_block64, \"ax\", @progbits
 .globl sha1_process_block64
 .hidden sha1_process_block64
 .type sha1_process_block64, @function
@@ -111,7 +111,7 @@ echo \
 sha1_process_block64:
 	pushq	%rbp	# 1 byte insn
 	pushq	%rbx	# 1 byte insn
-	pushq	%r15	# 2 byte insn
+#	pushq	%r15	# 2 byte insn
 	pushq	%r14	# 2 byte insn
 	pushq	%r13	# 2 byte insn
 	pushq	%r12	# 2 byte insn
@@ -120,7 +120,8 @@ sha1_process_block64:
 #Register and stack use:
 # eax..edx: a..d
 # ebp: e
-# esi,edi: temps
+# esi,edi,r8..r14: temps
+# r15: unused
 # xmm0..xmm3: W[]
 # xmm4,xmm5: temps
 # xmm6: current round constant
@@ -134,59 +135,56 @@ sha1_process_block64:
 
 	movaps	rconst0x5A827999(%rip), $xmmRCONST
 
-	# For round 1, steps 0 and 8..15, we pass W[0,8..15] in esi,r8..r15
-	# instead of spilling them to stack.
-	# (We lose parallelized addition of RCONST, but LEA
-	# can do two additions at once, so...)
+	# Load W[] to xmm registers, byteswapping on the fly.
+	#
+	# For iterations 0..15, we pass W[] in rsi,r8..r14
+	# for use in RD1A's instead of spilling them to stack.
+	# We lose parallelized addition of RCONST, but LEA
+	# can do two additions at once, so it's probably a wash.
+	# (We use rsi instead of rN because this makes two
+	# LEAs in two first RD1A's shorter by one byte).
 	movq	4*0(%rdi), %rsi
-	movq	4*2(%rdi), %r10
+	movq	4*2(%rdi), %r8
 	bswapq	%rsi
-	bswapq	%r10
+	bswapq	%r8
 	rolq	\$32, %rsi	# rsi = W[1]:W[0]
-	rolq	\$32, %r10
+	rolq	\$32, %r8	# r8 = W[3]:W[2]
 	movq	%rsi, %xmm0
-	movq	%r10, $xmmT1
-	punpcklqdq $xmmT1, %xmm0	# xmm0 = r10:rsi = (W[0],W[1],W[2],W[3])
-	movaps	%xmm0, $xmmT1
-	paddd	$xmmRCONST, $xmmT1
-	movups	$xmmT1, -64+4*0(%rsp)
+	movq	%r8, $xmmT1
+	punpcklqdq $xmmT1, %xmm0	# xmm0 = r8:rsi = (W[0],W[1],W[2],W[3])
+#	movaps	%xmm0, $xmmT1	# add RCONST, spill to stack
+#	paddd	$xmmRCONST, $xmmT1
+#	movups	$xmmT1, -64+16*0(%rsp)
 
-	movq	4*4(%rdi), %r8
+	movq	4*4(%rdi), %r9
 	movq	4*6(%rdi), %r10
-	bswapq	%r8
+	bswapq	%r9
 	bswapq	%r10
-	rolq	\$32, %r8
-	rolq	\$32, %r10
-	movq	%r8, %xmm1
+	rolq	\$32, %r9	# r9 = W[5]:W[4]
+	rolq	\$32, %r10	# r10 = W[7]:W[6]
+	movq	%r9, %xmm1
 	movq	%r10, $xmmT1
-	punpcklqdq $xmmT1, %xmm1	# xmm1 = r10:r8 = (W[4],W[5],W[6],W[7])
-	movaps	%xmm1, $xmmT1
-	paddd	$xmmRCONST, $xmmT1
-	movups	$xmmT1, -64+4*4(%rsp)
+	punpcklqdq $xmmT1, %xmm1	# xmm1 = r10:r9 = (W[4],W[5],W[6],W[7])
 
-	movq	4*8(%rdi), %r8
-	movq	4*10(%rdi), %r10
-	bswapq	%r8
-	bswapq	%r10
-	movl	%r8d, %r9d	# r9d = W[9]
-	rolq	\$32, %r8	# r8 = W[9]:W[8]
-	movl	%r10d, %r11d	# r11d = W[11]
-	rolq	\$32, %r10	# r10 = W[11]:W[10]
-	movq	%r8, %xmm2
-	movq	%r10, $xmmT1
-	punpcklqdq $xmmT1, %xmm2	# xmm2 = r10:r8 = (W[8],W[9],W[10],W[11])
+	movq	4*8(%rdi), %r11
+	movq	4*10(%rdi), %r12
+	bswapq	%r11
+	bswapq	%r12
+	rolq	\$32, %r11	# r11 = W[9]:W[8]
+	rolq	\$32, %r12	# r12 = W[11]:W[10]
+	movq	%r11, %xmm2
+	movq	%r12, $xmmT1
+	punpcklqdq $xmmT1, %xmm2	# xmm2 = r12:r11 = (W[8],W[9],W[10],W[11])
 
-	movq	4*12(%rdi), %r12
+	movq	4*12(%rdi), %r13
 	movq	4*14(%rdi), %r14
-	bswapq	%r12
+	bswapq	%r13
 	bswapq	%r14
-	movl	%r12d, %r13d	# r13d = W[13]
-	rolq	\$32, %r12	# r12 = W[13]:W[12]
-	movl	%r14d, %r15d	# r15d = W[15]
+	rolq	\$32, %r13	# r13 = W[13]:W[12]
 	rolq	\$32, %r14	# r14 = W[15]:W[14]
-	movq	%r12, %xmm3
+	movq	%r13, %xmm3
 	movq	%r14, $xmmT1
-	punpcklqdq $xmmT1, %xmm3	# xmm3 = r14:r12 = (W[12],W[13],W[14],W[15])
+	punpcklqdq $xmmT1, %xmm3	# xmm3 = r14:r13 = (W[12],W[13],W[14],W[15])
 "
 
 PREP() {
@@ -215,9 +213,9 @@ echo "# PREP $@
 	movaps	$xmmW0, $xmmT2
 
 	xorps	$xmmT1, $xmmT1	# rol(W0,1):
 	pcmpgtd	$xmmW0, $xmmT1	# ffffffff for elements <0 (ones with msb bit 1)
 	paddd	$xmmW0, $xmmW0	# shift left by 1
 	psubd	$xmmT1, $xmmW0	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 
 	pslldq	\$12, $xmmT2	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
@@ -256,23 +254,28 @@ RD1A() {
 local a=$1;local b=$2;local c=$3;local d=$4;local e=$5
 local n=$(($6))
 local n0=$(((n+0) & 15))
+local rN=$((7+n0/2))
 echo "
 # $n
 ";test $n0 = 0 && echo "
 	leal	$RCONST(%r$e,%rsi), %e$e # e += RCONST + W[n]
-";test $n0 != 0 && test $n0 -lt 8 && echo "
-	addl	-64+4*$n0(%rsp), %e$e # e += RCONST + W[n]
-";test $n0 -ge 8 && echo "
-	leal	$RCONST(%r$e,%r$n0), %e$e # e += RCONST + W[n]
+	shrq	\$32, %rsi
+";test $n0 = 1 && echo "
+	leal	$RCONST(%r$e,%rsi), %e$e # e += RCONST + W[n]
+";test $n0 -ge 2 && test $((n0 & 1)) = 0 && echo "
+	leal	$RCONST(%r$e,%r$rN), %e$e # e += RCONST + W[n]
+	shrq	\$32, %r$rN
+";test $n0 -ge 2 && test $((n0 & 1)) = 1 && echo "
+	leal	$RCONST(%r$e,%r$rN), %e$e # e += RCONST + W[n]
 ";echo "
 	movl	%e$c, %edi		# c
 	xorl	%e$d, %edi		# ^d
 	andl	%e$b, %edi		# &b
 	xorl	%e$d, %edi		# (((c ^ d) & b) ^ d)
 	addl	%edi, %e$e		# e += (((c ^ d) & b) ^ d)
-	movl	%e$a, %esi		#
-	roll	\$5, %esi		# rotl32(a,5)
-	addl	%esi, %e$e		# e += rotl32(a,5)
+	movl	%e$a, %edi		#
+	roll	\$5, %edi		# rotl32(a,5)
+	addl	%edi, %e$e		# e += rotl32(a,5)
 	rorl	\$2, %e$b		# b = rotl32(b,30)
 "
 }
@@ -420,7 +423,7 @@ echo "
 	addl	%ebx, 84(%rdi)	# ctx->hash[1] += b
 	popq	%r14		#
 	addl	%ecx, 88(%rdi)	# ctx->hash[2] += c
-	popq	%r15		#
+#	popq	%r15		#
 	addl	%edx, 92(%rdi)	# ctx->hash[3] += d
 	popq	%rbx		#
 	addl	%ebp, 96(%rdi)	# ctx->hash[4] += e
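
For reference, the register-selection rule the reworked RD1A() encodes
(local rN=$((7+n0/2))): step n takes W[n] from rsi for n=0..1 and from
r(7+n/2) for n=2..15; even steps read the low half and shift it away,
odd steps read what the shift left behind. A tiny hypothetical C
program that prints the mapping:

#include <stdio.h>

/* Prints which GPR half carries W[n] for n = 0..15, mirroring the
 * generator's rN = 7 + n0/2 choice. Names here are made up. */
int main(void)
{
	int n;

	for (n = 0; n < 16; n++) {
		if (n < 2)
			printf("W[%2d]: %s half of %%rsi\n",
				n, (n & 1) ? "high" : "low");
		else
			printf("W[%2d]: %s half of %%r%d\n",
				n, (n & 1) ? "high" : "low", 7 + n / 2);
	}
	return 0;
}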