diff --git a/libbb/hash_md5_sha_x86-64.S b/libbb/hash_md5_sha_x86-64.S
index 069a18719..743269d98 100644
--- a/libbb/hash_md5_sha_x86-64.S
+++ b/libbb/hash_md5_sha_x86-64.S
@@ -1,7 +1,7 @@
 ### Generated by hash_md5_sha_x86-64.S.sh ###
 
 #if CONFIG_SHA1_SMALL == 0 && defined(__GNUC__) && defined(__x86_64__)
-	.section	.text.sha1_process_block64,"ax",@progbits
+	.section	.text.sha1_process_block64, "ax", @progbits
 	.globl	sha1_process_block64
 	.hidden	sha1_process_block64
 	.type	sha1_process_block64, @function
@@ -10,7 +10,7 @@
 sha1_process_block64:
 	pushq	%rbp	# 1 byte insn
 	pushq	%rbx	# 1 byte insn
-	pushq	%r15	# 2 byte insn
+#	pushq	%r15	# 2 byte insn
 	pushq	%r14	# 2 byte insn
 	pushq	%r13	# 2 byte insn
 	pushq	%r12	# 2 byte insn
@@ -19,7 +19,8 @@ sha1_process_block64:
 #Register and stack use:
 # eax..edx: a..d
 # ebp: e
-# esi,edi: temps
+# esi,edi,r8..r14: temps
+# r15: unused
 # xmm0..xmm3: W[]
 # xmm4,xmm5: temps
 # xmm6: current round constant
@@ -33,147 +34,148 @@ sha1_process_block64:
 	movaps	rconst0x5A827999(%rip), %xmm6
 
-	# For round 1, steps 0 and 8..15, we pass W[0,8..15] in esi,r8..r15
-	# instead of spilling them to stack.
-	# (We lose parallelized addition of RCONST, but LEA
-	# can do two additions at once, so...)
+	# Load W[] to xmm registers, byteswapping on the fly.
+	#
+	# For iterations 0..15, we pass W[] in rsi,r8..r14
+	# for use in RD1A's instead of spilling them to stack.
+	# We lose parallelized addition of RCONST, but LEA
+	# can do two additions at once, so it's probably a wash.
+	# (We use rsi instead of rN because this makes two
+	# LEAs in two first RD1A's shorter by one byte).
 	movq	4*0(%rdi), %rsi
-	movq	4*2(%rdi), %r10
+	movq	4*2(%rdi), %r8
 	bswapq	%rsi
-	bswapq	%r10
+	bswapq	%r8
 	rolq	$32, %rsi	# rsi = W[1]:W[0]
-	rolq	$32, %r10
+	rolq	$32, %r8	# r8 = W[3]:W[2]
 	movq	%rsi, %xmm0
-	movq	%r10, %xmm4
-	punpcklqdq %xmm4, %xmm0	# xmm0 = r10:rsi = (W[0],W[1],W[2],W[3])
-	movaps	%xmm0, %xmm4
-	paddd	%xmm6, %xmm4
-	movups	%xmm4, -64+4*0(%rsp)
+	movq	%r8, %xmm4
+	punpcklqdq %xmm4, %xmm0	# xmm0 = r8:rsi = (W[0],W[1],W[2],W[3])
+#	movaps	%xmm0, %xmm4	# add RCONST, spill to stack
+#	paddd	%xmm6, %xmm4
+#	movups	%xmm4, -64+16*0(%rsp)
 
-	movq	4*4(%rdi), %r8
+	movq	4*4(%rdi), %r9
 	movq	4*6(%rdi), %r10
-	bswapq	%r8
+	bswapq	%r9
 	bswapq	%r10
-	rolq	$32, %r8
-	rolq	$32, %r10
-	movq	%r8, %xmm1
+	rolq	$32, %r9	# r9 = W[5]:W[4]
+	rolq	$32, %r10	# r10 = W[7]:W[6]
+	movq	%r9, %xmm1
 	movq	%r10, %xmm4
-	punpcklqdq %xmm4, %xmm1	# xmm1 = r10:r8 = (W[4],W[5],W[6],W[7])
-	movaps	%xmm1, %xmm4
-	paddd	%xmm6, %xmm4
-	movups	%xmm4, -64+4*4(%rsp)
+	punpcklqdq %xmm4, %xmm1	# xmm1 = r10:r9 = (W[4],W[5],W[6],W[7])
 
-	movq	4*8(%rdi), %r8
-	movq	4*10(%rdi), %r10
-	bswapq	%r8
-	bswapq	%r10
-	movl	%r8d, %r9d	# r9d = W[9]
-	rolq	$32, %r8	# r8 = W[9]:W[8]
-	movl	%r10d, %r11d	# r11d = W[11]
-	rolq	$32, %r10	# r10 = W[11]:W[10]
-	movq	%r8, %xmm2
-	movq	%r10, %xmm4
-	punpcklqdq %xmm4, %xmm2	# xmm2 = r10:r8 = (W[8],W[9],W[10],W[11])
-
-	movq	4*12(%rdi), %r12
-	movq	4*14(%rdi), %r14
+	movq	4*8(%rdi), %r11
+	movq	4*10(%rdi), %r12
+	bswapq	%r11
 	bswapq	%r12
+	rolq	$32, %r11	# r11 = W[9]:W[8]
+	rolq	$32, %r12	# r12 = W[11]:W[10]
+	movq	%r11, %xmm2
+	movq	%r12, %xmm4
+	punpcklqdq %xmm4, %xmm2	# xmm2 = r12:r11 = (W[8],W[9],W[10],W[11])
+
+	movq	4*12(%rdi), %r13
+	movq	4*14(%rdi), %r14
+	bswapq	%r13
 	bswapq	%r14
-	movl	%r12d, %r13d	# r13d = W[13]
-	rolq	$32, %r12	# r12 = W[13]:W[12]
-	movl	%r14d, %r15d	# r15d = W[15]
+	rolq	$32, %r13	# r13 = W[13]:W[12]
 	rolq	$32, %r14	# r14 = W[15]:W[14]
-	movq	%r12, %xmm3
+	movq	%r13, %xmm3
 	movq	%r14, %xmm4
-	punpcklqdq %xmm4, %xmm3	# xmm3 = r14:r12 = (W[12],W[13],W[14],W[15])
+	punpcklqdq %xmm4, %xmm3	# xmm3 = r14:r13 = (W[12],W[13],W[14],W[15])
 # 0
 	leal	0x5A827999(%rbp,%rsi), %ebp	# e += RCONST + W[n]
+	shrq	$32, %rsi
 	movl	%ecx, %edi	# c
 	xorl	%edx, %edi	# ^d
 	andl	%ebx, %edi	# &b
 	xorl	%edx, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebp	# e += (((c ^ d) & b) ^ d)
-	movl	%eax, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %ebp	# e += rotl32(a,5)
+	movl	%eax, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %ebp	# e += rotl32(a,5)
 	rorl	$2, %ebx	# b = rotl32(b,30)
 # 1
-	addl	-64+4*1(%rsp), %edx	# e += RCONST + W[n]
+	leal	0x5A827999(%rdx,%rsi), %edx	# e += RCONST + W[n]
 	movl	%ebx, %edi	# c
 	xorl	%ecx, %edi	# ^d
 	andl	%eax, %edi	# &b
 	xorl	%ecx, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %edx	# e += (((c ^ d) & b) ^ d)
-	movl	%ebp, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %edx	# e += rotl32(a,5)
+	movl	%ebp, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %edx	# e += rotl32(a,5)
 	rorl	$2, %eax	# b = rotl32(b,30)
 # 2
-	addl	-64+4*2(%rsp), %ecx	# e += RCONST + W[n]
+	leal	0x5A827999(%rcx,%r8), %ecx	# e += RCONST + W[n]
+	shrq	$32, %r8
 	movl	%eax, %edi	# c
 	xorl	%ebx, %edi	# ^d
 	andl	%ebp, %edi	# &b
 	xorl	%ebx, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %ecx	# e += (((c ^ d) & b) ^ d)
-	movl	%edx, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %ecx	# e += rotl32(a,5)
+	movl	%edx, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %ecx	# e += rotl32(a,5)
 	rorl	$2, %ebp	# b = rotl32(b,30)
 # 3
-	addl	-64+4*3(%rsp), %ebx	# e += RCONST + W[n]
+	leal	0x5A827999(%rbx,%r8), %ebx	# e += RCONST + W[n]
 	movl	%ebp, %edi	# c
 	xorl	%eax, %edi	# ^d
 	andl	%edx, %edi	# &b
 	xorl	%eax, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebx	# e += (((c ^ d) & b) ^ d)
-	movl	%ecx, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %ebx	# e += rotl32(a,5)
+	movl	%ecx, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %ebx	# e += rotl32(a,5)
 	rorl	$2, %edx	# b = rotl32(b,30)
 # 4
-	addl	-64+4*4(%rsp), %eax	# e += RCONST + W[n]
+	leal	0x5A827999(%rax,%r9), %eax	# e += RCONST + W[n]
+	shrq	$32, %r9
 	movl	%edx, %edi	# c
 	xorl	%ebp, %edi	# ^d
 	andl	%ecx, %edi	# &b
 	xorl	%ebp, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %eax	# e += (((c ^ d) & b) ^ d)
-	movl	%ebx, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %eax	# e += rotl32(a,5)
+	movl	%ebx, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %eax	# e += rotl32(a,5)
 	rorl	$2, %ecx	# b = rotl32(b,30)
 # 5
-	addl	-64+4*5(%rsp), %ebp	# e += RCONST + W[n]
+	leal	0x5A827999(%rbp,%r9), %ebp	# e += RCONST + W[n]
 	movl	%ecx, %edi	# c
 	xorl	%edx, %edi	# ^d
 	andl	%ebx, %edi	# &b
 	xorl	%edx, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebp	# e += (((c ^ d) & b) ^ d)
-	movl	%eax, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %ebp	# e += rotl32(a,5)
+	movl	%eax, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %ebp	# e += rotl32(a,5)
 	rorl	$2, %ebx	# b = rotl32(b,30)
 # 6
-	addl	-64+4*6(%rsp), %edx	# e += RCONST + W[n]
+	leal	0x5A827999(%rdx,%r10), %edx	# e += RCONST + W[n]
+	shrq	$32, %r10
 	movl	%ebx, %edi	# c
 	xorl	%ecx, %edi	# ^d
 	andl	%eax, %edi	# &b
 	xorl	%ecx, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %edx	# e += (((c ^ d) & b) ^ d)
-	movl	%ebp, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %edx	# e += rotl32(a,5)
+	movl	%ebp, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %edx	# e += rotl32(a,5)
 	rorl	$2, %eax	# b = rotl32(b,30)
 # 7
-	addl	-64+4*7(%rsp), %ecx	# e += RCONST + W[n]
+	leal	0x5A827999(%rcx,%r10), %ecx	# e += RCONST + W[n]
 	movl	%eax, %edi	# c
 	xorl	%ebx, %edi	# ^d
 	andl	%ebp, %edi	# &b
 	xorl	%ebx, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %ecx	# e += (((c ^ d) & b) ^ d)
-	movl	%edx, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %ecx	# e += rotl32(a,5)
+	movl	%edx, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %ecx	# e += rotl32(a,5)
 	rorl	$2, %ebp	# b = rotl32(b,30)
 # PREP %xmm0 %xmm1 %xmm2 %xmm3 -64+16*0(%rsp)
 	movaps	%xmm3, %xmm4
@@ -186,9 +188,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm0, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm0, %xmm0	# shift left by 1
-	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm0, %xmm0	# shift left by 1
+	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -201,48 +203,50 @@ sha1_process_block64:
 	paddd	%xmm6, %xmm5
 	movups	%xmm5, -64+16*0(%rsp)
 # 8
-	leal	0x5A827999(%rbx,%r8), %ebx	# e += RCONST + W[n]
+	leal	0x5A827999(%rbx,%r11), %ebx	# e += RCONST + W[n]
+	shrq	$32, %r11
 	movl	%ebp, %edi	# c
 	xorl	%eax, %edi	# ^d
 	andl	%edx, %edi	# &b
 	xorl	%eax, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebx	# e += (((c ^ d) & b) ^ d)
-	movl	%ecx, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %ebx	# e += rotl32(a,5)
+	movl	%ecx, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %ebx	# e += rotl32(a,5)
 	rorl	$2, %edx	# b = rotl32(b,30)
 # 9
-	leal	0x5A827999(%rax,%r9), %eax	# e += RCONST + W[n]
+	leal	0x5A827999(%rax,%r11), %eax	# e += RCONST + W[n]
 	movl	%edx, %edi	# c
 	xorl	%ebp, %edi	# ^d
 	andl	%ecx, %edi	# &b
 	xorl	%ebp, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %eax	# e += (((c ^ d) & b) ^ d)
-	movl	%ebx, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %eax	# e += rotl32(a,5)
+	movl	%ebx, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %eax	# e += rotl32(a,5)
 	rorl	$2, %ecx	# b = rotl32(b,30)
 # 10
-	leal	0x5A827999(%rbp,%r10), %ebp	# e += RCONST + W[n]
+	leal	0x5A827999(%rbp,%r12), %ebp	# e += RCONST + W[n]
+	shrq	$32, %r12
 	movl	%ecx, %edi	# c
 	xorl	%edx, %edi	# ^d
 	andl	%ebx, %edi	# &b
 	xorl	%edx, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebp	# e += (((c ^ d) & b) ^ d)
-	movl	%eax, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %ebp	# e += rotl32(a,5)
+	movl	%eax, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %ebp	# e += rotl32(a,5)
 	rorl	$2, %ebx	# b = rotl32(b,30)
 # 11
-	leal	0x5A827999(%rdx,%r11), %edx	# e += RCONST + W[n]
+	leal	0x5A827999(%rdx,%r12), %edx	# e += RCONST + W[n]
 	movl	%ebx, %edi	# c
 	xorl	%ecx, %edi	# ^d
 	andl	%eax, %edi	# &b
 	xorl	%ecx, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %edx	# e += (((c ^ d) & b) ^ d)
-	movl	%ebp, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %edx	# e += rotl32(a,5)
+	movl	%ebp, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %edx	# e += rotl32(a,5)
 	rorl	$2, %eax	# b = rotl32(b,30)
 	movaps	rconst0x6ED9EBA1(%rip), %xmm6
 # PREP %xmm1 %xmm2 %xmm3 %xmm0 -64+16*1(%rsp)
@@ -256,9 +260,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm1, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm1, %xmm1	# shift left by 1
-	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm1, %xmm1	# shift left by 1
+	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -271,15 +275,16 @@ sha1_process_block64:
 	paddd	%xmm6, %xmm5
 	movups	%xmm5, -64+16*1(%rsp)
 # 12
-	leal	0x5A827999(%rcx,%r12), %ecx	# e += RCONST + W[n]
+	leal	0x5A827999(%rcx,%r13), %ecx	# e += RCONST + W[n]
+	shrq	$32, %r13
 	movl	%eax, %edi	# c
 	xorl	%ebx, %edi	# ^d
 	andl	%ebp, %edi	# &b
 	xorl	%ebx, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %ecx	# e += (((c ^ d) & b) ^ d)
-	movl	%edx, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %ecx	# e += rotl32(a,5)
+	movl	%edx, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %ecx	# e += rotl32(a,5)
 	rorl	$2, %ebp	# b = rotl32(b,30)
 # 13
 	leal	0x5A827999(%rbx,%r13), %ebx	# e += RCONST + W[n]
@@ -288,31 +293,32 @@ sha1_process_block64:
 	movl	%ebp, %edi	# c
 	xorl	%eax, %edi	# ^d
 	andl	%edx, %edi	# &b
 	xorl	%eax, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebx	# e += (((c ^ d) & b) ^ d)
-	movl	%ecx, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %ebx	# e += rotl32(a,5)
+	movl	%ecx, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %ebx	# e += rotl32(a,5)
 	rorl	$2, %edx	# b = rotl32(b,30)
 # 14
 	leal	0x5A827999(%rax,%r14), %eax	# e += RCONST + W[n]
+	shrq	$32, %r14
 	movl	%edx, %edi	# c
 	xorl	%ebp, %edi	# ^d
 	andl	%ecx, %edi	# &b
 	xorl	%ebp, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %eax	# e += (((c ^ d) & b) ^ d)
-	movl	%ebx, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %eax	# e += rotl32(a,5)
+	movl	%ebx, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %eax	# e += rotl32(a,5)
 	rorl	$2, %ecx	# b = rotl32(b,30)
 # 15
-	leal	0x5A827999(%rbp,%r15), %ebp	# e += RCONST + W[n]
+	leal	0x5A827999(%rbp,%r14), %ebp	# e += RCONST + W[n]
 	movl	%ecx, %edi	# c
 	xorl	%edx, %edi	# ^d
 	andl	%ebx, %edi	# &b
 	xorl	%edx, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %ebp	# e += (((c ^ d) & b) ^ d)
-	movl	%eax, %esi	#
-	roll	$5, %esi	# rotl32(a,5)
-	addl	%esi, %ebp	# e += rotl32(a,5)
+	movl	%eax, %edi	#
+	roll	$5, %edi	# rotl32(a,5)
+	addl	%edi, %ebp	# e += rotl32(a,5)
 	rorl	$2, %ebx	# b = rotl32(b,30)
 # PREP %xmm2 %xmm3 %xmm0 %xmm1 -64+16*2(%rsp)
 	movaps	%xmm1, %xmm4
@@ -325,9 +331,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm2, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm2, %xmm2	# shift left by 1
-	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm2, %xmm2	# shift left by 1
+	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -394,9 +400,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm3, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm3, %xmm3	# shift left by 1
-	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm3, %xmm3	# shift left by 1
+	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -459,9 +465,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm0, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm0, %xmm0	# shift left by 1
-	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm0, %xmm0	# shift left by 1
+	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -524,9 +530,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm1, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm1, %xmm1	# shift left by 1
-	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm1, %xmm1	# shift left by 1
+	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -590,9 +596,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm2, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm2, %xmm2	# shift left by 1
-	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm2, %xmm2	# shift left by 1
+	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -655,9 +661,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm3, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm3, %xmm3	# shift left by 1
-	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm3, %xmm3	# shift left by 1
+	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -720,9 +726,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm0, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm0, %xmm0	# shift left by 1
-	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm0, %xmm0	# shift left by 1
+	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -797,9 +803,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm1, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm1, %xmm1	# shift left by 1
-	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm1, %xmm1	# shift left by 1
+	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -874,9 +880,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm2, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm2, %xmm2	# shift left by 1
-	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm2, %xmm2	# shift left by 1
+	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -952,9 +958,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm3, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm3, %xmm3	# shift left by 1
-	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm3, %xmm3	# shift left by 1
+	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -1029,9 +1035,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm0, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm0, %xmm0	# shift left by 1
-	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm0, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm0, %xmm0	# shift left by 1
+	psubd	%xmm4, %xmm0	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -1106,9 +1112,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm1, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm1, %xmm1	# shift left by 1
-	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm1, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm1, %xmm1	# shift left by 1
+	psubd	%xmm4, %xmm1	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -1171,9 +1177,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm2, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm2, %xmm2	# shift left by 1
-	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm2, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm2, %xmm2	# shift left by 1
+	psubd	%xmm4, %xmm2	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -1236,9 +1242,9 @@ sha1_process_block64:
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	%xmm3, %xmm5
 	xorps	%xmm4, %xmm4	# rol(W0,1):
-	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	%xmm3, %xmm3	# shift left by 1
-	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
+	pcmpgtd	%xmm3, %xmm4	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	%xmm3, %xmm3	# shift left by 1
+	psubd	%xmm4, %xmm3	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	$12, %xmm5	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	%xmm5, %xmm4
@@ -1378,7 +1384,7 @@ sha1_process_block64:
 	addl	%ebx, 84(%rdi)	# ctx->hash[1] += b
 	popq	%r14	#
 	addl	%ecx, 88(%rdi)	# ctx->hash[2] += c
-	popq	%r15	#
+#	popq	%r15	#
 	addl	%edx, 92(%rdi)	# ctx->hash[3] += d
 	popq	%rbx	#
 	addl	%ebp, 96(%rdi)	# ctx->hash[4] += e
diff --git a/libbb/hash_md5_sha_x86-64.S.sh b/libbb/hash_md5_sha_x86-64.S.sh
index 87c2d0800..47c40af0d 100755
--- a/libbb/hash_md5_sha_x86-64.S.sh
+++ b/libbb/hash_md5_sha_x86-64.S.sh
@@ -102,7 +102,7 @@ echo \
 "### Generated by hash_md5_sha_x86-64.S.sh ###
 
 #if CONFIG_SHA1_SMALL == 0 && defined(__GNUC__) && defined(__x86_64__)
-	.section	.text.sha1_process_block64,\"ax\",@progbits
+	.section	.text.sha1_process_block64, \"ax\", @progbits
 	.globl	sha1_process_block64
 	.hidden	sha1_process_block64
 	.type	sha1_process_block64, @function
@@ -111,7 +111,7 @@ echo \
 sha1_process_block64:
 	pushq	%rbp	# 1 byte insn
 	pushq	%rbx	# 1 byte insn
-	pushq	%r15	# 2 byte insn
+#	pushq	%r15	# 2 byte insn
 	pushq	%r14	# 2 byte insn
 	pushq	%r13	# 2 byte insn
 	pushq	%r12	# 2 byte insn
@@ -120,7 +120,8 @@ sha1_process_block64:
 #Register and stack use:
 # eax..edx: a..d
 # ebp: e
-# esi,edi: temps
+# esi,edi,r8..r14: temps
+# r15: unused
 # xmm0..xmm3: W[]
 # xmm4,xmm5: temps
 # xmm6: current round constant
@@ -134,59 +135,56 @@ sha1_process_block64:
 	movaps	rconst0x5A827999(%rip), $xmmRCONST
 
-	# For round 1, steps 0 and 8..15, we pass W[0,8..15] in esi,r8..r15
-	# instead of spilling them to stack.
-	# (We lose parallelized addition of RCONST, but LEA
-	# can do two additions at once, so...)
+	# Load W[] to xmm registers, byteswapping on the fly.
+	#
+	# For iterations 0..15, we pass W[] in rsi,r8..r14
+	# for use in RD1A's instead of spilling them to stack.
+	# We lose parallelized addition of RCONST, but LEA
+	# can do two additions at once, so it's probably a wash.
+	# (We use rsi instead of rN because this makes two
+	# LEAs in two first RD1A's shorter by one byte).
 	movq	4*0(%rdi), %rsi
-	movq	4*2(%rdi), %r10
+	movq	4*2(%rdi), %r8
 	bswapq	%rsi
-	bswapq	%r10
+	bswapq	%r8
 	rolq	\$32, %rsi	# rsi = W[1]:W[0]
-	rolq	\$32, %r10
+	rolq	\$32, %r8	# r8 = W[3]:W[2]
 	movq	%rsi, %xmm0
-	movq	%r10, $xmmT1
-	punpcklqdq $xmmT1, %xmm0	# xmm0 = r10:rsi = (W[0],W[1],W[2],W[3])
-	movaps	%xmm0, $xmmT1
-	paddd	$xmmRCONST, $xmmT1
-	movups	$xmmT1, -64+4*0(%rsp)
+	movq	%r8, $xmmT1
+	punpcklqdq $xmmT1, %xmm0	# xmm0 = r8:rsi = (W[0],W[1],W[2],W[3])
+#	movaps	%xmm0, $xmmT1	# add RCONST, spill to stack
+#	paddd	$xmmRCONST, $xmmT1
+#	movups	$xmmT1, -64+16*0(%rsp)
 
-	movq	4*4(%rdi), %r8
+	movq	4*4(%rdi), %r9
 	movq	4*6(%rdi), %r10
-	bswapq	%r8
+	bswapq	%r9
 	bswapq	%r10
-	rolq	\$32, %r8
-	rolq	\$32, %r10
-	movq	%r8, %xmm1
+	rolq	\$32, %r9	# r9 = W[5]:W[4]
+	rolq	\$32, %r10	# r10 = W[7]:W[6]
+	movq	%r9, %xmm1
 	movq	%r10, $xmmT1
-	punpcklqdq $xmmT1, %xmm1	# xmm1 = r10:r8 = (W[4],W[5],W[6],W[7])
-	movaps	%xmm1, $xmmT1
-	paddd	$xmmRCONST, $xmmT1
-	movups	$xmmT1, -64+4*4(%rsp)
+	punpcklqdq $xmmT1, %xmm1	# xmm1 = r10:r9 = (W[4],W[5],W[6],W[7])
 
-	movq	4*8(%rdi), %r8
-	movq	4*10(%rdi), %r10
-	bswapq	%r8
-	bswapq	%r10
-	movl	%r8d, %r9d	# r9d = W[9]
-	rolq	\$32, %r8	# r8 = W[9]:W[8]
-	movl	%r10d, %r11d	# r11d = W[11]
-	rolq	\$32, %r10	# r10 = W[11]:W[10]
-	movq	%r8, %xmm2
-	movq	%r10, $xmmT1
-	punpcklqdq $xmmT1, %xmm2	# xmm2 = r10:r8 = (W[8],W[9],W[10],W[11])
-
-	movq	4*12(%rdi), %r12
-	movq	4*14(%rdi), %r14
+	movq	4*8(%rdi), %r11
+	movq	4*10(%rdi), %r12
+	bswapq	%r11
 	bswapq	%r12
+	rolq	\$32, %r11	# r11 = W[9]:W[8]
+	rolq	\$32, %r12	# r12 = W[11]:W[10]
+	movq	%r11, %xmm2
+	movq	%r12, $xmmT1
+	punpcklqdq $xmmT1, %xmm2	# xmm2 = r12:r11 = (W[8],W[9],W[10],W[11])
+
+	movq	4*12(%rdi), %r13
+	movq	4*14(%rdi), %r14
+	bswapq	%r13
 	bswapq	%r14
-	movl	%r12d, %r13d	# r13d = W[13]
-	rolq	\$32, %r12	# r12 = W[13]:W[12]
-	movl	%r14d, %r15d	# r15d = W[15]
+	rolq	\$32, %r13	# r13 = W[13]:W[12]
 	rolq	\$32, %r14	# r14 = W[15]:W[14]
-	movq	%r12, %xmm3
+	movq	%r13, %xmm3
 	movq	%r14, $xmmT1
-	punpcklqdq $xmmT1, %xmm3	# xmm3 = r14:r12 = (W[12],W[13],W[14],W[15])
+	punpcklqdq $xmmT1, %xmm3	# xmm3 = r14:r13 = (W[12],W[13],W[14],W[15])
 "
 
 PREP() {
@@ -215,9 +213,9 @@ echo "# PREP $@
 # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
 	movaps	$xmmW0, $xmmT2
 	xorps	$xmmT1, $xmmT1	# rol(W0,1):
-	pcmpgtd	$xmmW0, $xmmT1	# ffffffff for elements <0 (ones with msb bit 1)
-	paddd	$xmmW0, $xmmW0	# shift left by 1
-	psubd	$xmmT1, $xmmW0	# add 1 to those who had msb bit 1
+	pcmpgtd	$xmmW0, $xmmT1	# ffffffff for elements <0 (ones with msb bit 1)
+	paddd	$xmmW0, $xmmW0	# shift left by 1
+	psubd	$xmmT1, $xmmW0	# add 1 to those who had msb bit 1
 # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
 	pslldq	\$12, $xmmT2	# lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
 	movaps	$xmmT2, $xmmT1
@@ -256,23 +254,28 @@ RD1A() {
 local a=$1;local b=$2;local c=$3;local d=$4;local e=$5
 local n=$(($6))
 local n0=$(((n+0) & 15))
+local rN=$((7+n0/2))
 echo "
 # $n
 ";test $n0 = 0 && echo "
 	leal	$RCONST(%r$e,%rsi), %e$e	# e += RCONST + W[n]
-";test $n0 != 0 && test $n0 -lt 8 && echo "
-	addl	-64+4*$n0(%rsp), %e$e	# e += RCONST + W[n]
-";test $n0 -ge 8 && echo "
-	leal	$RCONST(%r$e,%r$n0), %e$e	# e += RCONST + W[n]
+	shrq	\$32, %rsi
+";test $n0 = 1 && echo "
+	leal	$RCONST(%r$e,%rsi), %e$e	# e += RCONST + W[n]
+";test $n0 -ge 2 && test $((n0 & 1)) = 0 && echo "
+	leal	$RCONST(%r$e,%r$rN), %e$e	# e += RCONST + W[n]
+	shrq	\$32, %r$rN
+";test $n0 -ge 2 && test $((n0 & 1)) = 1 && echo "
+	leal	$RCONST(%r$e,%r$rN), %e$e	# e += RCONST + W[n]
 ";echo "
 	movl	%e$c, %edi	# c
 	xorl	%e$d, %edi	# ^d
 	andl	%e$b, %edi	# &b
 	xorl	%e$d, %edi	# (((c ^ d) & b) ^ d)
 	addl	%edi, %e$e	# e += (((c ^ d) & b) ^ d)
-	movl	%e$a, %esi	#
-	roll	\$5, %esi	# rotl32(a,5)
-	addl	%esi, %e$e	# e += rotl32(a,5)
+	movl	%e$a, %edi	#
+	roll	\$5, %edi	# rotl32(a,5)
+	addl	%edi, %e$e	# e += rotl32(a,5)
 	rorl	\$2, %e$b	# b = rotl32(b,30)
 "
 }
@@ -420,7 +423,7 @@ echo "
 	addl	%ebx, 84(%rdi)	# ctx->hash[1] += b
 	popq	%r14	#
 	addl	%ecx, 88(%rdi)	# ctx->hash[2] += c
-	popq	%r15	#
+#	popq	%r15	#
 	addl	%edx, 92(%rdi)	# ctx->hash[3] += d
 	popq	%rbx	#
 	addl	%ebp, 96(%rdi)	# ctx->hash[4] += e