#include "crypto_assembly.h"
/*
 * SHA-256 block compression, generic x86-64 implementation.
 * AT&T syntax, SysV AMD64 ABI.  Register aliases used throughout:
 */
/* Incoming arguments: ctx = hash state, in = message, num = block count. */
#define ctx %rdi
#define in %rsi
#define num %rdx
/* %rdi doubles as the round counter once ctx has been spilled to the stack. */
#define round %rdi
/* Working variables a..h of the compression function (callee-saved regs,
 * saved/restored in the function prologue/epilogue). */
#define hs0 %r8d
#define hs1 %r9d
#define hs2 %r10d
#define hs3 %r11d
#define hs4 %r12d
#define hs5 %r13d
#define hs6 %r14d
#define hs7 %r15d
/* Base pointer to the K256 round-constant table. */
#define k256 %rbp
/* Scratch registers.  tmp3 aliases the low half of num, which is dead once
 * the end-of-input pointer has been computed; tmp1 aliases %ebx, which is
 * callee-saved and preserved by the prologue. */
#define tmp0 %eax
#define tmp1 %ebx
#define tmp2 %ecx
#define tmp3 %edx
/*
 * sha256_message_schedule_load(idx, m, w, wt):
 * Rounds 0-15 — load the big-endian message word m[round], byte-swap it to
 * host order in wt, and store it into the circular 16-word schedule at
 * w[idx % 16].  Uses the live round counter as the message index, so it is
 * only valid while round == idx (rounds 0..15).
 */
#define sha256_message_schedule_load(idx, m, w, wt) \
movl (m, round, 4), wt; /* wt = big-endian M[round] */ \
bswapl wt; /* convert to host byte order */ \
movl wt, ((idx&0xf)*4)(w) /* W[idx % 16] = wt */
/*
 * sha256_message_schedule_update(idx, w, wt):
 * Rounds 16-63 — extend the circular 16-word schedule (FIPS 180-4):
 *   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
 * with sigma1(x) = rotr17(x) ^ rotr19(x) ^ (x >> 10)
 *      sigma0(x) = rotr7(x)  ^ rotr18(x) ^ (x >> 3)
 * The rotate pairs are nested to save one rotate each:
 *   ((x rotr (19-17)) ^ x) rotr 17  ==  rotr19(x) ^ rotr17(x)
 *   ((x rotr (18-7))  ^ x) rotr 7   ==  rotr18(x) ^ rotr7(x)
 * Leaves W[t] in wt; clobbers tmp1, tmp2, tmp3.
 */
#define sha256_message_schedule_update(idx, w, wt) \
movl (((idx-2)&0xf)*4)(w), wt; /* wt = W[t-2] */ \
movl wt, tmp1; \
rorl $(19-17), tmp1; \
xorl wt, tmp1; \
rorl $17, tmp1; /* tmp1 = rotr17 ^ rotr19 of W[t-2] */ \
shrl $10, wt; \
xorl tmp1, wt; /* wt = sigma1(W[t-2]) */ \
\
addl (((idx-7)&0xf)*4)(w), wt; /* wt += W[t-7] */ \
addl (((idx-16)&0xf)*4)(w), wt; /* wt += W[t-16] */ \
\
movl (((idx-15)&0xf)*4)(w), tmp2; /* tmp2 = W[t-15] */ \
movl tmp2, tmp3; \
rorl $(18-7), tmp2; \
xorl tmp3, tmp2; \
rorl $7, tmp2; /* tmp2 = rotr7 ^ rotr18 of W[t-15] */ \
shrl $3, tmp3; \
xorl tmp3, tmp2; /* tmp2 = sigma0(W[t-15]) */ \
addl tmp2, wt; /* wt = W[t] */ \
\
movl wt, ((idx&0xf)*4)(w) /* store W[t % 16] */
/*
 * sha256_round(idx, a, b, c, d, e, f, g, h, k, w, wt):
 * One SHA-256 round.  Callers rotate the a..h argument order each round
 * instead of shuffling registers.  Computes (FIPS 180-4):
 *   T1 = h + Sigma1(e) + Ch(e,f,g) + K[round] + W[round]
 *   d += T1;   h = T1 + Sigma0(a) + Maj(a,b,c)
 * with
 *   Sigma1(e) = rotr6 ^ rotr11 ^ rotr25, nested as
 *               (((e rotr 14) ^ e) rotr 5 ^ e) rotr 6
 *   Sigma0(a) = rotr2 ^ rotr13 ^ rotr22, nested as
 *               (((a rotr 9) ^ a) rotr 11 ^ a) rotr 2
 *   Ch(e,f,g)  = ((f ^ g) & e) ^ g
 *   Maj(a,b,c) = ((b ^ c) & a) ^ (b & c)
 * NOTE(review): the idx, k and w parameters are unused — the body indexes
 * the constant table through the k256 alias and takes W[round] in wt.
 * Clobbers tmp1, tmp2, tmp3; increments the round counter.
 */
#define sha256_round(idx, a, b, c, d, e, f, g, h, k, w, wt) \
addl wt, h; /* h += W[round] */ \
addl (k256, round, 4), h; /* h += K[round] */ \
\
movl e, tmp1; \
rorl $(25-11), tmp1; \
xorl e, tmp1; \
rorl $(11-6), tmp1; \
xorl e, tmp1; \
rorl $6, tmp1; /* tmp1 = Sigma1(e) */ \
addl tmp1, h; \
\
movl f, tmp2; \
xorl g, tmp2; \
andl e, tmp2; \
xorl g, tmp2; /* tmp2 = Ch(e, f, g) */ \
addl tmp2, h; /* h = T1 */ \
\
addl h, d; /* d += T1 */ \
\
movl a, tmp1; \
rorl $(22-13), tmp1; \
xorl a, tmp1; \
rorl $(13-2), tmp1; \
xorl a, tmp1; \
rorl $2, tmp1; /* tmp1 = Sigma0(a) */ \
addl tmp1, h; \
\
movl b, tmp2; \
xorl c, tmp2; \
andl a, tmp2; /* tmp2 = a & (b ^ c) */ \
movl b, tmp3; \
andl c, tmp3; /* tmp3 = b & c */ \
xorl tmp2, tmp3; /* tmp3 = Maj(a, b, c) */ \
addl tmp3, h; /* h = T1 + Sigma0(a) + Maj(a,b,c) */ \
\
addq $1, round
/* Rounds 0-15: load one message word (schedule lives at the bottom of the
 * 64-byte-aligned stack frame, so %rsp is the W[] base), then run a round
 * with the word in tmp0. */
#define sha256_round_load(idx, a, b, c, d, e, f, g, h) \
sha256_message_schedule_load(idx, in, %rsp, tmp0); \
sha256_round(idx, a, b, c, d, e, f, g, h, k256, %rsp, tmp0)
/* Rounds 16-63: extend the circular message schedule in place, then run a
 * round with the new word in tmp0. */
#define sha256_round_update(idx, a, b, c, d, e, f, g, h) \
sha256_message_schedule_update(idx, %rsp, tmp0); \
sha256_round(idx, a, b, c, d, e, f, g, h, k256, %rsp, tmp0)
.section .text
.align 16
.globl sha256_block_generic
.type sha256_block_generic,@function
/*
 * sha256_block_generic(ctx, in, num)
 *
 * C equivalent: void sha256_block_generic(uint32_t *ctx, const uint8_t *in,
 *                                         size_t num);
 * ABI: SysV AMD64 — ctx in %rdi, in in %rsi, num in %rdx, where num is the
 * number of 64-byte message blocks to compress into the eight 32-bit state
 * words at ctx.  Callee-saved %rbx, %rbp, %r12-%r15 are preserved.
 *
 * Stack frame (%rsp aligned down to 64 bytes):
 *   0..63      circular 16-word message schedule W[0..15]
 *   64+0*8     end-of-input pointer (in + num*64)
 *   64+1*8     saved ctx (%rdi is reused as the round counter)
 *   64+2*8     caller's %rsp, restored before return
 */
sha256_block_generic:
_CET_ENDBR
/* Save the callee-saved registers used for the working state and k256. */
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
/* Allocate the frame and align it to 64 bytes, remembering the old %rsp. */
movq %rsp, %rax
subq $(64+3*8), %rsp
andq $~63, %rsp
movq %rax, (64+2*8)(%rsp)
movq ctx, (64+1*8)(%rsp)
/* Compute and save the end-of-input pointer: in + num*64. */
shlq $6, num
leaq (in, num, 1), %rbx
movq %rbx, (64+0*8)(%rsp)
leaq K256(%rip), k256
/* Load the current hash state a..h. */
movl (0*4)(ctx), hs0
movl (1*4)(ctx), hs1
movl (2*4)(ctx), hs2
movl (3*4)(ctx), hs3
movl (4*4)(ctx), hs4
movl (5*4)(ctx), hs5
movl (6*4)(ctx), hs6
movl (7*4)(ctx), hs7
jmp .Lblock_loop0
.align 16
.Lblock_loop0:
/* round = 0.  xorl zeroes all of %rdi (implicit zero-extension) and is the
 * standard dependency-breaking zeroing idiom; replaces the longer
 * "mov $0, round" encoding.  No flag consumer before the next flag write. */
xorl %edi, %edi
/* Rounds 0-15: the a..h arguments rotate one position per invocation, so
 * the working variables never move between registers. */
sha256_round_load(0, hs0, hs1, hs2, hs3, hs4, hs5, hs6, hs7)
sha256_round_load(1, hs7, hs0, hs1, hs2, hs3, hs4, hs5, hs6)
sha256_round_load(2, hs6, hs7, hs0, hs1, hs2, hs3, hs4, hs5)
sha256_round_load(3, hs5, hs6, hs7, hs0, hs1, hs2, hs3, hs4)
sha256_round_load(4, hs4, hs5, hs6, hs7, hs0, hs1, hs2, hs3)
sha256_round_load(5, hs3, hs4, hs5, hs6, hs7, hs0, hs1, hs2)
sha256_round_load(6, hs2, hs3, hs4, hs5, hs6, hs7, hs0, hs1)
sha256_round_load(7, hs1, hs2, hs3, hs4, hs5, hs6, hs7, hs0)
sha256_round_load(8, hs0, hs1, hs2, hs3, hs4, hs5, hs6, hs7)
sha256_round_load(9, hs7, hs0, hs1, hs2, hs3, hs4, hs5, hs6)
sha256_round_load(10, hs6, hs7, hs0, hs1, hs2, hs3, hs4, hs5)
sha256_round_load(11, hs5, hs6, hs7, hs0, hs1, hs2, hs3, hs4)
sha256_round_load(12, hs4, hs5, hs6, hs7, hs0, hs1, hs2, hs3)
sha256_round_load(13, hs3, hs4, hs5, hs6, hs7, hs0, hs1, hs2)
sha256_round_load(14, hs2, hs3, hs4, hs5, hs6, hs7, hs0, hs1)
sha256_round_load(15, hs1, hs2, hs3, hs4, hs5, hs6, hs7, hs0)
jmp .Lblock_loop16
.align 16
.Lblock_loop16:
/* Rounds 16-63, 16 rounds per iteration.  W[] offsets in the macros are
 * taken mod 16 and K[] is indexed by the live round counter, so this body
 * is reused unchanged for rounds 32-47 and 48-63. */
sha256_round_update(16, hs0, hs1, hs2, hs3, hs4, hs5, hs6, hs7)
sha256_round_update(17, hs7, hs0, hs1, hs2, hs3, hs4, hs5, hs6)
sha256_round_update(18, hs6, hs7, hs0, hs1, hs2, hs3, hs4, hs5)
sha256_round_update(19, hs5, hs6, hs7, hs0, hs1, hs2, hs3, hs4)
sha256_round_update(20, hs4, hs5, hs6, hs7, hs0, hs1, hs2, hs3)
sha256_round_update(21, hs3, hs4, hs5, hs6, hs7, hs0, hs1, hs2)
sha256_round_update(22, hs2, hs3, hs4, hs5, hs6, hs7, hs0, hs1)
sha256_round_update(23, hs1, hs2, hs3, hs4, hs5, hs6, hs7, hs0)
sha256_round_update(24, hs0, hs1, hs2, hs3, hs4, hs5, hs6, hs7)
sha256_round_update(25, hs7, hs0, hs1, hs2, hs3, hs4, hs5, hs6)
sha256_round_update(26, hs6, hs7, hs0, hs1, hs2, hs3, hs4, hs5)
sha256_round_update(27, hs5, hs6, hs7, hs0, hs1, hs2, hs3, hs4)
sha256_round_update(28, hs4, hs5, hs6, hs7, hs0, hs1, hs2, hs3)
sha256_round_update(29, hs3, hs4, hs5, hs6, hs7, hs0, hs1, hs2)
sha256_round_update(30, hs2, hs3, hs4, hs5, hs6, hs7, hs0, hs1)
sha256_round_update(31, hs1, hs2, hs3, hs4, hs5, hs6, hs7, hs0)
cmp $64, round
jb .Lblock_loop16
/* Add this block's working state into the saved hash state. */
movq (64+1*8)(%rsp), ctx
addl (0*4)(ctx), hs0
addl (1*4)(ctx), hs1
addl (2*4)(ctx), hs2
addl (3*4)(ctx), hs3
addl (4*4)(ctx), hs4
addl (5*4)(ctx), hs5
addl (6*4)(ctx), hs6
addl (7*4)(ctx), hs7
movl hs0, (0*4)(ctx)
movl hs1, (1*4)(ctx)
movl hs2, (2*4)(ctx)
movl hs3, (3*4)(ctx)
movl hs4, (4*4)(ctx)
movl hs5, (5*4)(ctx)
movl hs6, (6*4)(ctx)
movl hs7, (7*4)(ctx)
/* Advance to the next 64-byte block until the end pointer is reached. */
addq $64, in
cmpq (64+0*8)(%rsp), in
jb .Lblock_loop0
/* Restore the caller's stack and callee-saved registers. */
movq (64+2*8)(%rsp), %rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
ret
/*
 * K256: the 64 SHA-256 round constants — the first 32 bits of the
 * fractional parts of the cube roots of the first 64 primes (FIPS 180-4,
 * section 4.2.2).  Indexed as K256[round] by sha256_round; 64-byte aligned
 * so the table spans exactly four cache lines.
 */
.section .rodata
.align 64
.type K256,@object
K256:
.long 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
.long 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
.long 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
.long 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
.long 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
.long 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
.long 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
.long 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
.long 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
.long 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
.long 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
.long 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
.long 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
.long 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
.long 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
.long 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
.size K256,.-K256