#include <linux/linkage.h>
#include <asm/assembler.h>
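/*
 * Design notes:
 *
 * Holding the 16-word ChaCha state would take 16 registers, but only 14 are
 * usable ('sp' and 'pc' cannot hold state).  So the pairs (x8, x9) and
 * (x10, x11) share r8-r9 and are swapped through the stack as each round
 * needs them, costing one 16-byte load/store sequence per round.
 *
 * All rotates are done with the implicit rotated-operand form of 'add' and
 * 'eor' rather than separate rotate instructions.  To make that work, the
 * values in rows 'b' (x4-x7) and 'd' (x12-x15) are allowed to sit in their
 * registers rotated by the wrong amount; the assembler symbols 'brot' and
 * 'drot' track how far each row must still be rotated right to recover its
 * true value.  (brot, drot) start at (0, 0) and are (25, 24) after every
 * complete round.
 */

// ChaCha state register aliases.  r13 (sp) is skipped; X8_X10 and X9_X11
// hold (x8, x9) or (x10, x11) depending on which pair is swapped in.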
X0 .req r0
X1 .req r1
X2 .req r2
X3 .req r3
X4 .req r4
X5 .req r5
X6 .req r6
X7 .req r7
X8_X10 .req r8
X9_X11 .req r9
X12 .req r10
X13 .req r11
X14 .req r12
X15 .req r14
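// Byte-swap the four words \a-\d on big-endian builds so the keystream is
// always little-endian; a no-op on little-endian builds.  \tmp is scratch
// for rev_l where it needs one.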
.macro _le32_bswap_4x a, b, c, d, tmp
#ifdef __ARMEB__
rev_l \a, \tmp
rev_l \b, \tmp
rev_l \c, \tmp
rev_l \d, \tmp
#endif
.endm
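// __ldrd/__strd: load/store a 64-bit register pair with a single
// ldrd/strd on ARMv6+, or two single-word accesses on older CPUs.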
.macro __ldrd a, b, src, offset
#if __LINUX_ARM_ARCH__ >= 6
ldrd \a, \b, [\src, #\offset]
#else
ldr \a, [\src, #\offset]
ldr \b, [\src, #\offset + 4]
#endif
.endm
.macro __strd a, b, dst, offset
#if __LINUX_ARM_ARCH__ >= 6
strd \a, \b, [\dst, #\offset]
#else
str \a, [\dst, #\offset]
str \b, [\dst, #\offset + 4]
#endif
.endm
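// Two ChaCha quarterrounds, interleaved for better instruction scheduling:
// (\a1, \b1, \c1, \d1) and (\a2, \b2, \c2, \d2).  Each rol is deferred and
// folded into the rotated second operand of the next add/eor that consumes
// the value, tracked by 'brot' and 'drot'.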
.macro _halfround a1, b1, c1, d1, a2, b2, c2, d2
// a += b; d ^= a; d = rol(d, 16);
add \a1, \a1, \b1, ror #brot
add \a2, \a2, \b2, ror #brot
eor \d1, \a1, \d1, ror #drot
eor \d2, \a2, \d2, ror #drot
// drot == 32 - 16 == 16
// c += d; b ^= c; b = rol(b, 12);
add \c1, \c1, \d1, ror #16
add \c2, \c2, \d2, ror #16
eor \b1, \c1, \b1, ror #brot
eor \b2, \c2, \b2, ror #brot
// brot == 32 - 12 == 20
// a += b; d ^= a; d = rol(d, 8);
add \a1, \a1, \b1, ror #20
add \a2, \a2, \b2, ror #20
eor \d1, \a1, \d1, ror #16
eor \d2, \a2, \d2, ror #16
// drot == 32 - 8 == 24
// c += d; b ^= c; b = rol(b, 7);
add \c1, \c1, \d1, ror #24
add \c2, \c2, \d2, ror #24
eor \b1, \c1, \b1, ror #20
eor \b2, \c2, \b2, ror #20
// brot == 32 - 7 == 25
.endm
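// One double round: a column round, then a diagonal round.  (x8, x9) and
// (x10, x11) take turns in r8-r9, swapping through sp+0 and sp+8.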
.macro _doubleround
// Column round: quarterrounds (x0, x4, x8, x12) and (x1, x5, x9, x13)
_halfround X0, X4, X8_X10, X12, X1, X5, X9_X11, X13
// Save (x8, x9); restore (x10, x11)
__strd X8_X10, X9_X11, sp, 0
__ldrd X8_X10, X9_X11, sp, 8
// Quarterrounds (x2, x6, x10, x14) and (x3, x7, x11, x15)
_halfround X2, X6, X8_X10, X14, X3, X7, X9_X11, X15
.set brot, 25
.set drot, 24
// Diagonal round: quarterrounds (x0, x5, x10, x15) and (x1, x6, x11, x12)
_halfround X0, X5, X8_X10, X15, X1, X6, X9_X11, X12
// Save (x10, x11); restore (x8, x9)
__strd X8_X10, X9_X11, sp, 8
__ldrd X8_X10, X9_X11, sp, 0
// Quarterrounds (x2, x7, x8, x13) and (x3, x4, x9, x14)
_halfround X2, X7, X8_X10, X13, X3, X4, X9_X11, X14
.endm
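// Apply \nrounds rounds of the ChaCha permutation, starting from unrotated
// rows: (brot, drot) = (0, 0).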
.macro _chacha_permute nrounds
.set brot, 0
.set drot, 0
.rept \nrounds / 2
_doubleround
.endr
.endm
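// Generate \nrounds-round ChaCha keystream and XOR it with the data, one
// 64-byte block per iteration, using the stack frame set up by
// chacha_doarm below.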
.macro _chacha nrounds
.Lnext_block\@:
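// Stack: unused0-unused1 x10-x11 x0-x15 OUT IN LEN
// Registers contain x0-x9,x12-x15.
// Do the core ChaCha permutation to update x0-x9,x12-x15.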
_chacha_permute \nrounds
add sp, #8
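// Stack: x10-x11 orig_x0-orig_x15 OUT IN LEN
// Registers contain x0-x9,x12-x15; x4-x7 are rotated by 'brot' and
// x12-x15 by 'drot'.
// Free up r8-r12,r14 by pushing (x8-x9,x12-x15).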
push {X8_X10, X9_X11, X12, X13, X14, X15}
// Load OUT, IN, and LEN.
ldr r14, [sp, #96] // OUT
ldr r12, [sp, #100] // IN
ldr r11, [sp, #104] // LEN
orr r10, r14, r12
// Take the slow path if fewer than 64 bytes remain.
cmp r11, #64
blt .Lxor_slowpath\@
// Also take it if IN or OUT isn't 4-byte aligned: the ldmia/stmia used
// below require word alignment even on ARMv6+.
tst r10, #3
bne .Lxor_slowpath\@
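// Fast path: XOR 64 bytes of aligned data.
// Stack: x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
// Registers: r0-r7 hold x0-x7; r8-r11 are free; r12 is IN; r14 is OUT.
// x0-x3: add the original state words, byte-swap if big-endian, XOR with
// the input, and write to the output.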
__ldrd r8, r9, sp, 32
__ldrd r10, r11, sp, 40
add X0, X0, r8
add X1, X1, r9
add X2, X2, r10
add X3, X3, r11
_le32_bswap_4x X0, X1, X2, X3, r8
ldmia r12!, {r8-r11}
eor X0, X0, r8
eor X1, X1, r9
eor X2, X2, r10
eor X3, X3, r11
stmia r14!, {X0-X3}
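// x4-x7: same, but also undo the pending 'brot' rotation; the input load
// is interleaved to hide memory latency.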
__ldrd r8, r9, sp, 48
__ldrd r10, r11, sp, 56
add X4, r8, X4, ror #brot
add X5, r9, X5, ror #brot
ldmia r12!, {X0-X3}
add X6, r10, X6, ror #brot
add X7, r11, X7, ror #brot
_le32_bswap_4x X4, X5, X6, X7, r8
eor X4, X4, X0
eor X5, X5, X1
eor X6, X6, X2
eor X7, X7, X3
stmia r14!, {X4-X7}
// x8-x15: retrieve the words pushed earlier
pop {r0-r7} // (x8-x9, x12-x15, x10-x11)
__ldrd r8, r9, sp, 32 // orig x8-x9
__ldrd r10, r11, sp, 40 // orig x10-x11
add r0, r0, r8 // x8
add r1, r1, r9 // x9
add r6, r6, r10 // x10
add r7, r7, r11 // x11
_le32_bswap_4x r0, r1, r6, r7, r8
ldmia r12!, {r8-r11}
eor r0, r0, r8 // x8
eor r1, r1, r9 // x9
eor r6, r6, r10 // x10
eor r7, r7, r11 // x11
stmia r14!, {r0,r1,r6,r7}
ldmia r12!, {r0,r1,r6,r7}
__ldrd r8, r9, sp, 48 // orig x12-x13
__ldrd r10, r11, sp, 56 // orig x14-x15
add r2, r8, r2, ror #drot // x12
add r3, r9, r3, ror #drot // x13
add r4, r10, r4, ror #drot // x14
add r5, r11, r5, ror #drot // x15
_le32_bswap_4x r2, r3, r4, r5, r9
ldr r9, [sp, #72] // load LEN
eor r2, r2, r0 // x12
eor r3, r3, r1 // x13
eor r4, r4, r6 // x14
eor r5, r5, r7 // x15
subs r9, #64 // decrement and check LEN
stmia r14!, {r2-r5}
beq .Ldone\@
.Lprepare_for_next_block\@:
// Stack: x0-x15 OUT IN LEN
// Increment the block counter (x12)
add r8, #1
// Store the updated OUT, IN, and LEN
str r14, [sp, #64]
str r12, [sp, #68]
str r9, [sp, #72]
mov r14, sp
// Store the updated block counter (x12)
str r8, [sp, #48]
sub sp, #16
// Reload the state and do the next block
ldmia r14!, {r0-r11} // load x0-x11
__strd r10, r11, sp, 8 // store x10-x11 before the state
ldmia r14, {r10-r12,r14} // load x12-x15
b .Lnext_block\@
.Lxor_slowpath\@:
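// Slow path: fewer than 64 bytes remain, or IN or OUT is misaligned.
// Write the full 64-byte keystream block to the stack, then XOR only the
// needed portion with the data.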
sub sp, #64 // allocate the keystream buffer
mov r14, sp
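// Stack: ks0-ks15 x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
// Registers: r0-r7 hold x0-x7; r8-r11 are free; r12 is IN; r14 is &ks0.
// Save keystream for x0-x3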
__ldrd r8, r9, sp, 96
__ldrd r10, r11, sp, 104
add X0, X0, r8
add X1, X1, r9
add X2, X2, r10
add X3, X3, r11
_le32_bswap_4x X0, X1, X2, X3, r8
stmia r14!, {X0-X3}
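// Save keystream for x4-x7, undoing the pending 'brot' rotation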
__ldrd r8, r9, sp, 112
__ldrd r10, r11, sp, 120
add X4, r8, X4, ror #brot
add X5, r9, X5, ror #brot
add X6, r10, X6, ror #brot
add X7, r11, X7, ror #brot
_le32_bswap_4x X4, X5, X6, X7, r8
add r8, sp, #64 // r8 = &(x8-x9, x12-x15, x10-x11) saved above the buffer
stmia r14!, {X4-X7}
// Save keystream for x8-x15
ldm r8, {r0-r7} // (x8-x9, x12-x15, x10-x11)
__ldrd r8, r9, sp, 128 // orig x8-x9
__ldrd r10, r11, sp, 136 // orig x10-x11
add r0, r0, r8 // x8
add r1, r1, r9 // x9
add r6, r6, r10 // x10
add r7, r7, r11 // x11
_le32_bswap_4x r0, r1, r6, r7, r8
stmia r14!, {r0,r1,r6,r7}
__ldrd r8, r9, sp, 144 // orig x12-x13
__ldrd r10, r11, sp, 152 // orig x14-x15
add r2, r8, r2, ror #drot // x12
add r3, r9, r3, ror #drot // x13
add r4, r10, r4, ror #drot // x14
add r5, r11, r5, ror #drot // x15
_le32_bswap_4x r2, r3, r4, r5, r9
stmia r14, {r2-r5}
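// Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN
// Registers: r8 is the block counter, r12 is IN.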
ldr r9, [sp, #168] // LEN
ldr r14, [sp, #160] // OUT
cmp r9, #64
mov r0, sp // r0 = &ks0
movle r1, r9 // XOR min(LEN, 64) bytes
movgt r1, #64
// XOR the keystream with the data and write to the output buffer.
.if __LINUX_ARM_ARCH__ < 6
// Pre-ARMv6 CPUs can't do unaligned word accesses, so go byte-at-a-time
// if IN or OUT is misaligned.
orr r2, r12, r14
tst r2, #3 // IN or OUT misaligned?
bne .Lxor_next_byte\@
.endif
// XOR a word at a time
.rept 16
subs r1, #4
blt .Lxor_words_done\@
ldr r2, [r12], #4
ldr r3, [r0], #4
eor r2, r2, r3
str r2, [r14], #4
.endr
b .Lxor_slowpath_done\@
.Lxor_words_done\@:
ands r1, r1, #3 // 0-3 trailing bytes left?
beq .Lxor_slowpath_done\@
// XOR a byte at a time
.Lxor_next_byte\@:
ldrb r2, [r12], #1
ldrb r3, [r0], #1
eor r2, r2, r3
strb r2, [r14], #1
subs r1, #1
bne .Lxor_next_byte\@
.Lxor_slowpath_done\@:
subs r9, #64 // decrement and check LEN
add sp, #96 // free the keystream buffer and spill area
bgt .Lprepare_for_next_block\@
.Ldone\@:
.endm
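// chacha_doarm(dst [r0], src [r1], nbytes [r2], state [r3], nrounds [sp]):
// XOR 'nbytes' bytes of 'src' with the ChaCha keystream derived from the
// 16-word 'state', writing the result to 'dst'; nrounds is 20 or 12.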
ENTRY(chacha_doarm)
cmp r2, #0 // len == 0?
reteq lr
ldr ip, [sp]
cmp ip, #12 // ChaCha12? (flags are still live at the 'beq' below)
push {r0-r2,r4-r11,lr}
// Push the state x0-x15 onto the stack, plus an extra copy of x10-x11
// just below it for the register-swapping scheme.
add X12, r3, #48
ldm X12, {X12,X13,X14,X15} // load x12-x15
push {X12,X13,X14,X15}
sub sp, sp, #64
__ldrd X8_X10, X9_X11, r3, 40 // load x10-x11
__strd X8_X10, X9_X11, sp, 8 // x10-x11 copy below the state
__strd X8_X10, X9_X11, sp, 56 // x10-x11 within the state
ldm r3, {X0-X9_X11} // load x0-x9
__strd X0, X1, sp, 16
__strd X2, X3, sp, 24
__strd X4, X5, sp, 32
__strd X6, X7, sp, 40
__strd X8_X10, X9_X11, sp, 48
beq 1f // nrounds == 12
_chacha 20
0: add sp, #76 // skip the state copy and the saved r0-r2
pop {r4-r11, pc}
1: _chacha 12
b 0b
ENDPROC(chacha_doarm)
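// hchacha_block_arm(state [r0], out [r1], nrounds [r2]):
// Apply the ChaCha permutation to the 16-word 'state' and write words
// 0-3 and 12-15 of the result to 'out' (e.g. for XChaCha key derivation).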
ENTRY(hchacha_block_arm)
push {r1,r4-r11,lr}
cmp r2, #12 // ChaCha12? (flags are still live at the 'beq' below)
mov r14, r0
ldmia r14!, {r0-r11} // load x0-x11
push {r10-r11} // store x10-x11 to the stack
ldm r14, {r10-r12,r14} // load x12-x15
sub sp, #8 // sp+0: x8-x9 spill slot; sp+8: x10-x11
beq 1f
_chacha_permute 20
// Skip over (unused0-unused1, x10-x11)
0: add sp, #16
// Fix up the rotations of x12-x15
ror X12, X12, #drot
ror X13, X13, #drot
pop {r4} // load 'out'
ror X14, X14, #drot
ror X15, X15, #drot
// Store (x0-x3, x12-x15) to 'out'
stm r4, {X0,X1,X2,X3,X12,X13,X14,X15}
pop {r4-r11,pc}
1: _chacha_permute 12
b 0b
ENDPROC(hchacha_block_arm)