#include <asm/cache.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/unistd.h>
#include <asm/vdso.h>
#include <asm/ptrace.h>
/*
 * 64-bit rt-signal trampoline, mapped into every process via the vDSO.
 * The kernel arranges for the interrupted thread to "return" here; the
 * .eh_frame data below lets unwinders walk out of a signal handler.
 */
.text
.balign 8
.balign IFETCH_ALIGN_BYTES	/* align entry for instruction fetch */
V_FUNCTION_BEGIN(__kernel_start_sigtramp_rt64)
.Lsigrt_start:
bctrl	/* call the signal handler (its address was loaded into CTR) */
V_FUNCTION_END(__kernel_start_sigtramp_rt64)
/*
 * Return path after the handler: pop the dummy signal stack frame and
 * invoke rt_sigreturn to restore the pre-signal context.
 * NOTE(review): debuggers and unwinders recognize the signal trampoline
 * by this exact instruction sequence — do not modify it.
 */
V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
addi r1, r1, __SIGNAL_FRAMESIZE	/* pop the signal frame */
li r0,__NR_rt_sigreturn	/* syscall number for rt_sigreturn */
sc	/* enter the kernel; the old context is restored, no return here */
.Lsigrt_end:
V_FUNCTION_END(__kernel_sigtramp_rt64)
/*
 * Zero words following the trampoline.
 * NOTE(review): these appear to mimic the layout of the old on-stack
 * trampoline so that legacy unwind code keeps working (the -21*8 value
 * presumably plays the role of the old ucontext pointer) — confirm
 * against the historical stack-trampoline layout before touching.
 */
.long 0,0,0
.quad 0,-21*8
/*
 * DWARF CFA rule: the CFA is recovered by loading the pt_regs pointer
 * stored at r1 + PTREGS, then dereferencing the saved-r1 slot (offset
 * 1*RSIZE) inside pt_regs.
 */
#define cfa_save \
.byte 0x0f; /* DW_CFA_def_cfa_expression */ \
.uleb128 9f - 1f; /* length of the expression */ \
1: \
.byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1: r1 + PTREGS */ \
.byte 0x06; /* DW_OP_deref -> pt_regs pointer */ \
.byte 0x23; .uleb128 RSIZE; /* DW_OP_plus_uconst: slot of saved r1 */ \
.byte 0x06; /* DW_OP_deref -> previous stack pointer */ \
9:
/*
 * DWARF rule for register `regno`: it is saved at offset `ofs` inside
 * the pt_regs block found via the pointer at r1 + PTREGS.
 * A zero `ofs` skips the add (the register is at pt_regs itself).
 */
#define rsave(regno, ofs) \
.byte 0x10; /* DW_CFA_expression */ \
.uleb128 regno; /* DWARF register column */ \
.uleb128 9f - 1f; /* length of the expression */ \
1: \
.byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1: r1 + PTREGS */ \
.byte 0x06; /* DW_OP_deref -> pt_regs pointer */ \
.ifne ofs; \
.byte 0x23; .uleb128 ofs; /* DW_OP_plus_uconst: + ofs */ \
.endif; \
9:
/*
 * DWARF rule for VMX register `regno` (DWARF column regno + 77),
 * guarded by the VEC bit (1 << 25) of the MSR saved in pt_regs:
 * if the bit is clear, evaluation ends with 0 on top of the stack;
 * if set, the result is v_regs + regno * 16.
 * vsave_msr1/vsave_msr2 below DW_OP_skip back into this expression at
 * labels 2: and 3: — those labels are part of the contract, keep them.
 */
#define vsave_msr0(regno) \
.byte 0x10; /* DW_CFA_expression */ \
.uleb128 regno + 77; /* DWARF column for VMX reg */ \
.uleb128 9f - 1f; /* length of the expression */ \
1: \
.byte 0x30 + regno; /* DW_OP_lit<regno> */ \
2: \
.byte 0x40; /* DW_OP_lit16 */ \
.byte 0x1e; /* DW_OP_mul -> regno * 16 */ \
3: \
.byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1: r1 + PTREGS */ \
.byte 0x06; /* DW_OP_deref -> pt_regs pointer */ \
.byte 0x12; /* DW_OP_dup (keep pt_regs ptr) */ \
.byte 0x23; /* DW_OP_plus_uconst */ \
.uleb128 33*RSIZE; /* offset of saved MSR within pt_regs */ \
.byte 0x06; /* DW_OP_deref -> saved MSR */ \
.byte 0x0c; .long 1 << 25; /* DW_OP_const4u: MSR VEC bit */ \
.byte 0x1a; /* DW_OP_and */ \
.byte 0x12; /* DW_OP_dup: leaves 0 on top if the branch is taken */ \
.byte 0x30; /* DW_OP_lit0 */ \
.byte 0x29; /* DW_OP_eq */ \
.byte 0x28; .short 0x7fff; /* DW_OP_bra far past 9f: VEC clear -> done */ \
.byte 0x13; /* DW_OP_drop the duplicated flag */ \
.byte 0x23; .uleb128 VREGS; /* DW_OP_plus_uconst: slot of v_regs ptr */ \
.byte 0x06; /* DW_OP_deref -> v_regs pointer */ \
.byte 0x22; /* DW_OP_plus: + regno * 16 */ \
.byte 0x2f; .short 0x7fff; /* DW_OP_skip far past 9f: end evaluation */ \
9:
/*
 * Compact VMX rule: push regno, then DW_OP_skip backwards into the
 * most recent vsave_msr0 expansion at its 2: label, reusing the
 * "* 16, MSR check, + v_regs" tail.  Must appear after a vsave_msr0
 * in the same FDE, close enough for the 16-bit skip offset.
 */
#define vsave_msr1(regno) \
.byte 0x10; /* DW_CFA_expression */ \
.uleb128 regno + 77; /* DWARF column for VMX reg */ \
.uleb128 9f - 1f; /* length of the expression */ \
1: \
.byte 0x30 + regno; /* DW_OP_lit<regno> */ \
.byte 0x2f; .short 2b - 9f; /* DW_OP_skip back to 2: in vsave_msr0 */ \
9:
/*
 * MSR-guarded VMX rule with a fixed byte offset `ofs` from v_regs
 * (used below for VSCR): push ofs, then DW_OP_skip backwards into the
 * preceding vsave_msr0 expansion at its 3: label (MSR check + v_regs
 * addition), bypassing the regno*16 computation.
 */
#define vsave_msr2(regno, ofs) \
.byte 0x10; /* DW_CFA_expression */ \
.uleb128 regno + 77; /* DWARF column for VMX reg */ \
.uleb128 9f - 1f; /* length of the expression */ \
1: \
.byte 0x0a; .short ofs; /* DW_OP_const2u: fixed offset from v_regs */ \
.byte 0x2f; .short 3b - 9f; /* DW_OP_skip back to 3: in vsave_msr0 */ \
9:
/*
 * Unconditional VMX rule (no MSR_VEC check): register `regno` is at
 * offset `ofs` from the v_regs area, reached via the v_regs pointer
 * stored at pt_regs + VREGS.
 */
#define vsave(regno, ofs) \
.byte 0x10; /* DW_CFA_expression */ \
.uleb128 regno + 77; /* DWARF column for VMX reg */ \
.uleb128 9f - 1f; /* length of the expression */ \
1: \
.byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1: r1 + PTREGS */ \
.byte 0x06; /* DW_OP_deref -> pt_regs pointer */ \
.byte 0x23; .uleb128 VREGS; /* DW_OP_plus_uconst: slot of v_regs ptr */ \
.byte 0x06; /* DW_OP_deref -> v_regs pointer */ \
.byte 0x23; .uleb128 ofs; /* DW_OP_plus_uconst: + ofs */ \
9:
/*
 * Offsets consumed by the expressions above.
 * NOTE(review): these must track the kernel's rt-signal frame layout
 * (struct rt_sigframe / pt_regs / mcontext); verify against those
 * structures whenever the layout changes.
 */
/* Offset from r1 of the slot holding the pt_regs pointer — presumably
   __SIGNAL_FRAMESIZE (128) plus ucontext/mcontext offsets (168+56);
   TODO confirm against struct rt_sigframe. */
#define PTREGS 128+168+56
/* Size of one general-register save slot. */
#define RSIZE 8
/* Size of CR as described in the DWARF unwind info. */
#define CRSIZE 4
/* Byte offset of the CR value within its RSIZE-wide save slot. */
#ifdef __LITTLE_ENDIAN__
#define CROFF 0
#else
#define CROFF (RSIZE - CRSIZE)
#endif
/* Offset (from the pt_regs pointer) of the slot holding the pointer
   to the VMX register save area (v_regs). */
#define VREGS 48*RSIZE+33*8
/*
 * Save locations of the general registers inside pt_regs: the CFA
 * rule, gpr0..gpr31 at regno*RSIZE (gpr1 is the CFA itself, handled
 * by cfa_save; it is still listed at 1*RSIZE via the loop below? no —
 * note r1 is deliberately absent from the rsave list), then NIP, LR
 * and the CR fields.  Offsets follow the struct pt_regs layout —
 * NOTE(review): confirm 32/36/38*RSIZE against pt_regs (nip/link/ccr).
 */
#define EH_FRAME_GEN \
cfa_save; \
rsave ( 0, 0*RSIZE); \
rsave ( 2, 2*RSIZE); \
rsave ( 3, 3*RSIZE); \
rsave ( 4, 4*RSIZE); \
rsave ( 5, 5*RSIZE); \
rsave ( 6, 6*RSIZE); \
rsave ( 7, 7*RSIZE); \
rsave ( 8, 8*RSIZE); \
rsave ( 9, 9*RSIZE); \
rsave (10, 10*RSIZE); \
rsave (11, 11*RSIZE); \
rsave (12, 12*RSIZE); \
rsave (13, 13*RSIZE); \
rsave (14, 14*RSIZE); \
rsave (15, 15*RSIZE); \
rsave (16, 16*RSIZE); \
rsave (17, 17*RSIZE); \
rsave (18, 18*RSIZE); \
rsave (19, 19*RSIZE); \
rsave (20, 20*RSIZE); \
rsave (21, 21*RSIZE); \
rsave (22, 22*RSIZE); \
rsave (23, 23*RSIZE); \
rsave (24, 24*RSIZE); \
rsave (25, 25*RSIZE); \
rsave (26, 26*RSIZE); \
rsave (27, 27*RSIZE); \
rsave (28, 28*RSIZE); \
rsave (29, 29*RSIZE); \
rsave (30, 30*RSIZE); \
rsave (31, 31*RSIZE); \
rsave (67, 32*RSIZE); /* NIP — also the CIE return-address column */ \
rsave (65, 36*RSIZE); /* LR */ \
rsave (68, 38*RSIZE + CROFF); /* cr0..cr7 all share one CR save word */ \
rsave (69, 38*RSIZE + CROFF); \
rsave (70, 38*RSIZE + CROFF); \
rsave (71, 38*RSIZE + CROFF); \
rsave (72, 38*RSIZE + CROFF); \
rsave (73, 38*RSIZE + CROFF); \
rsave (74, 38*RSIZE + CROFF); \
rsave (75, 38*RSIZE + CROFF)
/*
 * Floating-point registers f0..f31 (DWARF columns 32..63): saved as
 * 8-byte doubles in an array that starts at offset 48*RSIZE inside
 * pt_regs-relative save area, one slot per register.
 */
#define EH_FRAME_FP \
rsave (32, 48*RSIZE + 0*8); \
rsave (33, 48*RSIZE + 1*8); \
rsave (34, 48*RSIZE + 2*8); \
rsave (35, 48*RSIZE + 3*8); \
rsave (36, 48*RSIZE + 4*8); \
rsave (37, 48*RSIZE + 5*8); \
rsave (38, 48*RSIZE + 6*8); \
rsave (39, 48*RSIZE + 7*8); \
rsave (40, 48*RSIZE + 8*8); \
rsave (41, 48*RSIZE + 9*8); \
rsave (42, 48*RSIZE + 10*8); \
rsave (43, 48*RSIZE + 11*8); \
rsave (44, 48*RSIZE + 12*8); \
rsave (45, 48*RSIZE + 13*8); \
rsave (46, 48*RSIZE + 14*8); \
rsave (47, 48*RSIZE + 15*8); \
rsave (48, 48*RSIZE + 16*8); \
rsave (49, 48*RSIZE + 17*8); \
rsave (50, 48*RSIZE + 18*8); \
rsave (51, 48*RSIZE + 19*8); \
rsave (52, 48*RSIZE + 20*8); \
rsave (53, 48*RSIZE + 21*8); \
rsave (54, 48*RSIZE + 22*8); \
rsave (55, 48*RSIZE + 23*8); \
rsave (56, 48*RSIZE + 24*8); \
rsave (57, 48*RSIZE + 25*8); \
rsave (58, 48*RSIZE + 26*8); \
rsave (59, 48*RSIZE + 27*8); \
rsave (60, 48*RSIZE + 28*8); \
rsave (61, 48*RSIZE + 29*8); \
rsave (62, 48*RSIZE + 30*8); \
rsave (63, 48*RSIZE + 31*8)
#ifdef CONFIG_ALTIVEC
/*
 * AltiVec/VMX registers v0..v31 plus VSCR and VRSAVE.  v0 uses the
 * full vsave_msr0 expression; v1..v31 reuse its tail via vsave_msr1's
 * backward skip, so vsave_msr0(0) MUST come first and stay within
 * 16-bit skip range.  VSCR shares the MSR check via vsave_msr2;
 * VRSAVE is described unconditionally.  Ordering here is load-bearing.
 */
#define EH_FRAME_VMX \
vsave_msr0 ( 0); \
vsave_msr1 ( 1); \
vsave_msr1 ( 2); \
vsave_msr1 ( 3); \
vsave_msr1 ( 4); \
vsave_msr1 ( 5); \
vsave_msr1 ( 6); \
vsave_msr1 ( 7); \
vsave_msr1 ( 8); \
vsave_msr1 ( 9); \
vsave_msr1 (10); \
vsave_msr1 (11); \
vsave_msr1 (12); \
vsave_msr1 (13); \
vsave_msr1 (14); \
vsave_msr1 (15); \
vsave_msr1 (16); \
vsave_msr1 (17); \
vsave_msr1 (18); \
vsave_msr1 (19); \
vsave_msr1 (20); \
vsave_msr1 (21); \
vsave_msr1 (22); \
vsave_msr1 (23); \
vsave_msr1 (24); \
vsave_msr1 (25); \
vsave_msr1 (26); \
vsave_msr1 (27); \
vsave_msr1 (28); \
vsave_msr1 (29); \
vsave_msr1 (30); \
vsave_msr1 (31); \
vsave_msr2 (33, 32*16+12); /* VSCR: word within the slot after v31 */ \
vsave (32, 33*16) /* VRSAVE */
#else
/* Without AltiVec support there is nothing to describe. */
#define EH_FRAME_VMX
#endif
/*
 * .eh_frame unwind data: one CIE followed by a single FDE covering the
 * whole trampoline (.Lsigrt_start .. .Lsigrt_end), emitted from the
 * macros above.
 */
.section .eh_frame,"a",@progbits
.Lcie:
.long .Lcie_end - .Lcie_start /* CIE length (excluding this word) */
.Lcie_start:
.long 0 /* CIE identifier tag (0 marks a CIE in .eh_frame) */
.byte 1 /* CIE version */
.string "zRS" /* augmentation: z=data present, R=FDE encoding, S=signal frame */
.uleb128 4 /* code alignment factor */
.sleb128 -8 /* data alignment factor */
.byte 67 /* return-address column — NIP, matching rsave(67,...) above */
.uleb128 1 /* augmentation data length */
.byte 0x14 /* FDE pointer encoding: DW_EH_PE_pcrel | DW_EH_PE_udata8 */
.byte 0x0c,1,0 /* DW_CFA_def_cfa: register r1, offset 0 */
.balign 8
.Lcie_end:
.long .Lfde0_end - .Lfde0_start /* FDE length (excluding this word) */
.Lfde0_start:
.long .Lfde0_start - .Lcie /* backwards offset to the parent CIE */
.quad .Lsigrt_start - . /* initial PC, pcrel per the R encoding above */
.quad .Lsigrt_end - .Lsigrt_start /* address range covered */
.uleb128 0 /* augmentation data length */
EH_FRAME_GEN
EH_FRAME_FP
EH_FRAME_VMX
# .byte 0x41
/* NOTE(review): the redefinition below (pt_regs-pointer offset after
   the addi has popped __SIGNAL_FRAMESIZE) belongs to a disabled second
   frame-state description (see the commented-out advance_loc above);
   it is currently unused. */
#undef PTREGS
#define PTREGS 168+56
.balign 8
.Lfde0_end: