author     Tom Stellard <tstellar@redhat.com>    2019-06-11 03:47:50 +0000
committer  Tom Stellard <tstellar@redhat.com>    2019-06-11 03:47:50 +0000
commit     2fb27a25fcb66e33152a278a6c6f284453fe5228 (patch)
tree       aeaa3e8a4979429f10541caa00ce2277b34b7960
parent     Add release note for DIBuilder API changes (diff)
Merging r360862: (tag: llvmorg-8.0.1-rc2)
------------------------------------------------------------------------
r360862 | mstorsjo | 2019-05-15 23:49:20 -0700 (Wed, 15 May 2019) | 12 lines
[PPC] Fix 32-bit build of libunwind
The Clang integrated assembler was unable to build the libunwind PPC32 assembly
code present in the functions used to save and restore the register context.

This change replaces the assembly style used in the libunwind source with one
that is compatible with both the Clang integrated assembler and the GNU
assembler.
Patch by Leandro Lupori!
Differential Revision: https://reviews.llvm.org/D61792
------------------------------------------------------------------------
llvm-svn: 363030
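
To make the fix concrete before the full diff: the old PPC32 code was written in
Apple-style assembly, with `;` comments, bare register names (r3, f0, v0), and
`@` as the statement separator inside multi-line preprocessor macros; the patch
rewrites it with `//` comments, `%`-prefixed registers, and the shared SEPARATOR
macro. A minimal before/after sketch (distilled from the diff below, not part of
the original commit message):

    ; before: Apple-style syntax, which Clang's integrated assembler could not parse here
    lwz     r2, 16(r3)

    // after: accepted by both the Clang integrated assembler and GNU as
    lwz     %r2, 16(%r3)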
-rw-r--r--  libunwind/src/UnwindRegistersRestore.S  238
-rw-r--r--  libunwind/src/UnwindRegistersSave.S     270
-rw-r--r--  libunwind/src/assembly.h                  2
3 files changed, 254 insertions(+), 256 deletions(-)
diff --git a/libunwind/src/UnwindRegistersRestore.S b/libunwind/src/UnwindRegistersRestore.S
index 389db67579cd..a155fbe2ddf2 100644
--- a/libunwind/src/UnwindRegistersRestore.S
+++ b/libunwind/src/UnwindRegistersRestore.S
@@ -396,119 +396,119 @@ Lnovec:
 #elif defined(__ppc__)
 
 DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
-;
-; void libunwind::Registers_ppc::jumpto()
-;
-; On entry:
-;  thread_state pointer is in r3
-;
-
-  ; restore integral registerrs
-  ; skip r0 for now
-  ; skip r1 for now
-  lwz     r2, 16(r3)
-  ; skip r3 for now
-  ; skip r4 for now
-  ; skip r5 for now
-  lwz     r6, 32(r3)
-  lwz     r7, 36(r3)
-  lwz     r8, 40(r3)
-  lwz     r9, 44(r3)
-  lwz     r10, 48(r3)
-  lwz     r11, 52(r3)
-  lwz     r12, 56(r3)
-  lwz     r13, 60(r3)
-  lwz     r14, 64(r3)
-  lwz     r15, 68(r3)
-  lwz     r16, 72(r3)
-  lwz     r17, 76(r3)
-  lwz     r18, 80(r3)
-  lwz     r19, 84(r3)
-  lwz     r20, 88(r3)
-  lwz     r21, 92(r3)
-  lwz     r22, 96(r3)
-  lwz     r23,100(r3)
-  lwz     r24,104(r3)
-  lwz     r25,108(r3)
-  lwz     r26,112(r3)
-  lwz     r27,116(r3)
-  lwz     r28,120(r3)
-  lwz     r29,124(r3)
-  lwz     r30,128(r3)
-  lwz     r31,132(r3)
-
-  ; restore float registers
-  lfd     f0, 160(r3)
-  lfd     f1, 168(r3)
-  lfd     f2, 176(r3)
-  lfd     f3, 184(r3)
-  lfd     f4, 192(r3)
-  lfd     f5, 200(r3)
-  lfd     f6, 208(r3)
-  lfd     f7, 216(r3)
-  lfd     f8, 224(r3)
-  lfd     f9, 232(r3)
-  lfd     f10,240(r3)
-  lfd     f11,248(r3)
-  lfd     f12,256(r3)
-  lfd     f13,264(r3)
-  lfd     f14,272(r3)
-  lfd     f15,280(r3)
-  lfd     f16,288(r3)
-  lfd     f17,296(r3)
-  lfd     f18,304(r3)
-  lfd     f19,312(r3)
-  lfd     f20,320(r3)
-  lfd     f21,328(r3)
-  lfd     f22,336(r3)
-  lfd     f23,344(r3)
-  lfd     f24,352(r3)
-  lfd     f25,360(r3)
-  lfd     f26,368(r3)
-  lfd     f27,376(r3)
-  lfd     f28,384(r3)
-  lfd     f29,392(r3)
-  lfd     f30,400(r3)
-  lfd     f31,408(r3)
-
-  ; restore vector registers if any are in use
-  lwz     r5,156(r3)   ; test VRsave
-  cmpwi   r5,0
-  beq     Lnovec
-
-  subi    r4,r1,16
-  rlwinm  r4,r4,0,0,27  ; mask low 4-bits
-  ; r4 is now a 16-byte aligned pointer into the red zone
-  ; the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
+//
+// void libunwind::Registers_ppc::jumpto()
+//
+// On entry:
+//  thread_state pointer is in r3
+//
+
+  // restore integral registerrs
+  // skip r0 for now
+  // skip r1 for now
+  lwz     %r2, 16(%r3)
+  // skip r3 for now
+  // skip r4 for now
+  // skip r5 for now
+  lwz     %r6, 32(%r3)
+  lwz     %r7, 36(%r3)
+  lwz     %r8, 40(%r3)
+  lwz     %r9, 44(%r3)
+  lwz     %r10, 48(%r3)
+  lwz     %r11, 52(%r3)
+  lwz     %r12, 56(%r3)
+  lwz     %r13, 60(%r3)
+  lwz     %r14, 64(%r3)
+  lwz     %r15, 68(%r3)
+  lwz     %r16, 72(%r3)
+  lwz     %r17, 76(%r3)
+  lwz     %r18, 80(%r3)
+  lwz     %r19, 84(%r3)
+  lwz     %r20, 88(%r3)
+  lwz     %r21, 92(%r3)
+  lwz     %r22, 96(%r3)
+  lwz     %r23,100(%r3)
+  lwz     %r24,104(%r3)
+  lwz     %r25,108(%r3)
+  lwz     %r26,112(%r3)
+  lwz     %r27,116(%r3)
+  lwz     %r28,120(%r3)
+  lwz     %r29,124(%r3)
+  lwz     %r30,128(%r3)
+  lwz     %r31,132(%r3)
+
+  // restore float registers
+  lfd     %f0, 160(%r3)
+  lfd     %f1, 168(%r3)
+  lfd     %f2, 176(%r3)
+  lfd     %f3, 184(%r3)
+  lfd     %f4, 192(%r3)
+  lfd     %f5, 200(%r3)
+  lfd     %f6, 208(%r3)
+  lfd     %f7, 216(%r3)
+  lfd     %f8, 224(%r3)
+  lfd     %f9, 232(%r3)
+  lfd     %f10,240(%r3)
+  lfd     %f11,248(%r3)
+  lfd     %f12,256(%r3)
+  lfd     %f13,264(%r3)
+  lfd     %f14,272(%r3)
+  lfd     %f15,280(%r3)
+  lfd     %f16,288(%r3)
+  lfd     %f17,296(%r3)
+  lfd     %f18,304(%r3)
+  lfd     %f19,312(%r3)
+  lfd     %f20,320(%r3)
+  lfd     %f21,328(%r3)
+  lfd     %f22,336(%r3)
+  lfd     %f23,344(%r3)
+  lfd     %f24,352(%r3)
+  lfd     %f25,360(%r3)
+  lfd     %f26,368(%r3)
+  lfd     %f27,376(%r3)
+  lfd     %f28,384(%r3)
+  lfd     %f29,392(%r3)
+  lfd     %f30,400(%r3)
+  lfd     %f31,408(%r3)
+
+  // restore vector registers if any are in use
+  lwz     %r5, 156(%r3)  // test VRsave
+  cmpwi   %r5, 0
+  beq     Lnovec
+  subi    %r4, %r1, 16
+  rlwinm  %r4, %r4, 0, 0, 27  // mask low 4-bits
+  // r4 is now a 16-byte aligned pointer into the red zone
+  // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
+
 #define LOAD_VECTOR_UNALIGNEDl(_index) \
-  andis.  r0,r5,(1<<(15-_index))  @\
-  beq     Ldone ## _index         @\
-  lwz     r0, 424+_index*16(r3)   @\
-  stw     r0, 0(r4)               @\
-  lwz     r0, 424+_index*16+4(r3) @\
-  stw     r0, 4(r4)               @\
-  lwz     r0, 424+_index*16+8(r3) @\
-  stw     r0, 8(r4)               @\
-  lwz     r0, 424+_index*16+12(r3)@\
-  stw     r0, 12(r4)              @\
-  lvx     v ## _index,0,r4        @\
-Ldone ## _index:
+  andis.  %r0, %r5, (1<<(15-_index)) SEPARATOR \
+  beq     Ldone ## _index SEPARATOR \
+  lwz     %r0, 424+_index*16(%r3) SEPARATOR \
+  stw     %r0, 0(%r4) SEPARATOR \
+  lwz     %r0, 424+_index*16+4(%r3) SEPARATOR \
+  stw     %r0, 4(%r4) SEPARATOR \
+  lwz     %r0, 424+_index*16+8(%r3) SEPARATOR \
+  stw     %r0, 8(%r4) SEPARATOR \
+  lwz     %r0, 424+_index*16+12(%r3) SEPARATOR \
+  stw     %r0, 12(%r4) SEPARATOR \
+  lvx     %v ## _index, 0, %r4 SEPARATOR \
+  Ldone ## _index:
 
 #define LOAD_VECTOR_UNALIGNEDh(_index) \
-  andi.   r0,r5,(1<<(31-_index))  @\
-  beq     Ldone ## _index         @\
-  lwz     r0, 424+_index*16(r3)   @\
-  stw     r0, 0(r4)               @\
-  lwz     r0, 424+_index*16+4(r3) @\
-  stw     r0, 4(r4)               @\
-  lwz     r0, 424+_index*16+8(r3) @\
-  stw     r0, 8(r4)               @\
-  lwz     r0, 424+_index*16+12(r3)@\
-  stw     r0, 12(r4)              @\
-  lvx     v ## _index,0,r4        @\
-  Ldone ## _index:
+  andi.   %r0, %r5, (1<<(31-_index)) SEPARATOR \
+  beq     Ldone ## _index SEPARATOR \
+  lwz     %r0, 424+_index*16(%r3) SEPARATOR \
+  stw     %r0, 0(%r4) SEPARATOR \
+  lwz     %r0, 424+_index*16+4(%r3) SEPARATOR \
+  stw     %r0, 4(%r4) SEPARATOR \
+  lwz     %r0, 424+_index*16+8(%r3) SEPARATOR \
+  stw     %r0, 8(%r4) SEPARATOR \
+  lwz     %r0, 424+_index*16+12(%r3) SEPARATOR \
+  stw     %r0, 12(%r4) SEPARATOR \
+  lvx     %v ## _index, 0, %r4 SEPARATOR \
+  Ldone ## _index:
 
   LOAD_VECTOR_UNALIGNEDl(0)
@@ -545,17 +545,17 @@ Ldone ## _index:
   LOAD_VECTOR_UNALIGNEDh(31)
 
 Lnovec:
-  lwz     r0, 136(r3) ; __cr
-  mtocrf  255,r0
-  lwz     r0, 148(r3) ; __ctr
-  mtctr   r0
-  lwz     r0, 0(r3)   ; __ssr0
-  mtctr   r0
-  lwz     r0, 8(r3)   ; do r0 now
-  lwz     r5,28(r3)   ; do r5 now
-  lwz     r4,24(r3)   ; do r4 now
-  lwz     r1,12(r3)   ; do sp now
-  lwz     r3,20(r3)   ; do r3 last
+  lwz     %r0, 136(%r3)  // __cr
+  mtcr    %r0
+  lwz     %r0, 148(%r3)  // __ctr
+  mtctr   %r0
+  lwz     %r0, 0(%r3)    // __ssr0
+  mtctr   %r0
+  lwz     %r0, 8(%r3)    // do r0 now
+  lwz     %r5, 28(%r3)   // do r5 now
+  lwz     %r4, 24(%r3)   // do r4 now
+  lwz     %r1, 12(%r3)   // do sp now
+  lwz     %r3, 20(%r3)   // do r3 last
   bctr
 
 #elif defined(__arm64__) || defined(__aarch64__)
diff --git a/libunwind/src/UnwindRegistersSave.S b/libunwind/src/UnwindRegistersSave.S
index 48ecb0aec70f..4b674afc6bdc 100644
--- a/libunwind/src/UnwindRegistersSave.S
+++ b/libunwind/src/UnwindRegistersSave.S
@@ -557,144 +557,144 @@ DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
 
 #elif defined(__ppc__)
 
-;
-; extern int unw_getcontext(unw_context_t* thread_state)
-;
-; On entry:
-;  thread_state pointer is in r3
-;
+//
+// extern int unw_getcontext(unw_context_t* thread_state)
+//
+// On entry:
+//  thread_state pointer is in r3
+//
 DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
-  stw     r0, 8(r3)
-  mflr    r0
-  stw     r0, 0(r3) ; store lr as ssr0
-  stw     r1, 12(r3)
-  stw     r2, 16(r3)
-  stw     r3, 20(r3)
-  stw     r4, 24(r3)
-  stw     r5, 28(r3)
-  stw     r6, 32(r3)
-  stw     r7, 36(r3)
-  stw     r8, 40(r3)
-  stw     r9, 44(r3)
-  stw     r10, 48(r3)
-  stw     r11, 52(r3)
-  stw     r12, 56(r3)
-  stw     r13, 60(r3)
-  stw     r14, 64(r3)
-  stw     r15, 68(r3)
-  stw     r16, 72(r3)
-  stw     r17, 76(r3)
-  stw     r18, 80(r3)
-  stw     r19, 84(r3)
-  stw     r20, 88(r3)
-  stw     r21, 92(r3)
-  stw     r22, 96(r3)
-  stw     r23,100(r3)
-  stw     r24,104(r3)
-  stw     r25,108(r3)
-  stw     r26,112(r3)
-  stw     r27,116(r3)
-  stw     r28,120(r3)
-  stw     r29,124(r3)
-  stw     r30,128(r3)
-  stw     r31,132(r3)
-
-  ; save VRSave register
-  mfspr   r0,256
-  stw     r0,156(r3)
-  ; save CR registers
-  mfcr    r0
-  stw     r0,136(r3)
-  ; save CTR register
-  mfctr   r0
-  stw     r0,148(r3)
-
-  ; save float registers
-  stfd    f0, 160(r3)
-  stfd    f1, 168(r3)
-  stfd    f2, 176(r3)
-  stfd    f3, 184(r3)
-  stfd    f4, 192(r3)
-  stfd    f5, 200(r3)
-  stfd    f6, 208(r3)
-  stfd    f7, 216(r3)
-  stfd    f8, 224(r3)
-  stfd    f9, 232(r3)
-  stfd    f10,240(r3)
-  stfd    f11,248(r3)
-  stfd    f12,256(r3)
-  stfd    f13,264(r3)
-  stfd    f14,272(r3)
-  stfd    f15,280(r3)
-  stfd    f16,288(r3)
-  stfd    f17,296(r3)
-  stfd    f18,304(r3)
-  stfd    f19,312(r3)
-  stfd    f20,320(r3)
-  stfd    f21,328(r3)
-  stfd    f22,336(r3)
-  stfd    f23,344(r3)
-  stfd    f24,352(r3)
-  stfd    f25,360(r3)
-  stfd    f26,368(r3)
-  stfd    f27,376(r3)
-  stfd    f28,384(r3)
-  stfd    f29,392(r3)
-  stfd    f30,400(r3)
-  stfd    f31,408(r3)
-
-
-  ; save vector registers
-
-  subi    r4,r1,16
-  rlwinm  r4,r4,0,0,27   ; mask low 4-bits
-  ; r4 is now a 16-byte aligned pointer into the red zone
+  stw     %r0, 8(%r3)
+  mflr    %r0
+  stw     %r0, 0(%r3)  // store lr as ssr0
+  stw     %r1, 12(%r3)
+  stw     %r2, 16(%r3)
+  stw     %r3, 20(%r3)
+  stw     %r4, 24(%r3)
+  stw     %r5, 28(%r3)
+  stw     %r6, 32(%r3)
+  stw     %r7, 36(%r3)
+  stw     %r8, 40(%r3)
+  stw     %r9, 44(%r3)
+  stw     %r10, 48(%r3)
+  stw     %r11, 52(%r3)
+  stw     %r12, 56(%r3)
+  stw     %r13, 60(%r3)
+  stw     %r14, 64(%r3)
+  stw     %r15, 68(%r3)
+  stw     %r16, 72(%r3)
+  stw     %r17, 76(%r3)
+  stw     %r18, 80(%r3)
+  stw     %r19, 84(%r3)
+  stw     %r20, 88(%r3)
+  stw     %r21, 92(%r3)
+  stw     %r22, 96(%r3)
+  stw     %r23,100(%r3)
+  stw     %r24,104(%r3)
+  stw     %r25,108(%r3)
+  stw     %r26,112(%r3)
+  stw     %r27,116(%r3)
+  stw     %r28,120(%r3)
+  stw     %r29,124(%r3)
+  stw     %r30,128(%r3)
+  stw     %r31,132(%r3)
+
+  // save VRSave register
+  mfspr   %r0, 256
+  stw     %r0, 156(%r3)
+  // save CR registers
+  mfcr    %r0
+  stw     %r0, 136(%r3)
+  // save CTR register
+  mfctr   %r0
+  stw     %r0, 148(%r3)
+
+  // save float registers
+  stfd    %f0, 160(%r3)
+  stfd    %f1, 168(%r3)
+  stfd    %f2, 176(%r3)
+  stfd    %f3, 184(%r3)
+  stfd    %f4, 192(%r3)
+  stfd    %f5, 200(%r3)
+  stfd    %f6, 208(%r3)
+  stfd    %f7, 216(%r3)
+  stfd    %f8, 224(%r3)
+  stfd    %f9, 232(%r3)
+  stfd    %f10,240(%r3)
+  stfd    %f11,248(%r3)
+  stfd    %f12,256(%r3)
+  stfd    %f13,264(%r3)
+  stfd    %f14,272(%r3)
+  stfd    %f15,280(%r3)
+  stfd    %f16,288(%r3)
+  stfd    %f17,296(%r3)
+  stfd    %f18,304(%r3)
+  stfd    %f19,312(%r3)
+  stfd    %f20,320(%r3)
+  stfd    %f21,328(%r3)
+  stfd    %f22,336(%r3)
+  stfd    %f23,344(%r3)
+  stfd    %f24,352(%r3)
+  stfd    %f25,360(%r3)
+  stfd    %f26,368(%r3)
+  stfd    %f27,376(%r3)
+  stfd    %f28,384(%r3)
+  stfd    %f29,392(%r3)
+  stfd    %f30,400(%r3)
+  stfd    %f31,408(%r3)
+
+
+  // save vector registers
+
+  subi    %r4, %r1, 16
+  rlwinm  %r4, %r4, 0, 0, 27  // mask low 4-bits
+  // r4 is now a 16-byte aligned pointer into the red zone
 
 #define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
-  stvx    _vec,0,r4          @\
-  lwz     r5, 0(r4)          @\
-  stw     r5, _offset(r3)    @\
-  lwz     r5, 4(r4)          @\
-  stw     r5, _offset+4(r3)  @\
-  lwz     r5, 8(r4)          @\
-  stw     r5, _offset+8(r3)  @\
-  lwz     r5, 12(r4)         @\
-  stw     r5, _offset+12(r3)
-
-  SAVE_VECTOR_UNALIGNED( v0, 424+0x000)
-  SAVE_VECTOR_UNALIGNED( v1, 424+0x010)
-  SAVE_VECTOR_UNALIGNED( v2, 424+0x020)
-  SAVE_VECTOR_UNALIGNED( v3, 424+0x030)
-  SAVE_VECTOR_UNALIGNED( v4, 424+0x040)
-  SAVE_VECTOR_UNALIGNED( v5, 424+0x050)
-  SAVE_VECTOR_UNALIGNED( v6, 424+0x060)
-  SAVE_VECTOR_UNALIGNED( v7, 424+0x070)
-  SAVE_VECTOR_UNALIGNED( v8, 424+0x080)
-  SAVE_VECTOR_UNALIGNED( v9, 424+0x090)
-  SAVE_VECTOR_UNALIGNED(v10, 424+0x0A0)
-  SAVE_VECTOR_UNALIGNED(v11, 424+0x0B0)
-  SAVE_VECTOR_UNALIGNED(v12, 424+0x0C0)
-  SAVE_VECTOR_UNALIGNED(v13, 424+0x0D0)
-  SAVE_VECTOR_UNALIGNED(v14, 424+0x0E0)
-  SAVE_VECTOR_UNALIGNED(v15, 424+0x0F0)
-  SAVE_VECTOR_UNALIGNED(v16, 424+0x100)
-  SAVE_VECTOR_UNALIGNED(v17, 424+0x110)
-  SAVE_VECTOR_UNALIGNED(v18, 424+0x120)
-  SAVE_VECTOR_UNALIGNED(v19, 424+0x130)
-  SAVE_VECTOR_UNALIGNED(v20, 424+0x140)
-  SAVE_VECTOR_UNALIGNED(v21, 424+0x150)
-  SAVE_VECTOR_UNALIGNED(v22, 424+0x160)
-  SAVE_VECTOR_UNALIGNED(v23, 424+0x170)
-  SAVE_VECTOR_UNALIGNED(v24, 424+0x180)
-  SAVE_VECTOR_UNALIGNED(v25, 424+0x190)
-  SAVE_VECTOR_UNALIGNED(v26, 424+0x1A0)
-  SAVE_VECTOR_UNALIGNED(v27, 424+0x1B0)
-  SAVE_VECTOR_UNALIGNED(v28, 424+0x1C0)
-  SAVE_VECTOR_UNALIGNED(v29, 424+0x1D0)
-  SAVE_VECTOR_UNALIGNED(v30, 424+0x1E0)
-  SAVE_VECTOR_UNALIGNED(v31, 424+0x1F0)
-
-  li      r3, 0   ; return UNW_ESUCCESS
+  stvx    _vec, 0, %r4 SEPARATOR \
+  lwz     %r5, 0(%r4) SEPARATOR \
+  stw     %r5, _offset(%r3) SEPARATOR \
+  lwz     %r5, 4(%r4) SEPARATOR \
+  stw     %r5, _offset+4(%r3) SEPARATOR \
+  lwz     %r5, 8(%r4) SEPARATOR \
+  stw     %r5, _offset+8(%r3) SEPARATOR \
+  lwz     %r5, 12(%r4) SEPARATOR \
+  stw     %r5, _offset+12(%r3)
+
+  SAVE_VECTOR_UNALIGNED( %v0, 424+0x000)
+  SAVE_VECTOR_UNALIGNED( %v1, 424+0x010)
+  SAVE_VECTOR_UNALIGNED( %v2, 424+0x020)
+  SAVE_VECTOR_UNALIGNED( %v3, 424+0x030)
+  SAVE_VECTOR_UNALIGNED( %v4, 424+0x040)
+  SAVE_VECTOR_UNALIGNED( %v5, 424+0x050)
+  SAVE_VECTOR_UNALIGNED( %v6, 424+0x060)
+  SAVE_VECTOR_UNALIGNED( %v7, 424+0x070)
+  SAVE_VECTOR_UNALIGNED( %v8, 424+0x080)
+  SAVE_VECTOR_UNALIGNED( %v9, 424+0x090)
+  SAVE_VECTOR_UNALIGNED(%v10, 424+0x0A0)
+  SAVE_VECTOR_UNALIGNED(%v11, 424+0x0B0)
+  SAVE_VECTOR_UNALIGNED(%v12, 424+0x0C0)
+  SAVE_VECTOR_UNALIGNED(%v13, 424+0x0D0)
+  SAVE_VECTOR_UNALIGNED(%v14, 424+0x0E0)
+  SAVE_VECTOR_UNALIGNED(%v15, 424+0x0F0)
+  SAVE_VECTOR_UNALIGNED(%v16, 424+0x100)
+  SAVE_VECTOR_UNALIGNED(%v17, 424+0x110)
+  SAVE_VECTOR_UNALIGNED(%v18, 424+0x120)
+  SAVE_VECTOR_UNALIGNED(%v19, 424+0x130)
+  SAVE_VECTOR_UNALIGNED(%v20, 424+0x140)
+  SAVE_VECTOR_UNALIGNED(%v21, 424+0x150)
+  SAVE_VECTOR_UNALIGNED(%v22, 424+0x160)
+  SAVE_VECTOR_UNALIGNED(%v23, 424+0x170)
+  SAVE_VECTOR_UNALIGNED(%v24, 424+0x180)
+  SAVE_VECTOR_UNALIGNED(%v25, 424+0x190)
+  SAVE_VECTOR_UNALIGNED(%v26, 424+0x1A0)
+  SAVE_VECTOR_UNALIGNED(%v27, 424+0x1B0)
+  SAVE_VECTOR_UNALIGNED(%v28, 424+0x1C0)
+  SAVE_VECTOR_UNALIGNED(%v29, 424+0x1D0)
+  SAVE_VECTOR_UNALIGNED(%v30, 424+0x1E0)
+  SAVE_VECTOR_UNALIGNED(%v31, 424+0x1F0)
+
+  li      %r3, 0  // return UNW_ESUCCESS
   blr
 
diff --git a/libunwind/src/assembly.h b/libunwind/src/assembly.h
index 2df930214fae..7806892e9dcf 100644
--- a/libunwind/src/assembly.h
+++ b/libunwind/src/assembly.h
@@ -29,8 +29,6 @@
 #ifdef _ARCH_PWR8
 #define PPC64_HAS_VMX
 #endif
-#elif defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
-#define SEPARATOR @
 #elif defined(__arm64__)
 #define SEPARATOR %%
 #else
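
A note on the assembly.h hunk above: multi-statement assembler macros such as
SAVE_VECTOR_UNALIGNED are expanded by the C preprocessor onto a single line, so
their statements must be joined by a token the assembler treats as a statement
separator. The deleted `#elif` had defined SEPARATOR as `@` for PowerPC, which
the Clang integrated assembler does not accept for this target; with it gone,
PPC falls through to the generic definition (a plain `;`, assuming the `#else`
branch matches other libunwind releases, since it lies outside the visible
context of the hunk). A minimal sketch of the mechanism, using a hypothetical
SAVE_PAIR macro:

    #define SEPARATOR ;  /* assumed generic fallback in assembly.h */
    #define SAVE_PAIR(_offset) \
      lwz %r5, _offset(%r4) SEPARATOR \
      stw %r5, _offset(%r3)

      SAVE_PAIR(0)  /* preprocesses to: lwz %r5, 0(%r4) ; stw %r5, 0(%r3) */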