This is the mail archive of the
libc-alpha@sources.redhat.com
mailing list for the glibc project.
[PATCH] PPC clean up symbols in setjmp/longjmp
- From: Steve Munroe <sjmunroe at us dot ibm dot com>
- To: libc-alpha at sources dot redhat dot com
- Date: Wed, 15 Dec 2004 14:33:18 -0600
- Subject: [PATCH] PPC clean up symbols in setjmp/longjmp
- Organization: IBM LTC
- Reply-to: sjmunroe at vnet dot ibm dot com
The setjmp/__longjmp implementation was polluting the namespace by
exporting branch targets instead of marking them as local.
2004-12-15 Steven Munroe <sjmunroe@us.ibm.com>
* sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S: Make no_vmx symbol
local.
* sysdeps/powerpc/powerpc32/fpu/setjmp-common.S: Make no_vmx symbol
local.
* sysdeps/powerpc/powerpc64/__longjmp-common.S: Make no_vmx symbol
local.
* sysdeps/powerpc/powerpc64/setjmp-common.S: Make no_vmx and
aligned_save_vmx symbols local.
diff -urN libc23-cvstip-20041215/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S libc23/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
--- libc23-cvstip-20041215/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S 2004-05-28 16:19:14.000000000 -0500
+++ libc23/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S 2004-12-15 14:05:46.171367480 -0600
@@ -50,7 +50,7 @@
lwz r5,_dl_hwcap@l(r5)
# endif
andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
- beq no_vmx
+ beq L(no_vmx)
la r5,((JB_VRS)*4)(3)
andi. r6,r5,0xf
lwz r0,((JB_VRSAVE)*4)(3)
@@ -78,7 +78,7 @@
load_misaligned_vmx_lo_loaded(v30,v31,v0,r6,r5)
lvx v1,0,r5
vperm v31,v31,v1,v0
- b no_vmx
+ b L(no_vmx)
aligned_restore_vmx:
addi r6,r5,16
lvx v20,0,r5
@@ -103,7 +103,7 @@
addi r6,r6,32
lvx v30,0,r5
lvx v31,0,r6
-no_vmx:
+L(no_vmx):
#endif
lwz r1,(JB_GPR1*4)(r3)
lwz r0,(JB_LR*4)(r3)
diff -urN libc23-cvstip-20041215/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S libc23/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
--- libc23-cvstip-20041215/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S 2004-05-28 16:19:15.000000000 -0500
+++ libc23/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S 2004-12-15 14:05:46.172367328 -0600
@@ -92,13 +92,13 @@
lwz r5,_dl_hwcap@l(r5)
#endif
andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
- beq no_vmx
+ beq L(no_vmx)
la r5,((JB_VRS)*4)(3)
andi. r6,r5,0xf
mfspr r0,VRSAVE
stw r0,((JB_VRSAVE)*4)(3)
addi r6,r5,16
- beq+ aligned_save_vmx
+ beq+ L(aligned_save_vmx)
lvsr v0,0,r5
vspltisb v1,-1 /* set v1 to all 1's */
vspltisb v2,0 /* set v2 to all 0's */
@@ -137,9 +137,9 @@
stvx v5,0,r6
vsel v4,v31,v4,v3
stvx v4,0,r5
- b no_vmx
+ b L(no_vmx)
-aligned_save_vmx:
+L(aligned_save_vmx):
stvx 20,0,r5
addi r5,r5,32
stvx 21,0,r6
@@ -162,7 +162,7 @@
addi r6,r6,32
stvx 30,0,r5
stvx 31,0,r6
-no_vmx:
+L(no_vmx):
#endif
b JUMPTARGET (BP_SYM (__sigjmp_save))
END (BP_SYM (__sigsetjmp))
diff -urN libc23-cvstip-20041215/sysdeps/powerpc/powerpc64/__longjmp-common.S libc23/sysdeps/powerpc/powerpc64/__longjmp-common.S
--- libc23-cvstip-20041215/sysdeps/powerpc/powerpc64/__longjmp-common.S 2004-10-06 17:08:54.000000000 -0500
+++ libc23/sysdeps/powerpc/powerpc64/__longjmp-common.S 2004-12-15 14:05:46.173367176 -0600
@@ -53,7 +53,7 @@
ld r5,0(r5) /* Load extern _dl_hwcap. */
# endif
andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
- beq no_vmx
+ beq L(no_vmx)
la r5,((JB_VRS)*8)(3)
andi. r6,r5,0xf
lwz r0,((JB_VRSAVE)*8)(3)
@@ -81,7 +81,7 @@
load_misaligned_vmx_lo_loaded(v30,v31,v0,r6,r5)
lvx v1,0,r5
vperm v31,v31,v1,v0
- b no_vmx
+ b L(no_vmx)
aligned_restore_vmx:
addi r6,r5,16
lvx v20,0,r5
@@ -106,7 +106,7 @@
addi r6,r6,32
lvx v30,0,r5
lvx v31,0,r6
-no_vmx:
+L(no_vmx):
#endif
ld r1,(JB_GPR1*8)(r3)
ld r2,(JB_GPR2*8)(r3)
diff -urN libc23-cvstip-20041215/sysdeps/powerpc/powerpc64/setjmp-common.S libc23/sysdeps/powerpc/powerpc64/setjmp-common.S
--- libc23-cvstip-20041215/sysdeps/powerpc/powerpc64/setjmp-common.S 2004-10-06 17:07:03.000000000 -0500
+++ libc23/sysdeps/powerpc/powerpc64/setjmp-common.S 2004-12-15 14:05:46.173367176 -0600
@@ -102,13 +102,13 @@
ld r5,0(r5) /* Load extern _dl_hwcap. */
# endif
andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
- beq no_vmx
+ beq L(no_vmx)
la r5,((JB_VRS)*8)(3)
andi. r6,r5,0xf
mfspr r0,VRSAVE
stw r0,((JB_VRSAVE)*8)(3)
addi r6,r5,16
- beq+ aligned_save_vmx
+ beq+ L(aligned_save_vmx)
lvsr v0,0,r5
vspltisb v1,-1 /* set v1 to all 1's */
vspltisb v2,0 /* set v2 to all 0's */
@@ -150,9 +150,9 @@
stvx v5,0,r6
vsel v4,v31,v4,v3
stvx v4,0,r5
- b no_vmx
+ b L(no_vmx)
-aligned_save_vmx:
+L(aligned_save_vmx):
stvx 20,0,r5
addi r5,r5,32
stvx 21,0,r6
@@ -175,7 +175,7 @@
addi r6,r6,32
stvx 30,0,r5
stvx 31,0,r6
-no_vmx:
+L(no_vmx):
#endif
b JUMPTARGET (BP_SYM (__sigjmp_save))
END (BP_SYM (__sigsetjmp))