
PATCH: Exchange roles of %ebp and %ebx in i386 gmp asm funcs


This is another BP-prep patch.  BP checks occur after function
arguments are loaded into registers, and if a violation occurs, the
frame pointer needs to be valid for debugging.  These gmp functions
need so many registers that they grab the frame pointer as well.  To
keep the frame pointer clean, I exchanged the roles of %ebp and %ebx:
formerly %ebp held a function argument and %ebx was a temporary; now
the two are reversed, so %ebp is not clobbered until after the point
where we want to check bounds.
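
Condensed from the __mpn_addmul_1 diff below, the prologue change looks
schematically like this (S2LIMB is shorthand for the stack-slot macro,
not necessarily the exact name used in the file):

Old code (condensed):
	pushl	%edi
	pushl	%esi
	pushl	%ebx
	pushl	%ebp
	movl	RES(%esp), %res_ptr
	movl	S2LIMB(%esp), %s2_limb	/* s2_limb == %ebp: caller's frame pointer clobbered */
	/* ...a BP check here would already find %ebp invalid... */
	xorl	%ebx, %ebx		/* %ebx is the carry temporary */

New code (condensed):
	pushl	%edi
	pushl	%esi
	pushl	%ebp
	pushl	%ebx
	movl	RES(%esp), %res_ptr
	movl	S2LIMB(%esp), %s2_limb	/* s2_limb == %ebx now */
	/* ...a BP check here still sees a valid %ebp... */
	xorl	%ebp, %ebp		/* %ebp becomes the carry temporary only here */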

The change was made with this perl command:
$ perl -p -i -e 's/\bebp\b/OLDEBP/g;s/\bebx\b/ebp/g;s/\bOLDEBP\b/ebx/g' FILES...
I verified that OLDEBP didn't occur anywhere prior to the change, and
that there were no occurrences of "%b[hlx]", i.e. no accesses to
sub-registers of %ebx.
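
Those two checks amount to something like the following greps (my
reconstruction of them, not necessarily the exact commands used):

$ grep -w OLDEBP FILES...	# expect no matches, before or after the rename
$ grep -n '%b[hlx]' FILES...	# expect no matches: no sub-register uses of %ebx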

(I didn't see any runtime tests packaged with libc.  Can anyone suggest
some?)

OK?

2000-06-26  Greg McGary  <greg@mcgary.org>

	* sysdeps/i386/addmul_1.S: Exchange roles of %ebp and %ebx.
	* sysdeps/i386/mul_1.S: Likewise.
	* sysdeps/i386/submul_1.S: Likewise.
	* sysdeps/i386/i586/add_n.S: Likewise.
	* sysdeps/i386/i586/addmul_1.S: Likewise.
	* sysdeps/i386/i586/lshift.S: Likewise.
	* sysdeps/i386/i586/mul_1.S: Likewise.
	* sysdeps/i386/i586/rshift.S: Likewise.
	* sysdeps/i386/i586/sub_n.S: Likewise.
	* sysdeps/i386/i586/submul_1.S: Likewise.

Index: sysdeps/i386/addmul_1.S
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/i386/addmul_1.S,v
retrieving revision 1.8
diff -u -p -r1.8 addmul_1.S
--- addmul_1.S	2000/06/26 16:48:10	1.8
+++ addmul_1.S	2000/06/26 17:33:54
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define sizeP ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_addmul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_addmul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,23 +50,23 @@ ENTRY(__mpn_addmul_1)
 	leal	(%res_ptr,%sizeP,4), %res_ptr
 	leal	(%s1_ptr,%sizeP,4), %s1_ptr
 	negl	%sizeP
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 L(oop):
 	movl	(%s1_ptr,%sizeP,4), %eax
 	mull	%s2_limb
-	addl	%ebx, %eax
+	addl	%ebp, %eax
 	adcl	$0, %edx
 	addl	%eax, (%res_ptr,%sizeP,4)
 	adcl	$0, %edx
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 
 	incl	%sizeP
 	jnz	L(oop)
-	movl	%ebx, %eax
+	movl	%ebp, %eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
Index: sysdeps/i386/mul_1.S
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/i386/mul_1.S,v
retrieving revision 1.6
diff -u -p -r1.6 mul_1.S
--- mul_1.S	2000/06/26 16:48:10	1.6
+++ mul_1.S	2000/06/26 17:33:54
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define size ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_mul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_mul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,22 +50,22 @@ ENTRY(__mpn_mul_1)
 	leal	(%res_ptr,%size,4), %res_ptr
 	leal	(%s1_ptr,%size,4), %s1_ptr
 	negl	%size
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 L(oop):
 	movl	(%s1_ptr,%size,4), %eax
 	mull	%s2_limb
-	addl	%ebx, %eax
+	addl	%ebp, %eax
 	movl	%eax, (%res_ptr,%size,4)
 	adcl	$0, %edx
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 
 	incl	%size
 	jnz	L(oop)
-	movl	%ebx, %eax
+	movl	%ebp, %eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
Index: sysdeps/i386/submul_1.S
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/i386/submul_1.S,v
retrieving revision 1.8
diff -u -p -r1.8 submul_1.S
--- submul_1.S	2000/06/26 16:48:10	1.8
+++ submul_1.S	2000/06/26 17:33:54
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define sizeP ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_submul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_submul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,23 +50,23 @@ ENTRY(__mpn_submul_1)
 	leal	(%res_ptr,%sizeP,4), %res_ptr
 	leal	(%s1_ptr,%sizeP,4), %s1_ptr
 	negl	%sizeP
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 L(oop):
 	movl	(%s1_ptr,%sizeP,4), %eax
 	mull	%s2_limb
-	addl	%ebx, %eax
+	addl	%ebp, %eax
 	adcl	$0, %edx
 	subl	%eax, (%res_ptr,%sizeP,4)
 	adcl	$0, %edx
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 
 	incl	%sizeP
 	jnz	L(oop)
-	movl	%ebx, %eax
+	movl	%ebp, %eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
Index: sysdeps/i386/i586/add_n.S
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/i386/i586/add_n.S,v
retrieving revision 1.7
diff -u -p -r1.7 add_n.S
--- add_n.S	2000/06/09 06:14:39	1.7
+++ add_n.S	2000/06/26 17:33:54
@@ -34,15 +34,15 @@ ENTRY(__mpn_add_n)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp),%edi
 	movl	S1(%esp),%esi
-	movl	S2(%esp),%ebp
+	movl	S2(%esp),%ebx
 	movl	SIZE(%esp),%ecx
 
-	movl	(%ebp),%ebx
+	movl	(%ebx),%ebp
 
 	decl	%ecx
 	movl	%ecx,%edx
@@ -58,42 +58,42 @@ L(oop):	movl	28(%edi),%eax		/* fetch des
 
 L(1):	movl	(%esi),%eax
 	movl	4(%esi),%edx
-	adcl	%ebx,%eax
-	movl	4(%ebp),%ebx
-	adcl	%ebx,%edx
-	movl	8(%ebp),%ebx
+	adcl	%ebp,%eax
+	movl	4(%ebx),%ebp
+	adcl	%ebp,%edx
+	movl	8(%ebx),%ebp
 	movl	%eax,-32(%edi)
 	movl	%edx,-28(%edi)
 
 L(2):	movl	8(%esi),%eax
 	movl	12(%esi),%edx
-	adcl	%ebx,%eax
-	movl	12(%ebp),%ebx
-	adcl	%ebx,%edx
-	movl	16(%ebp),%ebx
+	adcl	%ebp,%eax
+	movl	12(%ebx),%ebp
+	adcl	%ebp,%edx
+	movl	16(%ebx),%ebp
 	movl	%eax,-24(%edi)
 	movl	%edx,-20(%edi)
 
 L(3):	movl	16(%esi),%eax
 	movl	20(%esi),%edx
-	adcl	%ebx,%eax
-	movl	20(%ebp),%ebx
-	adcl	%ebx,%edx
-	movl	24(%ebp),%ebx
+	adcl	%ebp,%eax
+	movl	20(%ebx),%ebp
+	adcl	%ebp,%edx
+	movl	24(%ebx),%ebp
 	movl	%eax,-16(%edi)
 	movl	%edx,-12(%edi)
 
 L(4):	movl	24(%esi),%eax
 	movl	28(%esi),%edx
-	adcl	%ebx,%eax
-	movl	28(%ebp),%ebx
-	adcl	%ebx,%edx
-	movl	32(%ebp),%ebx
+	adcl	%ebp,%eax
+	movl	28(%ebx),%ebp
+	adcl	%ebp,%edx
+	movl	32(%ebx),%ebp
 	movl	%eax,-8(%edi)
 	movl	%edx,-4(%edi)
 
 	leal	32(%esi),%esi
-	leal	32(%ebp),%ebp
+	leal	32(%ebx),%ebx
 	decl	%ecx
 	jnz	L(oop)
 
@@ -105,23 +105,23 @@ L(end):
 L(oop2):
 	leal	4(%edi),%edi
 	movl	(%esi),%eax
-	adcl	%ebx,%eax
-	movl	4(%ebp),%ebx
+	adcl	%ebp,%eax
+	movl	4(%ebx),%ebp
 	movl	%eax,-4(%edi)
 	leal	4(%esi),%esi
-	leal	4(%ebp),%ebp
+	leal	4(%ebx),%ebx
 	decl	%edx
 	jnz	L(oop2)
 L(end2):
 	movl	(%esi),%eax
-	adcl	%ebx,%eax
+	adcl	%ebp,%eax
 	movl	%eax,(%edi)
 
 	sbbl	%eax,%eax
 	negl	%eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
Index: sysdeps/i386/i586/addmul_1.S
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/i386/i586/addmul_1.S,v
retrieving revision 1.7
diff -u -p -r1.7 addmul_1.S
--- addmul_1.S	2000/06/26 16:48:10	1.7
+++ addmul_1.S	2000/06/26 17:33:55
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define size ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_addmul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_addmul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,30 +50,30 @@ ENTRY(__mpn_addmul_1)
 	leal	(%res_ptr,%size,4), %res_ptr
 	leal	(%s1_ptr,%size,4), %s1_ptr
 	negl	%size
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 
-L(oop):	adcl	$0, %ebx
+L(oop):	adcl	$0, %ebp
 	movl	(%s1_ptr,%size,4), %eax
 
 	mull	%s2_limb
 
-	addl	%ebx, %eax
-	movl	(%res_ptr,%size,4), %ebx
+	addl	%ebp, %eax
+	movl	(%res_ptr,%size,4), %ebp
 
 	adcl	$0, %edx
-	addl	%eax, %ebx
+	addl	%eax, %ebp
 
-	movl	%ebx, (%res_ptr,%size,4)
+	movl	%ebp, (%res_ptr,%size,4)
 	incl	%size
 
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 	jnz	L(oop)
 
-	adcl	$0, %ebx
-	movl	%ebx, %eax
-	popl	%ebp
+	adcl	$0, %ebp
+	movl	%ebp, %eax
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
Index: sysdeps/i386/i586/lshift.S
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/i386/i586/lshift.S,v
retrieving revision 1.7
diff -u -p -r1.7 lshift.S
--- lshift.S	2000/06/09 06:14:39	1.7
+++ lshift.S	2000/06/26 17:33:55
@@ -33,12 +33,12 @@ ENTRY(__mpn_lshift)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp),%edi
 	movl	S(%esp),%esi
-	movl	SIZE(%esp),%ebp
+	movl	SIZE(%esp),%ebx
 	movl	CNT(%esp),%ecx
 
 /* We can use faster code for shift-by-1 under certain conditions.  */
@@ -47,13 +47,13 @@ ENTRY(__mpn_lshift)
 	leal	4(%esi),%eax
 	cmpl	%edi,%eax
 	jnc	L(special)		/* jump if s_ptr + 1 >= res_ptr */
-	leal	(%esi,%ebp,4),%eax
+	leal	(%esi,%ebx,4),%eax
 	cmpl	%eax,%edi
 	jnc	L(special)		/* jump if res_ptr >= s_ptr + size */
 
 L(normal):
-	leal	-4(%edi,%ebp,4),%edi
-	leal	-4(%esi,%ebp,4),%esi
+	leal	-4(%edi,%ebx,4),%edi
+	leal	-4(%esi,%ebx,4),%esi
 
 	movl	(%esi),%edx
 	subl	$4,%esi
@@ -61,52 +61,52 @@ L(normal):
 	shldl	%cl,%edx,%eax		/* compute carry limb */
 	pushl	%eax			/* push carry limb onto stack */
 
-	decl	%ebp
-	pushl	%ebp
-	shrl	$3,%ebp
+	decl	%ebx
+	pushl	%ebx
+	shrl	$3,%ebx
 	jz	L(end)
 
 	movl	(%edi),%eax		/* fetch destination cache line */
 
 	ALIGN	(2)
 L(oop):	movl	-28(%edi),%eax		/* fetch destination cache line */
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 
 	movl	(%esi),%eax
 	movl	-4(%esi),%edx
-	shldl	%cl,%eax,%ebx
+	shldl	%cl,%eax,%ebp
 	shldl	%cl,%edx,%eax
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 	movl	%eax,-4(%edi)
 
-	movl	-8(%esi),%ebx
+	movl	-8(%esi),%ebp
 	movl	-12(%esi),%eax
-	shldl	%cl,%ebx,%edx
-	shldl	%cl,%eax,%ebx
+	shldl	%cl,%ebp,%edx
+	shldl	%cl,%eax,%ebp
 	movl	%edx,-8(%edi)
-	movl	%ebx,-12(%edi)
+	movl	%ebp,-12(%edi)
 
 	movl	-16(%esi),%edx
-	movl	-20(%esi),%ebx
+	movl	-20(%esi),%ebp
 	shldl	%cl,%edx,%eax
-	shldl	%cl,%ebx,%edx
+	shldl	%cl,%ebp,%edx
 	movl	%eax,-16(%edi)
 	movl	%edx,-20(%edi)
 
 	movl	-24(%esi),%eax
 	movl	-28(%esi),%edx
-	shldl	%cl,%eax,%ebx
+	shldl	%cl,%eax,%ebp
 	shldl	%cl,%edx,%eax
-	movl	%ebx,-24(%edi)
+	movl	%ebp,-24(%edi)
 	movl	%eax,-28(%edi)
 
 	subl	$32,%esi
 	subl	$32,%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(oop)
 
-L(end):	popl	%ebp
-	andl	$7,%ebp
+L(end):	popl	%ebx
+	andl	$7,%ebx
 	jz	L(end2)
 L(oop2):
 	movl	(%esi),%eax
@@ -115,7 +115,7 @@ L(oop2):
 	movl	%eax,%edx
 	subl	$4,%esi
 	subl	$4,%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(oop2)
 
 L(end2):
@@ -124,8 +124,8 @@ L(end2):
 
 	popl	%eax			/* pop carry limb */
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
@@ -141,13 +141,13 @@ L(special):
 	movl	(%esi),%edx
 	addl	$4,%esi
 
-	decl	%ebp
-	pushl	%ebp
-	shrl	$3,%ebp
+	decl	%ebx
+	pushl	%ebx
+	shrl	$3,%ebx
 
 	addl	%edx,%edx
-	incl	%ebp
-	decl	%ebp
+	incl	%ebx
+	decl	%ebx
 	jz	L(Lend)
 
 	movl	(%edi),%eax		/* fetch destination cache line */
@@ -155,56 +155,56 @@ L(special):
 	ALIGN	(2)
 L(Loop):
 	movl	28(%edi),%eax		/* fetch destination cache line */
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 
 	movl	(%esi),%eax
 	movl	4(%esi),%edx
 	adcl	%eax,%eax
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 	adcl	%edx,%edx
 	movl	%eax,4(%edi)
 
-	movl	8(%esi),%ebx
+	movl	8(%esi),%ebp
 	movl	12(%esi),%eax
-	adcl	%ebx,%ebx
+	adcl	%ebp,%ebp
 	movl	%edx,8(%edi)
 	adcl	%eax,%eax
-	movl	%ebx,12(%edi)
+	movl	%ebp,12(%edi)
 
 	movl	16(%esi),%edx
-	movl	20(%esi),%ebx
+	movl	20(%esi),%ebp
 	adcl	%edx,%edx
 	movl	%eax,16(%edi)
-	adcl	%ebx,%ebx
+	adcl	%ebp,%ebp
 	movl	%edx,20(%edi)
 
 	movl	24(%esi),%eax
 	movl	28(%esi),%edx
 	adcl	%eax,%eax
-	movl	%ebx,24(%edi)
+	movl	%ebp,24(%edi)
 	adcl	%edx,%edx
 	movl	%eax,28(%edi)
 
 	leal	32(%esi),%esi		/* use leal not to clobber carry */
 	leal	32(%edi),%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(Loop)
 
 L(Lend):
-	popl	%ebp
+	popl	%ebx
 	sbbl	%eax,%eax		/* save carry in %eax */
-	andl	$7,%ebp
+	andl	$7,%ebx
 	jz	L(Lend2)
 	addl	%eax,%eax		/* restore carry from eax */
 L(Loop2):
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 	movl	(%esi),%edx
 	adcl	%edx,%edx
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 
 	leal	4(%esi),%esi		/* use leal not to clobber carry */
 	leal	4(%edi),%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(Loop2)
 
 	jmp	L(L1)
@@ -215,8 +215,8 @@ L(L1):	movl	%edx,(%edi)		/* store last l
 	sbbl	%eax,%eax
 	negl	%eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
Index: sysdeps/i386/i586/mul_1.S
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/i386/i586/mul_1.S,v
retrieving revision 1.6
diff -u -p -r1.6 mul_1.S
--- mul_1.S	2000/06/26 16:48:10	1.6
+++ mul_1.S	2000/06/26 17:33:55
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define size ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_mul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_mul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,26 +50,26 @@ ENTRY(__mpn_mul_1)
 	leal	(%res_ptr,%size,4), %res_ptr
 	leal	(%s1_ptr,%size,4), %s1_ptr
 	negl	%size
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 
-L(oop):	adcl	$0, %ebx
+L(oop):	adcl	$0, %ebp
 	movl	(%s1_ptr,%size,4), %eax
 
 	mull	%s2_limb
 
-	addl	%eax, %ebx
+	addl	%eax, %ebp
 
-	movl	%ebx, (%res_ptr,%size,4)
+	movl	%ebp, (%res_ptr,%size,4)
 	incl	%size
 
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 	jnz	L(oop)
 
-	adcl	$0, %ebx
-	movl	%ebx, %eax
-	popl	%ebp
+	adcl	$0, %ebp
+	movl	%ebp, %eax
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
Index: sysdeps/i386/i586/rshift.S
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/i386/i586/rshift.S,v
retrieving revision 1.7
diff -u -p -r1.7 rshift.S
--- rshift.S	2000/06/09 06:14:39	1.7
+++ rshift.S	2000/06/26 17:33:55
@@ -33,12 +33,12 @@ ENTRY(__mpn_rshift)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp),%edi
 	movl	S(%esp),%esi
-	movl	SIZE(%esp),%ebp
+	movl	SIZE(%esp),%ebx
 	movl	CNT(%esp),%ecx
 
 /* We can use faster code for shift-by-1 under certain conditions.  */
@@ -47,7 +47,7 @@ ENTRY(__mpn_rshift)
 	leal	4(%edi),%eax
 	cmpl	%esi,%eax
 	jnc	L(special)		/* jump if res_ptr + 1 >= s_ptr */
-	leal	(%edi,%ebp,4),%eax
+	leal	(%edi,%ebx,4),%eax
 	cmpl	%eax,%esi
 	jnc	L(special)		/* jump if s_ptr >= res_ptr + size */
 
@@ -58,52 +58,52 @@ L(normal):
 	shrdl	%cl,%edx,%eax		/* compute carry limb */
 	pushl	%eax			/* push carry limb onto stack */
 
-	decl	%ebp
-	pushl	%ebp
-	shrl	$3,%ebp
+	decl	%ebx
+	pushl	%ebx
+	shrl	$3,%ebx
 	jz	L(end)
 
 	movl	(%edi),%eax		/* fetch destination cache line */
 
 	ALIGN	(2)
 L(oop):	movl	28(%edi),%eax		/* fetch destination cache line */
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 
 	movl	(%esi),%eax
 	movl	4(%esi),%edx
-	shrdl	%cl,%eax,%ebx
+	shrdl	%cl,%eax,%ebp
 	shrdl	%cl,%edx,%eax
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 	movl	%eax,4(%edi)
 
-	movl	8(%esi),%ebx
+	movl	8(%esi),%ebp
 	movl	12(%esi),%eax
-	shrdl	%cl,%ebx,%edx
-	shrdl	%cl,%eax,%ebx
+	shrdl	%cl,%ebp,%edx
+	shrdl	%cl,%eax,%ebp
 	movl	%edx,8(%edi)
-	movl	%ebx,12(%edi)
+	movl	%ebp,12(%edi)
 
 	movl	16(%esi),%edx
-	movl	20(%esi),%ebx
+	movl	20(%esi),%ebp
 	shrdl	%cl,%edx,%eax
-	shrdl	%cl,%ebx,%edx
+	shrdl	%cl,%ebp,%edx
 	movl	%eax,16(%edi)
 	movl	%edx,20(%edi)
 
 	movl	24(%esi),%eax
 	movl	28(%esi),%edx
-	shrdl	%cl,%eax,%ebx
+	shrdl	%cl,%eax,%ebp
 	shrdl	%cl,%edx,%eax
-	movl	%ebx,24(%edi)
+	movl	%ebp,24(%edi)
 	movl	%eax,28(%edi)
 
 	addl	$32,%esi
 	addl	$32,%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(oop)
 
-L(end):	popl	%ebp
-	andl	$7,%ebp
+L(end):	popl	%ebx
+	andl	$7,%ebx
 	jz	L(end2)
 L(oop2):
 	movl	(%esi),%eax
@@ -112,7 +112,7 @@ L(oop2):
 	movl	%eax,%edx
 	addl	$4,%esi
 	addl	$4,%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(oop2)
 
 L(end2):
@@ -121,8 +121,8 @@ L(end2):
 
 	popl	%eax			/* pop carry limb */
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
@@ -135,19 +135,19 @@ L(end2):
 */
 
 L(special):
-	leal	-4(%edi,%ebp,4),%edi
-	leal	-4(%esi,%ebp,4),%esi
+	leal	-4(%edi,%ebx,4),%edi
+	leal	-4(%esi,%ebx,4),%esi
 
 	movl	(%esi),%edx
 	subl	$4,%esi
 
-	decl	%ebp
-	pushl	%ebp
-	shrl	$3,%ebp
+	decl	%ebx
+	pushl	%ebx
+	shrl	$3,%ebx
 
 	shrl	$1,%edx
-	incl	%ebp
-	decl	%ebp
+	incl	%ebx
+	decl	%ebx
 	jz	L(Lend)
 
 	movl	(%edi),%eax		/* fetch destination cache line */
@@ -155,56 +155,56 @@ L(special):
 	ALIGN	(2)
 L(Loop):
 	movl	-28(%edi),%eax		/* fetch destination cache line */
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 
 	movl	(%esi),%eax
 	movl	-4(%esi),%edx
 	rcrl	$1,%eax
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 	rcrl	$1,%edx
 	movl	%eax,-4(%edi)
 
-	movl	-8(%esi),%ebx
+	movl	-8(%esi),%ebp
 	movl	-12(%esi),%eax
-	rcrl	$1,%ebx
+	rcrl	$1,%ebp
 	movl	%edx,-8(%edi)
 	rcrl	$1,%eax
-	movl	%ebx,-12(%edi)
+	movl	%ebp,-12(%edi)
 
 	movl	-16(%esi),%edx
-	movl	-20(%esi),%ebx
+	movl	-20(%esi),%ebp
 	rcrl	$1,%edx
 	movl	%eax,-16(%edi)
-	rcrl	$1,%ebx
+	rcrl	$1,%ebp
 	movl	%edx,-20(%edi)
 
 	movl	-24(%esi),%eax
 	movl	-28(%esi),%edx
 	rcrl	$1,%eax
-	movl	%ebx,-24(%edi)
+	movl	%ebp,-24(%edi)
 	rcrl	$1,%edx
 	movl	%eax,-28(%edi)
 
 	leal	-32(%esi),%esi		/* use leal not to clobber carry */
 	leal	-32(%edi),%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(Loop)
 
 L(Lend):
-	popl	%ebp
+	popl	%ebx
 	sbbl	%eax,%eax		/* save carry in %eax */
-	andl	$7,%ebp
+	andl	$7,%ebx
 	jz	L(Lend2)
 	addl	%eax,%eax		/* restore carry from eax */
 L(Loop2):
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 	movl	(%esi),%edx
 	rcrl	$1,%edx
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 
 	leal	-4(%esi),%esi		/* use leal not to clobber carry */
 	leal	-4(%edi),%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(Loop2)
 
 	jmp	L(L1)
@@ -215,8 +215,8 @@ L(L1):	movl	%edx,(%edi)		/* store last l
 	movl	$0,%eax
 	rcrl	$1,%eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
Index: sysdeps/i386/i586/sub_n.S
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/i386/i586/sub_n.S,v
retrieving revision 1.7
diff -u -p -r1.7 sub_n.S
--- sub_n.S	2000/06/09 06:14:39	1.7
+++ sub_n.S	2000/06/26 17:33:55
@@ -34,15 +34,15 @@ ENTRY(__mpn_sub_n)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp),%edi
 	movl	S1(%esp),%esi
-	movl	S2(%esp),%ebp
+	movl	S2(%esp),%ebx
 	movl	SIZE(%esp),%ecx
 
-	movl	(%ebp),%ebx
+	movl	(%ebx),%ebp
 
 	decl	%ecx
 	movl	%ecx,%edx
@@ -58,42 +58,42 @@ L(oop):	movl	28(%edi),%eax		/* fetch des
 
 L(1):	movl	(%esi),%eax
 	movl	4(%esi),%edx
-	sbbl	%ebx,%eax
-	movl	4(%ebp),%ebx
-	sbbl	%ebx,%edx
-	movl	8(%ebp),%ebx
+	sbbl	%ebp,%eax
+	movl	4(%ebx),%ebp
+	sbbl	%ebp,%edx
+	movl	8(%ebx),%ebp
 	movl	%eax,-32(%edi)
 	movl	%edx,-28(%edi)
 
 L(2):	movl	8(%esi),%eax
 	movl	12(%esi),%edx
-	sbbl	%ebx,%eax
-	movl	12(%ebp),%ebx
-	sbbl	%ebx,%edx
-	movl	16(%ebp),%ebx
+	sbbl	%ebp,%eax
+	movl	12(%ebx),%ebp
+	sbbl	%ebp,%edx
+	movl	16(%ebx),%ebp
 	movl	%eax,-24(%edi)
 	movl	%edx,-20(%edi)
 
 L(3):	movl	16(%esi),%eax
 	movl	20(%esi),%edx
-	sbbl	%ebx,%eax
-	movl	20(%ebp),%ebx
-	sbbl	%ebx,%edx
-	movl	24(%ebp),%ebx
+	sbbl	%ebp,%eax
+	movl	20(%ebx),%ebp
+	sbbl	%ebp,%edx
+	movl	24(%ebx),%ebp
 	movl	%eax,-16(%edi)
 	movl	%edx,-12(%edi)
 
 L(4):	movl	24(%esi),%eax
 	movl	28(%esi),%edx
-	sbbl	%ebx,%eax
-	movl	28(%ebp),%ebx
-	sbbl	%ebx,%edx
-	movl	32(%ebp),%ebx
+	sbbl	%ebp,%eax
+	movl	28(%ebx),%ebp
+	sbbl	%ebp,%edx
+	movl	32(%ebx),%ebp
 	movl	%eax,-8(%edi)
 	movl	%edx,-4(%edi)
 
 	leal	32(%esi),%esi
-	leal	32(%ebp),%ebp
+	leal	32(%ebx),%ebx
 	decl	%ecx
 	jnz	L(oop)
 
@@ -105,23 +105,23 @@ L(end):
 L(oop2):
 	leal	4(%edi),%edi
 	movl	(%esi),%eax
-	sbbl	%ebx,%eax
-	movl	4(%ebp),%ebx
+	sbbl	%ebp,%eax
+	movl	4(%ebx),%ebp
 	movl	%eax,-4(%edi)
 	leal	4(%esi),%esi
-	leal	4(%ebp),%ebp
+	leal	4(%ebx),%ebx
 	decl	%edx
 	jnz	L(oop2)
 L(end2):
 	movl	(%esi),%eax
-	sbbl	%ebx,%eax
+	sbbl	%ebp,%eax
 	movl	%eax,(%edi)
 
 	sbbl	%eax,%eax
 	negl	%eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
Index: sysdeps/i386/i586/submul_1.S
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/i386/i586/submul_1.S,v
retrieving revision 1.6
diff -u -p -r1.6 submul_1.S
--- submul_1.S	2000/06/26 16:48:10	1.6
+++ submul_1.S	2000/06/26 17:33:55
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define size ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_submul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_submul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,30 +50,30 @@ ENTRY(__mpn_submul_1)
 	leal	(%res_ptr,%size,4), %res_ptr
 	leal	(%s1_ptr,%size,4), %s1_ptr
 	negl	%size
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 
-L(oop):	adcl	$0, %ebx
+L(oop):	adcl	$0, %ebp
 	movl	(%s1_ptr,%size,4), %eax
 
 	mull	%s2_limb
 
-	addl	%ebx, %eax
-	movl	(%res_ptr,%size,4), %ebx
+	addl	%ebp, %eax
+	movl	(%res_ptr,%size,4), %ebp
 
 	adcl	$0, %edx
-	subl	%eax, %ebx
+	subl	%eax, %ebp
 
-	movl	%ebx, (%res_ptr,%size,4)
+	movl	%ebp, (%res_ptr,%size,4)
 	incl	%size
 
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 	jnz	L(oop)
 
-	adcl	$0, %ebx
-	movl	%ebx, %eax
-	popl	%ebp
+	adcl	$0, %ebp
+	movl	%ebp, %eax
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
