Diffstat (limited to 'libotr/libgcrypt-1.8.7/mpi/hppa')
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa/README         |  84
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa/distfiles      |   7
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa/mpih-add1.S    |  70
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa/mpih-lshift.S  |  77
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa/mpih-rshift.S  |  73
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa/mpih-sub1.S    |  78
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa/udiv-qrnnd.S   | 297
7 files changed, 686 insertions, 0 deletions
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa/README b/libotr/libgcrypt-1.8.7/mpi/hppa/README
new file mode 100644
index 0000000..5a2d5fd
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa/README
@@ -0,0 +1,84 @@
+This directory contains mpn functions for various HP PA-RISC chips.  Code
+that runs faster on the PA7100 and later implementations is in the pa7100
+directory.
+
+RELEVANT OPTIMIZATION ISSUES
+
+  Load and Store timing
+
+On the PA7000 no memory instructions can issue in the two cycles after a store.
+For the PA7100, this is reduced to one cycle.
+
+The PA7100 has a lockup-free cache, so it helps to schedule loads far away
+from the instructions that depend on them.
+
+STATUS
+
+1. mpn_mul_1 could be improved to 6.5 cycles/limb on the PA7100, using the
+   instructions below (but some software pipelining is needed to avoid the
+   xmpyu-fstds delay):
+
+	fldds	s1_ptr
+
+	xmpyu
+	fstds	N(%r30)
+	xmpyu
+	fstds	N(%r30)
+
+	ldws	N(%r30)
+	ldws	N(%r30)
+	ldws	N(%r30)
+	ldws	N(%r30)
+
+	addc
+	stws	res_ptr
+	addc
+	stws	res_ptr
+
+	addib	Loop
+
+2. mpn_addmul_1 could be improved from the current 10 to 7.5 cycles/limb
+   (asymptotically) on the PA7100, using the instructions below.  With proper
+   software pipelining and the unrolling level shown below, the speed becomes
+   8 cycles/limb.
+
+	fldds	s1_ptr
+	fldds	s1_ptr
+
+	xmpyu
+	fstds	N(%r30)
+	xmpyu
+	fstds	N(%r30)
+	xmpyu
+	fstds	N(%r30)
+	xmpyu
+	fstds	N(%r30)
+
+	ldws	N(%r30)
+	ldws	N(%r30)
+	ldws	N(%r30)
+	ldws	N(%r30)
+	ldws	N(%r30)
+	ldws	N(%r30)
+	ldws	N(%r30)
+	ldws	N(%r30)
+	addc
+	addc
+	addc
+	addc
+	addc	%r0,%r0,cy-limb
+
+	ldws	res_ptr
+	ldws	res_ptr
+	ldws	res_ptr
+	ldws	res_ptr
+	add
+	stws	res_ptr
+	addc
+	stws	res_ptr
+	addc
+	stws	res_ptr
+	addc
+	stws	res_ptr
+
+	addib
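
For reference, here is a minimal C-level sketch of the loop that STATUS item 2
pipelines: multiply the s1 vector by a single limb, accumulate the products
into the result vector, and return the high limb carried out of the top.  The
function name ref_addmul_1 and the local 32-bit mpi_limb_t typedef are
assumptions made for this sketch; it is illustrative, not the libgcrypt
implementation.

	#include <stdint.h>

	typedef uint32_t mpi_limb_t;   /* assumes 32-bit limbs, as on 32-bit PA-RISC */

	/* Illustrative only: multiply the size-limb vector s1 by limb, add the
	   products into res, and return the high limb carried out of the top.  */
	static mpi_limb_t
	ref_addmul_1 (mpi_limb_t *res, const mpi_limb_t *s1,
	              long size, mpi_limb_t limb)
	{
	  mpi_limb_t cy = 0;
	  for (long i = 0; i < size; i++)
	    {
	      uint64_t prod = (uint64_t) s1[i] * limb + cy;
	      mpi_limb_t lo = (mpi_limb_t) prod;
	      cy = (mpi_limb_t) (prod >> 32);
	      res[i] += lo;
	      if (res[i] < lo)             /* carry out of the accumulation */
	        cy++;
	    }
	  return cy;
	}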
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa/distfiles b/libotr/libgcrypt-1.8.7/mpi/hppa/distfiles
new file mode 100644
index 0000000..7f24205
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa/distfiles
@@ -0,0 +1,7 @@
+README
+udiv-qrnnd.S
+mpih-add1.S
+mpih-sub1.S
+mpih-lshift.S
+mpih-rshift.S
+
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-add1.S b/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-add1.S
new file mode 100644
index 0000000..3bc0e5e
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-add1.S
@@ -0,0 +1,70 @@
+/* hppa add_n -- Add two limb vectors of the same length > 0 and store
+ *		 sum in a third limb vector.
+ *
+ *      Copyright (C) 1992, 1994, 1998,
+ *                    2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ */
+
+
+
+/*******************
+ *  mpi_limb_t
+ *  _gcry_mpih_add_n( mpi_ptr_t res_ptr,	(gr26)
+ *		   mpi_ptr_t s1_ptr,	(gr25)
+ *		   mpi_ptr_t s2_ptr,	(gr24)
+ *		   mpi_size_t size)	(gr23)
+ *
+ * One might want to unroll this as for other processors, but it turns
+ * out that the data cache contention after a store makes such
+ * unrolling useless.  We can't come under 5 cycles/limb anyway.
+ */
+
+	.code
+	.export 	_gcry_mpih_add_n
+	.label		_gcry_mpih_add_n
+	.proc
+	.callinfo	frame=0,no_calls
+	.entry
+
+	ldws,ma 	4(0,%r25),%r20
+	ldws,ma 	4(0,%r24),%r19
+
+	addib,= 	-1,%r23,L$end	; check for (SIZE == 1)
+	 add		%r20,%r19,%r28	; add first limbs ignoring cy
+
+	.label L$loop
+	ldws,ma 	4(0,%r25),%r20
+	ldws,ma 	4(0,%r24),%r19
+	stws,ma 	%r28,4(0,%r26)
+	addib,<>	-1,%r23,L$loop
+	 addc		%r20,%r19,%r28
+
+	.label L$end
+	stws		%r28,0(0,%r26)
+	bv		0(%r2)
+	 addc		%r0,%r0,%r28
+
+	.exit
+	.procend
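
For orientation, a hedged C equivalent of what _gcry_mpih_add_n above
computes; the name ref_add_n and the local mpi_limb_t typedef are assumptions
for this sketch, not the portable libgcrypt code.

	#include <stdint.h>

	typedef uint32_t mpi_limb_t;   /* assumes 32-bit limbs, as on 32-bit PA-RISC */

	/* Illustrative only: add the size-limb vectors s1 and s2, store the sum
	   in res, and return the final carry (0 or 1), corresponding to the
	   value the assembly above returns in gr28.  */
	static mpi_limb_t
	ref_add_n (mpi_limb_t *res, const mpi_limb_t *s1,
	           const mpi_limb_t *s2, long size)
	{
	  mpi_limb_t cy = 0;
	  for (long i = 0; i < size; i++)
	    {
	      uint64_t t = (uint64_t) s1[i] + s2[i] + cy;
	      res[i] = (mpi_limb_t) t;
	      cy = (mpi_limb_t) (t >> 32);
	    }
	  return cy;
	}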
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-lshift.S b/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-lshift.S
new file mode 100644
index 0000000..91b29bb
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-lshift.S
@@ -0,0 +1,77 @@
+/* hppa   lshift
+ *
+ *      Copyright (C) 1992, 1994, 1998
+ *                    2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+
+
+/*******************
+ * mpi_limb_t
+ * _gcry_mpih_lshift( mpi_ptr_t wp,	(gr26)
+ *		   mpi_ptr_t up,	(gr25)
+ *		   mpi_size_t usize,	(gr24)
+ *		   unsigned cnt)	(gr23)
+ */
+
+	.code
+	.export 	_gcry_mpih_lshift
+	.label		_gcry_mpih_lshift
+	.proc
+	.callinfo	frame=64,no_calls
+	.entry
+
+	sh2add		%r24,%r25,%r25
+	sh2add		%r24,%r26,%r26
+	ldws,mb 	-4(0,%r25),%r22
+	subi		32,%r23,%r1
+	mtsar		%r1
+	addib,= 	-1,%r24,L$0004
+	vshd		%r0,%r22,%r28		; compute carry out limb
+	ldws,mb 	-4(0,%r25),%r29
+	addib,= 	-1,%r24,L$0002
+	vshd		%r22,%r29,%r20
+
+	.label	L$loop
+	ldws,mb 	-4(0,%r25),%r22
+	stws,mb 	%r20,-4(0,%r26)
+	addib,= 	-1,%r24,L$0003
+	vshd		%r29,%r22,%r20
+	ldws,mb 	-4(0,%r25),%r29
+	stws,mb 	%r20,-4(0,%r26)
+	addib,<>	-1,%r24,L$loop
+	vshd		%r22,%r29,%r20
+
+	.label	L$0002
+	stws,mb 	%r20,-4(0,%r26)
+	vshd		%r29,%r0,%r20
+	bv		0(%r2)
+	stw		%r20,-4(0,%r26)
+	.label	L$0003
+	stws,mb 	%r20,-4(0,%r26)
+	.label	L$0004
+	vshd		%r22,%r0,%r20
+	bv		0(%r2)
+	stw		%r20,-4(0,%r26)
+
+	.exit
+	.procend
+
+
+
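A hedged C sketch of the semantics _gcry_mpih_lshift above implements,
written limb-wise from the most significant end downward as the assembly
does; ref_lshift and the local limb typedef are assumptions for this sketch.

	#include <stdint.h>

	typedef uint32_t mpi_limb_t;   /* assumes 32-bit limbs, as on 32-bit PA-RISC */

	/* Illustrative only: shift the usize-limb vector up left by cnt bits
	   (assumes 0 < cnt < 32), store the result in wp, and return the bits
	   shifted out of the most significant limb.  */
	static mpi_limb_t
	ref_lshift (mpi_limb_t *wp, const mpi_limb_t *up,
	            long usize, unsigned cnt)
	{
	  mpi_limb_t retval = up[usize - 1] >> (32 - cnt);
	  for (long i = usize - 1; i > 0; i--)
	    wp[i] = (up[i] << cnt) | (up[i - 1] >> (32 - cnt));
	  wp[0] = up[0] << cnt;
	  return retval;
	}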
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-rshift.S b/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-rshift.S
new file mode 100644
index 0000000..37a9d4e
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-rshift.S
@@ -0,0 +1,73 @@
+/* hppa   rshift
+ *
+ *      Copyright (C) 1992, 1994, 1998,
+ *                    2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+
+
+
+/*******************
+ * mpi_limb_t
+ * _gcry_mpih_rshift( mpi_ptr_t wp,       (gr26)
+ *		   mpi_ptr_t up,       (gr25)
+ *		   mpi_size_t usize,   (gr24)
+ *		   unsigned cnt)       (gr23)
+ */
+
+	.code
+	.export 	_gcry_mpih_rshift
+	.label		_gcry_mpih_rshift
+	.proc
+	.callinfo	frame=64,no_calls
+	.entry
+
+	ldws,ma 	4(0,%r25),%r22
+	mtsar		%r23
+	addib,= 	-1,%r24,L$r004
+	vshd		%r22,%r0,%r28		; compute carry out limb
+	ldws,ma 	4(0,%r25),%r29
+	addib,= 	-1,%r24,L$r002
+	vshd		%r29,%r22,%r20
+
+	.label	L$roop
+	ldws,ma 	4(0,%r25),%r22
+	stws,ma 	%r20,4(0,%r26)
+	addib,= 	-1,%r24,L$r003
+	vshd		%r22,%r29,%r20
+	ldws,ma 	4(0,%r25),%r29
+	stws,ma 	%r20,4(0,%r26)
+	addib,<>	-1,%r24,L$roop
+	vshd		%r29,%r22,%r20
+
+	.label	L$r002
+	stws,ma 	%r20,4(0,%r26)
+	vshd		%r0,%r29,%r20
+	bv		0(%r2)
+	stw		%r20,0(0,%r26)
+	.label	L$r003
+	stws,ma 	%r20,4(0,%r26)
+	.label	L$r004
+	vshd		%r0,%r22,%r20
+	bv		0(%r2)
+	stw		%r20,0(0,%r26)
+
+	.exit
+	.procend
+
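Likewise, a hedged C sketch of the _gcry_mpih_rshift semantics above;
ref_rshift and the local limb typedef are assumptions for illustration.

	#include <stdint.h>

	typedef uint32_t mpi_limb_t;   /* assumes 32-bit limbs, as on 32-bit PA-RISC */

	/* Illustrative only: shift the usize-limb vector up right by cnt bits
	   (assumes 0 < cnt < 32), store the result in wp, and return the bits
	   shifted out of the least significant limb, left-justified.  */
	static mpi_limb_t
	ref_rshift (mpi_limb_t *wp, const mpi_limb_t *up,
	            long usize, unsigned cnt)
	{
	  mpi_limb_t retval = up[0] << (32 - cnt);
	  for (long i = 0; i < usize - 1; i++)
	    wp[i] = (up[i] >> cnt) | (up[i + 1] << (32 - cnt));
	  wp[usize - 1] = up[usize - 1] >> cnt;
	  return retval;
	}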
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-sub1.S b/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-sub1.S
new file mode 100644
index 0000000..8d197e4
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa/mpih-sub1.S
@@ -0,0 +1,78 @@
+/* hppa   sub_n -- Subtract two limb vectors of the same length > 0 and store
+ *		   difference in a third limb vector.
+ *
+ *      Copyright (C) 1992, 1994, 1998, 
+ *                    2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ */
+
+
+#include "sysdep.h"
+#include "asm-syntax.h"
+
+
+/*******************
+ *  mpi_limb_t
+ *  _gcry_mpih_sub_n( mpi_ptr_t res_ptr,	(gr26)
+ *		   mpi_ptr_t s1_ptr,	(gr25)
+ *		   mpi_ptr_t s2_ptr,	(gr24)
+ *		   mpi_size_t size)	(gr23)
+ *
+ * One might want to unroll this as for other processors, but it turns
+ * out that the data cache contention after a store makes such
+ * unrolling useless.  We can't come under 5 cycles/limb anyway.
+ */
+
+
+	.code
+	.export 	_gcry_mpih_sub_n
+	.label		_gcry_mpih_sub_n
+	.proc
+	.callinfo	frame=0,no_calls
+	.entry
+
+	ldws,ma 	4(0,%r25),%r20
+	ldws,ma 	4(0,%r24),%r19
+
+	addib,= 	-1,%r23,L$end	; check for (SIZE == 1)
+	 sub		%r20,%r19,%r28	; subtract first limbs ignoring cy
+
+	.label	L$loop
+	ldws,ma 	4(0,%r25),%r20
+	ldws,ma 	4(0,%r24),%r19
+	stws,ma 	%r28,4(0,%r26)
+	addib,<>	-1,%r23,L$loop
+	 subb		%r20,%r19,%r28
+
+	.label	L$end
+	stws		%r28,0(0,%r26)
+	addc		%r0,%r0,%r28
+	bv		0(%r2)
+	 subi		1,%r28,%r28
+
+	.exit
+	.procend
+
+
+
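A hedged C equivalent of what _gcry_mpih_sub_n above computes; ref_sub_n and
the local limb typedef are assumptions for this sketch.

	#include <stdint.h>

	typedef uint32_t mpi_limb_t;   /* assumes 32-bit limbs, as on 32-bit PA-RISC */

	/* Illustrative only: subtract the size-limb vector s2 from s1, store the
	   difference in res, and return the final borrow (0 or 1), corresponding
	   to the value the assembly above returns in gr28.  */
	static mpi_limb_t
	ref_sub_n (mpi_limb_t *res, const mpi_limb_t *s1,
	           const mpi_limb_t *s2, long size)
	{
	  mpi_limb_t cy = 0;
	  for (long i = 0; i < size; i++)
	    {
	      uint64_t t = (uint64_t) s1[i] - s2[i] - cy;
	      res[i] = (mpi_limb_t) t;
	      cy = (mpi_limb_t) ((t >> 32) & 1);   /* borrow out */
	    }
	  return cy;
	}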
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa/udiv-qrnnd.S b/libotr/libgcrypt-1.8.7/mpi/hppa/udiv-qrnnd.S
new file mode 100644
index 0000000..59ebf7a
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa/udiv-qrnnd.S
@@ -0,0 +1,297 @@
+/* HP-PA  __udiv_qrnnd division support, used from longlong.h.
+ *	  This version runs fast on pre-PA7000 CPUs.
+ *
+ *      Copyright (C) 1993, 1994, 1998, 2001,
+ *                     2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ */
+
+
+
+/* INPUT PARAMETERS
+ *   rem_ptr	   gr26
+ *   n1 	   gr25
+ *   n0 	   gr24
+ *   d		   gr23
+ *
+ *   The code size is a bit excessive.	We could merge the last two ds;addc
+ *   sequences by simply moving the "bb,< Odd" instruction down.  The only
+ *   trouble is the FFFFFFFF code that would need some hacking.
+ */
+
+	.code
+	.export 	__udiv_qrnnd
+	.label		__udiv_qrnnd
+	.proc
+	.callinfo	frame=0,no_calls
+	.entry
+
+	comb,<		%r23,0,L$largedivisor
+	 sub		%r0,%r23,%r1		; clear cy as side-effect
+	ds		%r0,%r1,%r0
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r23,%r25
+	addc		%r24,%r24,%r28
+	ds		%r25,%r23,%r25
+	comclr,>=	%r25,%r0,%r0
+	addl		%r25,%r23,%r25
+	stws		%r25,0(0,%r26)
+	bv		0(%r2)
+	 addc		%r28,%r28,%r28
+
+	.label	L$largedivisor
+	extru		%r24,31,1,%r19		; r19 = n0 & 1
+	bb,<		%r23,31,L$odd
+	 extru		%r23,30,31,%r22 	; r22 = d >> 1
+	shd		%r25,%r24,1,%r24	; r24 = new n0
+	extru		%r25,30,31,%r25 	; r25 = new n1
+	sub		%r0,%r22,%r21
+	ds		%r0,%r21,%r0
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	comclr,>=	%r25,%r0,%r0
+	addl		%r25,%r22,%r25
+	sh1addl 	%r25,%r19,%r25
+	stws		%r25,0(0,%r26)
+	bv		0(%r2)
+	 addc		%r24,%r24,%r28
+
+	.label	L$odd
+	addib,sv,n	1,%r22,L$FF..		; r22 = (d / 2 + 1)
+	shd		%r25,%r24,1,%r24	; r24 = new n0
+	extru		%r25,30,31,%r25 	; r25 = new n1
+	sub		%r0,%r22,%r21
+	ds		%r0,%r21,%r0
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r24
+	ds		%r25,%r22,%r25
+	addc		%r24,%r24,%r28
+	comclr,>=	%r25,%r0,%r0
+	addl		%r25,%r22,%r25
+	sh1addl 	%r25,%r19,%r25
+; We have computed (n1,,n0) / (d + 1), q' = r28, r' = r25
+	add,nuv 	%r28,%r25,%r25
+	addl		%r25,%r1,%r25
+	addc		%r0,%r28,%r28
+	sub,<<		%r25,%r23,%r0
+	addl		%r25,%r1,%r25
+	stws		%r25,0(0,%r26)
+	bv		0(%r2)
+	 addc		%r0,%r28,%r28
+
+; This is just a special case of the code above.
+; We come here when d == 0xFFFFFFFF
+	.label	L$FF..
+	add,uv		%r25,%r24,%r24
+	sub,<<		%r24,%r23,%r0
+	ldo		1(%r24),%r24
+	stws		%r24,0(0,%r26)
+	bv		0(%r2)
+	 addc		%r0,%r25,%r28
+
+	.exit
+	.procend
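
Finally, a hedged C sketch of the __udiv_qrnnd contract that longlong.h
expects: divide the two-limb value (n1,n0) by d, store the remainder through
rem_ptr, and return the quotient.  It assumes n1 < d so the quotient fits in
one limb, and uses 64-bit host arithmetic rather than the shift-and-subtract
"ds" sequences above; ref_udiv_qrnnd and the local limb typedef are
assumptions for illustration.

	#include <stdint.h>

	typedef uint32_t mpi_limb_t;   /* assumes 32-bit limbs, as on 32-bit PA-RISC */

	/* Illustrative only: divide (n1,n0) by d, store the remainder through
	   rem_ptr, and return the quotient.  Assumes n1 < d.  */
	static mpi_limb_t
	ref_udiv_qrnnd (mpi_limb_t *rem_ptr, mpi_limb_t n1,
	                mpi_limb_t n0, mpi_limb_t d)
	{
	  uint64_t n = ((uint64_t) n1 << 32) | n0;
	  *rem_ptr = (mpi_limb_t) (n % d);
	  return (mpi_limb_t) (n / d);
	}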