Diffstat (limited to 'libotr/libgcrypt-1.8.7/mpi/hppa1.1')
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa1.1/distfiles    |   5
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul1.S  | 115
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul2.S  | 117
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul3.S  | 126
-rw-r--r--  libotr/libgcrypt-1.8.7/mpi/hppa1.1/udiv-qrnnd.S |  92
5 files changed, 455 insertions(+), 0 deletions(-)
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa1.1/distfiles b/libotr/libgcrypt-1.8.7/mpi/hppa1.1/distfiles
new file mode 100644
index 0000000..d68227a
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa1.1/distfiles
@@ -0,0 +1,5 @@
+udiv-qrnnd.S
+mpih-mul1.S
+mpih-mul2.S
+mpih-mul3.S
+
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul1.S b/libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul1.S
new file mode 100644
index 0000000..45926dd
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul1.S
@@ -0,0 +1,115 @@
+/* hppa1.1  mul_1 -- Multiply a limb vector with a limb and store
+ *		     the result in a second limb vector.
+ *
+ *      Copyright (C) 1992, 1993, 1994, 1998, 
+ *                    2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ */
+
+
+/*******************
+ * mpi_limb_t
+ * _gcry_mpih_mul_1( mpi_ptr_t res_ptr,	(r26)
+ *		  mpi_ptr_t s1_ptr,	(r25)
+ *		  mpi_size_t s1_size,	(r24)
+ *		  mpi_limb_t s2_limb)	(r23)
+ *
+ *
+ *
+ * This runs at 9 cycles/limb on a PA7000.  With the used instructions, it can
+ * not become faster due to data cache contention after a store.  On the
+ * PA7100 it runs at 7 cycles/limb, and that can not be improved either, since
+ * only the xmpyu does not need the integer pipeline, so the only dual-issue
+ * we will get are addc+xmpyu.	Unrolling would not help either CPU.
+ *
+ * We could use fldds to read two limbs at a time from the S1 array, and that
+ * could bring down the times to 8.5 and 6.5 cycles/limb for the PA7000 and
+ * PA7100, respectively.  We don't do that since it does not seem worth the
+ * (alignment) troubles...
+ *
+ * At least the PA7100 is rumored to be able to deal with cache-misses
+ * without stalling instruction issue.	If this is true, and the cache is
+ * actually also lockup-free, we should use a deeper software pipeline, and
+ * load from S1 very early!  (The loads and stores to -12(sp) will surely be
+ * in the cache.)
+ */
+
+	.level		1.1
+
+	.code
+	.export 	_gcry_mpih_mul_1
+	.label		_gcry_mpih_mul_1
+	.proc
+	.callinfo	frame=64,no_calls
+	.entry
+
+	ldo		64(%r30),%r30
+	fldws,ma	4(%r25),%fr5
+	stw		%r23,-16(%r30)		; move s2_limb ...
+	addib,= 	-1,%r24,L$just_one_limb
+	 fldws		-16(%r30),%fr4		; ... into fr4
+	add		%r0,%r0,%r0		; clear carry
+	xmpyu		%fr4,%fr5,%fr6
+	fldws,ma	4(%r25),%fr7
+	fstds		%fr6,-16(%r30)
+	xmpyu		%fr4,%fr7,%fr8
+	ldw		-12(%r30),%r19		; least significant limb in product
+	ldw		-16(%r30),%r28
+
+	fstds		%fr8,-16(%r30)
+	addib,= 	-1,%r24,L$end
+	 ldw		-12(%r30),%r1
+
+; Main loop
+	.label	L$loop
+	fldws,ma	4(%r25),%fr5
+	stws,ma 	%r19,4(%r26)
+	addc		%r28,%r1,%r19
+	xmpyu		%fr4,%fr5,%fr6
+	ldw		-16(%r30),%r28
+	fstds		%fr6,-16(%r30)
+	addib,<>	-1,%r24,L$loop
+	 ldw		-12(%r30),%r1
+
+	.label	L$end
+	stws,ma 	%r19,4(%r26)
+	addc		%r28,%r1,%r19
+	ldw		-16(%r30),%r28
+	stws,ma 	%r19,4(%r26)
+	addc		%r0,%r28,%r28
+	bv		0(%r2)
+	 ldo		-64(%r30),%r30
+
+	.label	L$just_one_limb
+	xmpyu		%fr4,%fr5,%fr6
+	fstds		%fr6,-16(%r30)
+	ldw		-16(%r30),%r28
+	ldo		-64(%r30),%r30
+	bv		0(%r2)
+	 fstws		%fr6R,0(%r26)
+
+	.exit
+	.procend
+
+
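For reference, here is a minimal C sketch of the operation the routine above implements (ref_mul_1 and the 32-bit limb_t typedef are illustrative assumptions matching the 32-bit PA-RISC limb size, not libgcrypt names). Each step mirrors the assembly: a 32x32->64 multiply as done by xmpyu, the low word stored to the result vector, the high word folded into the running carry that is finally returned.

#include <stdint.h>

typedef uint32_t limb_t;            /* 32-bit limbs, as on hppa1.1 */

/* res[i] = low word of s1[i] * s2, propagating the high word as carry;
 * the final carry -- the most significant limb of the product -- is returned. */
limb_t ref_mul_1(limb_t *res, const limb_t *s1, long n, limb_t s2)
{
    limb_t carry = 0;
    for (long i = 0; i < n; i++) {
        uint64_t p = (uint64_t)s1[i] * s2 + carry;   /* 32x32->64, like xmpyu */
        res[i] = (limb_t)p;                          /* low half to the result */
        carry  = (limb_t)(p >> 32);                  /* high half is next carry */
    }
    return carry;
}
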
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul2.S b/libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul2.S
new file mode 100644
index 0000000..1047ab5
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul2.S
@@ -0,0 +1,117 @@
+/* hppa1.1   addmul_1 -- Multiply a limb vector with a limb and add
+ *			 the result to a second limb vector.
+ *
+ *      Copyright (C) 1992, 1993, 1994, 1998,
+ *                    2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ */
+
+
+/*******************
+ * mpi_limb_t
+ * _gcry_mpih_addmul_1( mpi_ptr_t res_ptr,      (r26)
+ *		     mpi_ptr_t s1_ptr,	     (r25)
+ *		     mpi_size_t s1_size,     (r24)
+ *		     mpi_limb_t s2_limb)     (r23)
+ *
+ * This runs at 11 cycles/limb on a PA7000.  With the used instructions, it
+ * can not become faster due to data cache contention after a store.  On the
+ * PA7100 it runs at 10 cycles/limb, and that can not be improved either,
+ * since only the xmpyu does not need the integer pipeline, so the only
+ * dual-issue we will get are addc+xmpyu.  Unrolling could gain a cycle/limb
+ * on the PA7100.
+ *
+ * There are some ideas described in mul1.S that apply to this code too.
+ */
+
+	.level		1.1
+
+	.code
+	.export 	_gcry_mpih_addmul_1
+	.label		_gcry_mpih_addmul_1
+	.proc
+	.callinfo	frame=64,no_calls
+	.entry
+
+	ldo		64(%r30),%r30
+	fldws,ma	4(%r25),%fr5
+	stw		%r23,-16(%r30)		; move s2_limb ...
+	addib,= 	-1,%r24,L$just_one_limb
+	 fldws		-16(%r30),%fr4		; ... into fr4
+	add		%r0,%r0,%r0		; clear carry
+	xmpyu		%fr4,%fr5,%fr6
+	fldws,ma	4(%r25),%fr7
+	fstds		%fr6,-16(%r30)
+	xmpyu		%fr4,%fr7,%fr8
+	ldw		-12(%r30),%r19		; least significant limb in product
+	ldw		-16(%r30),%r28
+
+	fstds		%fr8,-16(%r30)
+	addib,= 	-1,%r24,L$end
+	 ldw		-12(%r30),%r1
+
+; Main loop
+	.label	L$loop
+	ldws		0(%r26),%r29
+	fldws,ma	4(%r25),%fr5
+	add		%r29,%r19,%r19
+	stws,ma 	%r19,4(%r26)
+	addc		%r28,%r1,%r19
+	xmpyu		%fr4,%fr5,%fr6
+	ldw		-16(%r30),%r28
+	fstds		%fr6,-16(%r30)
+	addc		%r0,%r28,%r28
+	addib,<>	-1,%r24,L$loop
+	 ldw		-12(%r30),%r1
+
+	.label	L$end
+	ldw		0(%r26),%r29
+	add		%r29,%r19,%r19
+	stws,ma 	%r19,4(%r26)
+	addc		%r28,%r1,%r19
+	ldw		-16(%r30),%r28
+	ldws		0(%r26),%r29
+	addc		%r0,%r28,%r28
+	add		%r29,%r19,%r19
+	stws,ma 	%r19,4(%r26)
+	addc		%r0,%r28,%r28
+	bv		0(%r2)
+	 ldo		-64(%r30),%r30
+
+	.label L$just_one_limb
+	xmpyu		%fr4,%fr5,%fr6
+	ldw		0(%r26),%r29
+	fstds		%fr6,-16(%r30)
+	ldw		-12(%r30),%r1
+	ldw		-16(%r30),%r28
+	add		%r29,%r1,%r19
+	stw		%r19,0(%r26)
+	addc		%r0,%r28,%r28
+	bv		0(%r2)
+	 ldo		-64(%r30),%r30
+
+	.exit
+	.procend
+
+
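As with mpih-mul1.S, a hedged C sketch of the semantics may help when reading the hunk above (ref_addmul_1 and the uint32_t limb type are assumptions, not libgcrypt identifiers). The only difference from mul_1 is that the existing contents of res[] are added into the product, which is why the assembly loop reloads and rewrites the destination limb.

#include <stdint.h>

typedef uint32_t limb_t;            /* 32-bit limbs, as on hppa1.1 */

/* res[i] += s1[i] * s2, returning the final carry word. */
limb_t ref_addmul_1(limb_t *res, const limb_t *s1, long n, limb_t s2)
{
    limb_t carry = 0;
    for (long i = 0; i < n; i++) {
        /* product + old destination limb + carry never exceeds 64 bits */
        uint64_t p = (uint64_t)s1[i] * s2 + res[i] + carry;
        res[i] = (limb_t)p;
        carry  = (limb_t)(p >> 32);
    }
    return carry;
}
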
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul3.S b/libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul3.S
new file mode 100644
index 0000000..632adf1
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa1.1/mpih-mul3.S
@@ -0,0 +1,126 @@
+/* hppa1.1   submul_1 -- Multiply a limb vector with a limb and subtract
+ *			 the result from a second limb vector.
+ *
+ *      Copyright (C) 1992, 1993, 1994, 1998, 
+ *                    2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ */
+
+
+/*******************
+ * mpi_limb_t
+ * _gcry_mpih_submul_1( mpi_ptr_t res_ptr,      (r26)
+ *		     mpi_ptr_t s1_ptr,	     (r25)
+ *		     mpi_size_t s1_size,     (r24)
+ *		     mpi_limb_t s2_limb)     (r23)
+ *
+ *
+ * This runs at 12 cycles/limb on a PA7000.  With the used instructions, it
+ * can not become faster due to data cache contention after a store.  On the
+ * PA7100 it runs at 11 cycles/limb, and that can not be improved either,
+ * since only the xmpyu does not need the integer pipeline, so the only
+ * dual-issue we will get are addc+xmpyu.  Unrolling could gain a cycle/limb
+ * on the PA7100.
+ *
+ * There are some ideas described in mul1.S that apply to this code too.
+ *
+ * It seems possible to make this run as fast as addmul_1, if we use
+ *	 sub,>>= %r29,%r19,%r22
+ *	 addi	 1,%r28,%r28
+ * but that requires reworking the hairy software pipeline...
+ */
+
+	.level		1.1
+
+	.code
+	.export 	_gcry_mpih_submul_1
+	.label		_gcry_mpih_submul_1
+	.proc
+	.callinfo	frame=64,no_calls
+	.entry
+
+	ldo		64(%r30),%r30
+	fldws,ma	4(%r25),%fr5
+	stw		%r23,-16(%r30)		; move s2_limb ...
+	addib,= 	-1,%r24,L$just_one_limb
+	 fldws		-16(%r30),%fr4		; ... into fr4
+	add		%r0,%r0,%r0		; clear carry
+	xmpyu		%fr4,%fr5,%fr6
+	fldws,ma	4(%r25),%fr7
+	fstds		%fr6,-16(%r30)
+	xmpyu		%fr4,%fr7,%fr8
+	ldw		-12(%r30),%r19		; least significant limb in product
+	ldw		-16(%r30),%r28
+
+	fstds		%fr8,-16(%r30)
+	addib,= 	-1,%r24,L$end
+	 ldw		-12(%r30),%r1
+
+; Main loop
+	.label	L$loop
+	ldws		0(%r26),%r29
+	fldws,ma	4(%r25),%fr5
+	sub		%r29,%r19,%r22
+	add		%r22,%r19,%r0
+	stws,ma 	%r22,4(%r26)
+	addc		%r28,%r1,%r19
+	xmpyu		%fr4,%fr5,%fr6
+	ldw		-16(%r30),%r28
+	fstds		%fr6,-16(%r30)
+	addc		%r0,%r28,%r28
+	addib,<>	-1,%r24,L$loop
+	 ldw		-12(%r30),%r1
+
+	.label	L$end
+	ldw		0(%r26),%r29
+	sub		%r29,%r19,%r22
+	add		%r22,%r19,%r0
+	stws,ma 	%r22,4(%r26)
+	addc		%r28,%r1,%r19
+	ldw		-16(%r30),%r28
+	ldws		0(%r26),%r29
+	addc		%r0,%r28,%r28
+	sub		%r29,%r19,%r22
+	add		%r22,%r19,%r0
+	stws,ma 	%r22,4(%r26)
+	addc		%r0,%r28,%r28
+	bv		0(%r2)
+	 ldo		-64(%r30),%r30
+
+	.label	L$just_one_limb
+	xmpyu		%fr4,%fr5,%fr6
+	ldw		0(%r26),%r29
+	fstds		%fr6,-16(%r30)
+	ldw		-12(%r30),%r1
+	ldw		-16(%r30),%r28
+	sub		%r29,%r1,%r22
+	add		%r22,%r1,%r0
+	stw		%r22,0(%r26)
+	addc		%r0,%r28,%r28
+	bv		0(%r2)
+	 ldo		-64(%r30),%r30
+
+	.exit
+	.procend
+
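The same kind of sketch for submul_1, again with illustrative names and a 32-bit limb type assumed: the product is subtracted from the destination limb, and both the high word of the product and any borrow out of that subtraction feed the value returned at the end (the assembly tracks this in %r28).

#include <stdint.h>

typedef uint32_t limb_t;            /* 32-bit limbs, as on hppa1.1 */

/* res[i] -= s1[i] * s2, returning the accumulated borrow/high word. */
limb_t ref_submul_1(limb_t *res, const limb_t *s1, long n, limb_t s2)
{
    limb_t carry = 0;
    for (long i = 0; i < n; i++) {
        uint64_t p  = (uint64_t)s1[i] * s2 + carry;  /* product plus pending carry */
        limb_t   lo = (limb_t)p;
        limb_t   r  = res[i] - lo;                   /* may wrap: that is the borrow */
        carry  = (limb_t)(p >> 32) + (r > res[i]);   /* high word + borrow-out */
        res[i] = r;
    }
    return carry;
}
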
diff --git a/libotr/libgcrypt-1.8.7/mpi/hppa1.1/udiv-qrnnd.S b/libotr/libgcrypt-1.8.7/mpi/hppa1.1/udiv-qrnnd.S
new file mode 100644
index 0000000..3f28b7b
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/hppa1.1/udiv-qrnnd.S
@@ -0,0 +1,92 @@
+/* HP-PA  __udiv_qrnnd division support, used from longlong.h.
+ *	  This version runs fast on PA 7000 and later.
+ *
+ *      Copyright (C) 1993, 1994, 1998, 
+ *                    2001, 2002, 2004 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ */
+
+
+
+/* INPUT PARAMETERS
+ *   rem_ptr	   gr26
+ *   n1 	   gr25
+ *   n0 	   gr24
+ *   d		   gr23
+ */
+
+	.level		1.1
+
+        .data
+	.align		8
+	.label L$0000
+	.word		0x43f00000
+	.word		0x0
+        .code
+	.export 	__udiv_qrnnd
+	.label		__udiv_qrnnd
+	.proc
+	.callinfo	frame=64,no_calls
+	.entry
+	ldo		64(%r30),%r30
+
+	stws		%r25,-16(0,%r30)	; n_hi
+	stws		%r24,-12(0,%r30)	; n_lo
+        stw             %r19,-32(%r30)
+        addil           LT%L$0000,%r19
+        ldw             RT%L$0000(%r1),%r1
+	fldds		-16(0,%r30),%fr5
+	stws		%r23,-12(0,%r30)
+	comib,<=	0,%r25,L$1
+	fcnvxf,dbl,dbl	%fr5,%fr5
+        fldds           0(0,%r1),%fr4
+	fadd,dbl	%fr4,%fr5,%fr5
+	.label	L$1
+	fcpy,sgl	%fr0,%fr6L
+	fldws		-12(0,%r30),%fr6R
+	fcnvxf,dbl,dbl	%fr6,%fr4
+
+	fdiv,dbl	%fr5,%fr4,%fr5
+
+	fcnvfx,dbl,dbl	%fr5,%fr4
+	fstws		%fr4R,-16(%r30)
+	xmpyu		%fr4R,%fr6R,%fr6
+	ldws		-16(%r30),%r28
+	fstds		%fr6,-16(0,%r30)
+	ldws		-12(0,%r30),%r21
+	ldws		-16(0,%r30),%r20
+	sub		%r24,%r21,%r22
+        subb            %r25,%r20,%r1
+        comib,=         0,%r1,L$2
+	ldo		-64(%r30),%r30
+
+	add		%r22,%r23,%r22
+	ldo		-1(%r28),%r28
+	.label	L$2
+	bv		0(%r2)
+	stws		%r22,0(0,%r26)
+
+	.exit
+	.procend
+
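Finally, a small C sketch of what __udiv_qrnnd computes (the ref_udiv_qrnnd name and 32-bit limb type are assumptions for illustration). The assembly reaches the result through a double-precision FP divide followed by an integer correction step; the sketch below simply uses a 64-bit integer division, under the usual precondition that n1 < d so the quotient fits in a single limb.

#include <stdint.h>

typedef uint32_t limb_t;            /* 32-bit limbs, as on hppa1.1 */

/* Divide the two-limb value (n1:n0) by d: return the quotient and store
 * the remainder through rem_ptr.  Requires n1 < d. */
limb_t ref_udiv_qrnnd(limb_t *rem_ptr, limb_t n1, limb_t n0, limb_t d)
{
    uint64_t n = ((uint64_t)n1 << 32) | n0;          /* reassemble the 64-bit dividend */
    *rem_ptr = (limb_t)(n % d);
    return (limb_t)(n / d);
}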