summary refs log tree commit diff stats
path: root/libotr/libgcrypt-1.8.7/mpi/pentium4/sse2/mpih-mul2.S
diff options
context:
space:
mode:
authorSoniEx2 <endermoneymod@gmail.com>2021-04-09 07:19:03 -0300
committerSoniEx2 <endermoneymod@gmail.com>2021-04-09 07:19:03 -0300
commit0e752a6e215aee21dc73da097c3225495d54a5b6 (patch)
treeb81be02cbf2f06aebf322ac4a5d014b44176bba5 /libotr/libgcrypt-1.8.7/mpi/pentium4/sse2/mpih-mul2.S
parent7754076c715285173311a1b6811ce377950e18a6 (diff)
Add libotr/etc sources
Diffstat (limited to 'libotr/libgcrypt-1.8.7/mpi/pentium4/sse2/mpih-mul2.S')
-rw-r--r--libotr/libgcrypt-1.8.7/mpi/pentium4/sse2/mpih-mul2.S136
1 files changed, 136 insertions, 0 deletions
diff --git a/libotr/libgcrypt-1.8.7/mpi/pentium4/sse2/mpih-mul2.S b/libotr/libgcrypt-1.8.7/mpi/pentium4/sse2/mpih-mul2.S
new file mode 100644
index 0000000..f975adf
--- /dev/null
+++ b/libotr/libgcrypt-1.8.7/mpi/pentium4/sse2/mpih-mul2.S
@@ -0,0 +1,136 @@
+/* Intel Pentium-4 mpn_addmul_1 -- Multiply a limb vector with a limb and add
+ * the result to a second limb vector.
+ *
+ * Copyright 2001, 2002, 2004, 2005 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ */
+
+
+#include "sysdep.h"
+#include "asm-syntax.h"
+
+
+/*******************
+ * mpi_limb_t
+ * _gcry_mpih_addmul_1( mpi_ptr_t res_ptr,      (sp + 4)
+ *		     mpi_ptr_t s1_ptr,	     (sp + 8)
+ *		     mpi_size_t s1_size,     (sp + 12)
+ *		     mpi_limb_t s2_limb)     (sp + 16)
+ *
+ * P3 model 9  (Banias)          ?.?
+ * P3 model 13 (Dothan)          5.8
+ * P4 model 0  (Willamette)      5.5
+ * P4 model 1  (?)               5.5
+ * P4 model 2  (Northwood)       5.5
+ * P4 model 3  (Prescott)        6.0
+ * P4 model 4  (Nocona)
+ *
+ * Only the carry limb propagation is on the dependent chain, but some other
+ * Pentium4 pipeline magic brings down performance to 6 cycles/l from the
+ * ideal 4 cycles/l.
+ */
+
+
+	TEXT
+	ALIGN (4)
+	GLOBL	C_SYMBOL_NAME(_gcry_mpih_addmul_1)
+C_SYMBOL_NAME(_gcry_mpih_addmul_1:)
+
+/* Clear mm4: the running carry limb starts at zero. */
+	pxor	%mm4, %mm4
+.Lstart_1c:
+/* Load arguments from the stack (cdecl, no frame): see header comment
+ * above for the (sp + N) layout. */
+	movl	8(%esp), %eax
+	movl	12(%esp), %ecx
+	movl	4(%esp), %edx
+	movd	16(%esp), %mm7
+
+/*
+	C eax	src, incrementing ; 5B
+	C ecx	loop counter, decrementing
+	C edx	dst, incrementing
+	C
+	C mm4	carry, low 32-bits
+	C mm7	multiplier
+*/
+
+/* Prime the pipeline: mm2 = src[0] * multiplier (64-bit product). */
+	movd		(%eax), %mm2	
+	pmuludq		%mm7, %mm2
+
+/* The main loop handles two limbs per iteration, so halve the count.
+ * CF holds the old low bit: if the size was odd, process one limb
+ * first; if even, jump straight into the pipelined body. */
+	shrl	$1, %ecx
+	jnc	.Leven
+
+/* Odd case: dst[0] += src[0]*m (+ carry 0); store the low 32 bits,
+ * keep the high 32 bits in mm4 as the carry into the next limb. */
+	leal		4(%eax), %eax
+	movd		(%edx), %mm1
+	paddq		%mm2, %mm1
+	paddq		%mm1, %mm4
+	movd		%mm4, (%edx)
+	psrlq		$32, %mm4
+
+/* If the halved count is now zero the size was exactly 1: done. */
+	testl	%ecx, %ecx
+	jz	.Lrtn
+	leal	4(%edx), %edx
+
+/* Re-prime mm2 with the next source limb's product so the even-count
+ * entry point below sees the same pipeline state in both paths. */
+	movd		(%eax), %mm2	
+	pmuludq		%mm7, %mm2
+.Leven:
+/* Pipeline state on entry: mm2 = product of limb i (already computed),
+ * mm0 = product of limb i+1 (started here), mm1 = dst[i]. */
+	movd		4(%eax), %mm0	
+	movd		(%edx), %mm1	
+	pmuludq		%mm7, %mm0
+
+/* One double-limb group is finished by .Lend; loop only if more remain. */
+	subl	$1, %ecx
+	jz	.Lend
+.Lloop:
+/* First limb of the pair: dst[0] = low32(mm2 + dst[0] + carry);
+ * the next pair's first product (src[2]) is started in the shadow
+ * of the carry-propagation chain. */
+	paddq		%mm2, %mm1	
+	movd		8(%eax), %mm2	
+	paddq		%mm1, %mm4	
+	movd		4(%edx), %mm3	
+	pmuludq		%mm7, %mm2
+	movd		%mm4, (%edx)
+	psrlq		$32, %mm4
+
+/* Second limb of the pair, symmetric: uses mm0/mm3, starts src[3]. */
+	paddq		%mm0, %mm3	
+	movd		12(%eax), %mm0	
+	paddq		%mm3, %mm4	
+	movd		8(%edx), %mm1	
+	pmuludq		%mm7, %mm0
+	movd		%mm4, 4(%edx)
+	psrlq		$32, %mm4
+
+/* Advance both pointers by two limbs (8 bytes) and loop. */
+	leal	8(%eax), %eax
+	leal	8(%edx), %edx
+	subl	$1, %ecx
+	jnz	.Lloop
+.Lend:
+/* Drain the pipeline: finish the last two limbs (products already in
+ * mm2/mm0, dst[0] already in mm1) without starting new loads. */
+	paddq		%mm2, %mm1	
+	paddq		%mm1, %mm4	
+	movd		4(%edx), %mm3	
+	movd		%mm4, (%edx)
+	psrlq		$32, %mm4
+	paddq		%mm0, %mm3	
+	paddq		%mm3, %mm4	
+	movd		%mm4, 4(%edx)
+	psrlq		$32, %mm4
+.Lrtn:
+/* Return the final carry limb in eax; emms restores the x87 state
+ * for any FP code that runs after this MMX routine. */
+	movd	%mm4, %eax
+	emms
+	ret