From 17bc5bda644bdaaa03d4132e0df2fdb3c496050a Mon Sep 17 00:00:00 2001
From: miod <>
Date: Fri, 5 Jul 2013 21:10:50 +0000
Subject: VAX ELF userland bits. Consists mostly of register prefix additions.

---
 src/lib/libssl/crypto/arch/vax/bn_asm_vax.S | 302 ++++++++++++++--------------
 1 file changed, 151 insertions(+), 151 deletions(-)

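For context, the change is purely one of spelling: the assembler used for VAX ELF output expects register names (and the argument pointer) to be written with a % prefix, while the old a.out assembler accepted bare names, so every operand is respelled but no instruction changes. A representative before/after pair, quoted verbatim from the first hunk:

	movl	4(ap),r2		# a.out spelling
	movl	4(%ap),%r2		# ELF spelling, same instruction

No opcodes, operands, or instruction counts change anywhere in the file.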

diff --git a/src/lib/libssl/crypto/arch/vax/bn_asm_vax.S b/src/lib/libssl/crypto/arch/vax/bn_asm_vax.S
index f1e46b2010..efa9b6ebd6 100644
--- a/src/lib/libssl/crypto/arch/vax/bn_asm_vax.S
+++ b/src/lib/libssl/crypto/arch/vax/bn_asm_vax.S
@@ -1,4 +1,4 @@
-#	$OpenBSD: bn_asm_vax.S,v 1.2 2012/10/13 21:31:56 djm Exp $
+#	$OpenBSD: bn_asm_vax.S,v 1.3 2013/07/05 21:10:50 miod Exp $
 #	$NetBSD: bn_asm_vax.S,v 1.1 2003/11/03 10:22:28 ragge Exp $
 
 #include <machine/asm.h>
@@ -15,38 +15,38 @@
 # }
 
 ENTRY(bn_mul_add_words,R6)
-	movl	4(ap),r2		# *r
-	movl	8(ap),r3		# *a
-	movl	12(ap),r4		# n
-	movl	16(ap),r5		# w
-	clrl	r6			# return value ("carry")
+	movl	4(%ap),%r2		# *r
+	movl	8(%ap),%r3		# *a
+	movl	12(%ap),%r4		# n
+	movl	16(%ap),%r5		# w
+	clrl	%r6			# return value ("carry")
 
-0:	emul	r5,(r3),(r2),r0	# w * a[0] + r[0] -> r0
+0:	emul	%r5,(%r3),(%r2),%r0	# w * a[0] + r[0] -> r0
 
 	# fixup for "negative" r[]
-	tstl	(r2)
+	tstl	(%r2)
 	bgeq	1f
-	incl	r1			# add 1 to highword
+	incl	%r1			# add 1 to highword
 
 1:	# add saved carry to result
-	addl2	r6,r0
-	adwc	$0,r1
+	addl2	%r6,%r0
+	adwc	$0,%r1
 
 	# combined fixup for "negative" w, a[]
-	tstl	r5		# if w is negative...
+	tstl	%r5		# if w is negative...
 	bgeq	1f
-	addl2	(r3),r1		# ...add a[0] again to highword
-1:	tstl	(r3)		# if a[0] is negative...
+	addl2	(%r3),%r1	# ...add a[0] again to highword
+1:	tstl	(%r3)		# if a[0] is negative...
 	bgeq	1f
-	addl2	r5,r1		# ...add w again to highword
+	addl2	%r5,%r1		# ...add w again to highword
 1:
-	movl	r0,(r2)+	# save low word in dest & advance *r
-	addl2	$4,r3		# advance *a
-	movl	r1,r6		# high word in r6 for return value
+	movl	%r0,(%r2)+	# save low word in dest & advance *r
+	addl2	$4,%r3		# advance *a
+	movl	%r1,%r6		# high word in r6 for return value
 
-	sobgtr	r4,0b		# loop?
+	sobgtr	%r4,0b		# loop?
 
-	movl	r6,r0
+	movl	%r6,%r0
 	ret
 
 #	.title	vax_bn_mul_words  unsigned multiply & add, 32*32+32=>64
@@ -64,34 +64,34 @@ ENTRY(bn_mul_add_words,R6)
 #
 
 ENTRY(bn_mul_words,R6)
-	movl	4(ap),r2		# *r
-	movl	8(ap),r3		# *a
-	movl	12(ap),r4		# n
-	movl	16(ap),r5		# w
-	clrl	r6			# carry
+	movl	4(%ap),%r2		# *r
+	movl	8(%ap),%r3		# *a
+	movl	12(%ap),%r4		# n
+	movl	16(%ap),%r5		# w
+	clrl	%r6			# carry
 
-0:	emul	r5,(r3),r6,r0		# w * a[0] + carry -> r0
+0:	emul	%r5,(%r3),%r6,%r0	# w * a[0] + carry -> r0
 
 	# fixup for "negative" carry
-	tstl	r6
+	tstl	%r6
 	bgeq	1f
-	incl	r1
+	incl	%r1
 
 1:	# combined fixup for "negative" w, a[]
-	tstl	r5
+	tstl	%r5
 	bgeq	1f
-	addl2	(r3),r1
-1:	tstl	(r3)
+	addl2	(%r3),%r1
+1:	tstl	(%r3)
 	bgeq	1f
-	addl2	r5,r1
+	addl2	%r5,%r1
 
-1:	movl	r0,(r2)+
-	addl2	$4,r3
-	movl	r1,r6
+1:	movl	%r0,(%r2)+
+	addl2	$4,%r3
+	movl	%r1,%r6
 
-	sobgtr	r4,0b
+	sobgtr	%r4,0b
 
-	movl	r6,r0
+	movl	%r6,%r0
 	ret
 
 
@@ -109,23 +109,23 @@ ENTRY(bn_mul_words,R6)
 #
 
 ENTRY(bn_sqr_words,0)
-	movl	4(ap),r2		# r
-	movl	8(ap),r3		# a
-	movl	12(ap),r4		# n
+	movl	4(%ap),%r2		# r
+	movl	8(%ap),%r3		# a
+	movl	12(%ap),%r4		# n
 
-0:	movl	(r3)+,r5		# r5 = a[] & advance
+0:	movl	(%r3)+,%r5		# r5 = a[] & advance
 
-	emul	r5,r5,$0,r0		# a[0] * a[0] + 0 -> r0
+	emul	%r5,%r5,$0,%r0		# a[0] * a[0] + 0 -> r0
 
 	# fixup for "negative" a[]
-	tstl	r5
+	tstl	%r5
 	bgeq	1f
-	addl2	r5,r1
-	addl2	r5,r1
+	addl2	%r5,%r1
+	addl2	%r5,%r1
 
-1:	movq	r0,(r2)+		# store 64-bit result
+1:	movq	%r0,(%r2)+		# store 64-bit result
 
-	sobgtr	r4,0b			# loop
+	sobgtr	%r4,0b			# loop
 
 	ret
 
@@ -219,90 +219,90 @@ ENTRY(bn_sqr_words,0)
 #
 
 ENTRY(bn_div_words,R6|R7|R8)
-	movl	4(ap),r3		# h
-	movl	8(ap),r2		# l
-	movl	12(ap),r4		# d
+	movl	4(%ap),%r3		# h
+	movl	8(%ap),%r2		# l
+	movl	12(%ap),%r4		# d
 
-	bicl3	$-8,r2,r5		# l' = l & 7
-	bicl3	$7,r2,r2
+	bicl3	$-8,%r2,%r5		# l' = l & 7
+	bicl3	$7,%r2,%r2
 
-	bicl3	$-8,r3,r6
-	bicl3	$7,r3,r3
+	bicl3	$-8,%r3,%r6
+	bicl3	$7,%r3,%r3
 
-	addl2	r6,r2
+	addl2	%r6,%r2
 
-	rotl	$-3,r2,r2		# l = l >> 3
-	rotl	$-3,r3,r3		# h = h >> 3
+	rotl	$-3,%r2,%r2		# l = l >> 3
+	rotl	$-3,%r3,%r3		# h = h >> 3
 
-	movl	r4,r7			# d' = d
+	movl	%r4,%r7			# d' = d
 
-	clrl	r6			# r' = 0
-	clrl	r8			# q' = 0
+	clrl	%r6			# r' = 0
+	clrl	%r8			# q' = 0
 
-	tstl	r4
+	tstl	%r4
 	beql	0f			# Uh-oh, the divisor is 0...
 	bgtr	1f
-	rotl	$-1,r4,r4	# If d is negative, shift it right.
-	bicl2	$0x80000000,r4	# Since d is then a large number, the
+	rotl	$-1,%r4,%r4	# If d is negative, shift it right.
+	bicl2	$0x80000000,%r4	# Since d is then a large number, the
 				# lowest bit is insignificant
 				# (contradict that, and I'll fix the problem!)
 1:
-	ediv	r4,r2,r2,r3		# Do the actual division
+	ediv	%r4,%r2,%r2,%r3		# Do the actual division
 
-	tstl	r2
+	tstl	%r2
 	bgeq	1f
-	mnegl	r2,r2		# if q < 0, negate it
+	mnegl	%r2,%r2		# if q < 0, negate it
 1:
-	tstl	r7
+	tstl	%r7
 	blss	1f
-	rotl	$3,r2,r2	#   q = q << 3
-	bicl3	$-8,r2,r8	#   q' gets the high bits from q
-	bicl3	$7,r2,r2
+	rotl	$3,%r2,%r2	#   q = q << 3
+	bicl3	$-8,%r2,%r8	#   q' gets the high bits from q
+	bicl3	$7,%r2,%r2
 	brb	2f
 
 1:				# else
-	rotl	$2,r2,r2	#   q = q << 2
-	bicl3	$-4,r2,r8	#   q' gets the high bits from q
-	bicl3	$3,r2,r2
+	rotl	$2,%r2,%r2	#   q = q << 2
+	bicl3	$-4,%r2,%r8	#   q' gets the high bits from q
+	bicl3	$3,%r2,%r2
 2:
-	rotl	$3,r3,r3	# r = r << 3
-	bicl3	$-8,r3,r6	# r' gets the high bits from r
-	bicl3	$7,r3,r3
-	addl2	r5,r3		# r = r + l'
+	rotl	$3,%r3,%r3	# r = r << 3
+	bicl3	$-8,%r3,%r6	# r' gets the high bits from r
+	bicl3	$7,%r3,%r3
+	addl2	%r5,%r3		# r = r + l'
 
-	tstl	r7
+	tstl	%r7
 	bgeq	5f
-	bitl	$1,r7
+	bitl	$1,%r7
 	beql	5f		# if d' < 0 && d' & 1
-	subl2	r2,r3		#   [r',r] = [r',r] - [q',q]
-	sbwc	r8,r6
+	subl2	%r2,%r3		#   [r',r] = [r',r] - [q',q]
+	sbwc	%r8,%r6
 3:
 	bgeq	5f		#   while r < 0
-	decl	r2		#     [q',q] = [q',q] - 1
-	sbwc	$0,r8
-	addl2	r7,r3		#     [r',r] = [r',r] + d'
-	adwc	$0,r6
+	decl	%r2		#     [q',q] = [q',q] - 1
+	sbwc	$0,%r8
+	addl2	%r7,%r3		#     [r',r] = [r',r] + d'
+	adwc	$0,%r6
 	brb	3b
 
 # The return points are placed in the middle to keep a short distance from
 # all the branch points
 1:
-#	movl	r3,r1
-	movl	r2,r0
+#	movl	%r3,%r1
+	movl	%r2,%r0
 	ret
 0:
-	movl	$-1,r0
+	movl	$-1,%r0
 	ret
 5:
-	tstl	r6
+	tstl	%r6
 	bneq	6f
-	cmpl	r3,r7
+	cmpl	%r3,%r7
 	blssu	1b		# while [r',r] >= d'
 6:
-	subl2	r7,r3		#   [r',r] = [r',r] - d'
-	sbwc	$0,r6
-	incl	r2		#   [q',q] = [q',q] + 1
-	adwc	$0,r8
+	subl2	%r7,%r3		#   [r',r] = [r',r] - d'
+	sbwc	$0,%r6
+	incl	%r2		#   [q',q] = [q',q] + 1
+	adwc	$0,%r8
 	brb	5b
 
 
@@ -320,21 +320,21 @@ ENTRY(bn_div_words,R6|R7|R8)
 #
 
 ENTRY(bn_add_words,0)
-	movl	4(ap),r2	# r
-	movl	8(ap),r3	# a
-	movl	12(ap),r4	# b
-	movl	16(ap),r5	# n
-	clrl	r0
+	movl	4(%ap),%r2	# r
+	movl	8(%ap),%r3	# a
+	movl	12(%ap),%r4	# b
+	movl	16(%ap),%r5	# n
+	clrl	%r0
 
-	tstl	r5
+	tstl	%r5
 	bleq	1f
 
-0:	movl	(r3)+,r1	# carry untouched
-	adwc	(r4)+,r1	# carry used and touched
-	movl	r1,(r2)+	# carry untouched
-	sobgtr	r5,0b		# carry untouched
+0:	movl	(%r3)+,%r1	# carry untouched
+	adwc	(%r4)+,%r1	# carry used and touched
+	movl	%r1,(%r2)+	# carry untouched
+	sobgtr	%r5,0b		# carry untouched
 
-	adwc	$0,r0
+	adwc	$0,%r0
 1:	ret
 
 #;
@@ -349,21 +349,21 @@ ENTRY(bn_add_words,0)
 #
 
 ENTRY(bn_sub_words,R6)
-	movl	4(ap),r2	# r
-	movl	8(ap),r3	# a
-	movl	12(ap),r4	# b
-	movl	16(ap),r5	# n
-	clrl	r0
+	movl	4(%ap),%r2	# r
+	movl	8(%ap),%r3	# a
+	movl	12(%ap),%r4	# b
+	movl	16(%ap),%r5	# n
+	clrl	%r0
 
-	tstl	r5
+	tstl	%r5
 	bleq	1f
 
-0:	movl	(r3)+,r6	# carry untouched
-	sbwc	(r4)+,r6	# carry used and touched
-	movl	r6,(r2)+	# carry untouched
-	sobgtr	r5,0b		# carry untouched
+0:	movl	(%r3)+,%r6	# carry untouched
+	sbwc	(%r4)+,%r6	# carry used and touched
+	movl	%r6,(%r2)+	# carry untouched
+	sobgtr	%r5,0b		# carry untouched
 
-1:	adwc	$0,r0
+1:	adwc	$0,%r0
 	ret
 
 #
@@ -374,63 +374,63 @@ ENTRY(bn_sub_words,R6)
 #
 
 ENTRY(bn_mul_comba4,R6|R7|R8|R9)
-	movl	$4,r9		# 4*4
+	movl	$4,%r9		# 4*4
 	brb	6f
 
 ENTRY(bn_mul_comba8,R6|R7|R8|R9)
-	movl	$8,r9		# 8*8
+	movl	$8,%r9		# 8*8
 
-6:	movl	8(ap),r3	# a[]
-	movl	12(ap),r7	# b[]
+6:	movl	8(%ap),%r3	# a[]
+	movl	12(%ap),%r7	# b[]
 	brb	5f
 
 ENTRY(bn_sqr_comba4,R6|R7|R8|R9)
-	movl	$4,r9		# 4*4
+	movl	$4,%r9		# 4*4
 	brb 0f
 
 ENTRY(bn_sqr_comba8,R6|R7|R8|R9)
-	movl	$8,r9		# 8*8
+	movl	$8,%r9		# 8*8
 
 0:
-	movl	8(ap),r3	# a[]
-	movl	r3,r7		# a[]
+	movl	8(%ap),%r3	# a[]
+	movl	%r3,%r7		# a[]
 
-5:	movl	4(ap),r5	# r[]
-	movl	r9,r8
+5:	movl	4(%ap),%r5	# r[]
+	movl	%r9,%r8
 
-	clrq	(r5)		# clear destination, for add.
-	clrq	8(r5)
-	clrq	16(r5)		# these only needed for comba8
-	clrq	24(r5)
+	clrq	(%r5)		# clear destination, for add.
+	clrq	8(%r5)
+	clrq	16(%r5)		# these only needed for comba8
+	clrq	24(%r5)
 
-2:	clrl	r4		# carry
-	movl	r9,r6		# inner loop count
-	movl	(r7)+,r2	# value to multiply with
+2:	clrl	%r4		# carry
+	movl	%r9,%r6		# inner loop count
+	movl	(%r7)+,%r2	# value to multiply with
 
-1:	emul	r2,(r3),r4,r0
-	tstl	r4
+1:	emul	%r2,(%r3),%r4,%r0
+	tstl	%r4
 	bgeq	3f
-	incl	r1
-3:	tstl	r2
+	incl	%r1
+3:	tstl	%r2
 	bgeq	3f
-	addl2	(r3),r1
-3:	tstl	(r3)
+	addl2	(%r3),%r1
+3:	tstl	(%r3)
 	bgeq	3f
-	addl2	r2,r1
+	addl2	%r2,%r1
 
-3:	addl2	r0,(r5)+	# add to destination
-	adwc	$0,r1		# remember carry
-	movl	r1,r4		# add carry in next emul
-	addl2	$4,r3
-	sobgtr	r6,1b
+3:	addl2	%r0,(%r5)+	# add to destination
+	adwc	$0,%r1		# remember carry
+	movl	%r1,%r4		# add carry in next emul
+	addl2	$4,%r3
+	sobgtr	%r6,1b
 
-	movl	r4,(r5)		# save highest add result
+	movl	%r4,(%r5)	# save highest add result
 
-	ashl	$2,r9,r4
-	subl2	r4,r3
-	subl2	$4,r4
-	subl2	r4,r5
+	ashl	$2,%r9,%r4
+	subl2	%r4,%r3
+	subl2	$4,%r4
+	subl2	%r4,%r5
 
-	sobgtr	r8,2b
+	sobgtr	%r8,2b
 
 	ret
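A closing note on the arithmetic the (otherwise untouched) code performs, since it explains the recurring "fixup for negative" sequences above: the VAX emul instruction is a signed 32x32+32 -> 64 multiply-add, while the bignum routines need the unsigned product, so the high word is patched up afterwards. Below is a minimal C sketch of that correction for a single limb, written as an illustration only; it is not part of the patch, and the helper name mul_add_32 is the editor's, not OpenSSL's.

#include <stdint.h>

/*
 * Unsigned w * a + r built on a signed 32x32+32 -> 64 multiply-add,
 * mirroring what bn_mul_add_words does around emul.
 */
static uint64_t
mul_add_32(uint32_t w, uint32_t a, uint32_t r)
{
	/* what emul produces: signed multiply, addend sign-extended */
	int64_t t = (int64_t)(int32_t)w * (int32_t)a + (int32_t)r;
	uint64_t res = (uint64_t)t;

	if ((int32_t)r < 0)			/* addend was sign-extended... */
		res += (uint64_t)1 << 32;	/* ...so add 1 to the high word */
	if ((int32_t)w < 0)			/* "negative" w... */
		res += (uint64_t)a << 32;	/* ...add a to the high word */
	if ((int32_t)a < 0)			/* "negative" a... */
		res += (uint64_t)w << 32;	/* ...add w to the high word */

	return res;		/* equals (uint64_t)w * a + r exactly */
}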