From e82f18fab47b698d93971f576f962a3068132912 Mon Sep 17 00:00:00 2001
From: cvs2svn
Date: Mon, 5 Oct 1998 20:13:17 +0000
Subject: This commit was manufactured by cvs2git to create tag 'SSLeay_0_9_0b'.

---
 src/lib/libcrypto/rc4/asm/r4-win32.asm | 314 +++++++++++++++++++++++++++++
 src/lib/libcrypto/rc4/asm/rx86unix.cpp | 358 +++++++++++++++++++++++++++++++++
 2 files changed, 672 insertions(+)
 create mode 100644 src/lib/libcrypto/rc4/asm/r4-win32.asm
 create mode 100644 src/lib/libcrypto/rc4/asm/rx86unix.cpp

(limited to 'src/lib/libcrypto/rc4/asm')

diff --git a/src/lib/libcrypto/rc4/asm/r4-win32.asm b/src/lib/libcrypto/rc4/asm/r4-win32.asm
new file mode 100644
index 0000000000..70b0f7484c
--- /dev/null
+++ b/src/lib/libcrypto/rc4/asm/r4-win32.asm
@@ -0,0 +1,314 @@
+	; Don't even think of reading this code
+	; It was automatically generated by rc4-586.pl
+	; Which is a perl program used to generate the x86 assember for
+	; any of elf, a.out, BSDI,Win32, or Solaris
+	; eric
+	;
+	TITLE	rc4-586.asm
+	.386
+.model FLAT
+_TEXT	SEGMENT
+PUBLIC	_RC4
+
+_RC4	PROC NEAR
+	;
+	push	ebp
+	push	ebx
+	mov	ebp, DWORD PTR 12[esp]
+	mov	ebx, DWORD PTR 16[esp]
+	push	esi
+	push	edi
+	mov	ecx, DWORD PTR [ebp]
+	mov	edx, DWORD PTR 4[ebp]
+	mov	esi, DWORD PTR 28[esp]
+	inc	ecx
+	sub	esp, 12
+	add	ebp, 8
+	and	ecx, 255
+	lea	ebx, DWORD PTR [esi+ebx-8]
+	mov	edi, DWORD PTR 44[esp]
+	mov	DWORD PTR 8[esp],ebx
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	cmp	ebx, esi
+	jl	$L000end
+L001start:
+	add	esi, 8
+	; Round 0
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	BYTE PTR [esp], bl
+	; Round 1
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	BYTE PTR 1[esp],bl
+	; Round 2
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	BYTE PTR 2[esp],bl
+	; Round 3
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	BYTE PTR 3[esp],bl
+	; Round 4
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	BYTE PTR 4[esp],bl
+	; Round 5
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	BYTE PTR 5[esp],bl
+	; Round 6
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	BYTE PTR 6[esp],bl
+	; Round 7
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	add	edi, 8
+	mov	BYTE PTR 7[esp],bl
+	; apply the cipher text
+	mov	eax, DWORD PTR [esp]
+	mov	ebx, DWORD PTR [esi-8]
+	xor	eax, ebx
+	mov	ebx, DWORD PTR [esi-4]
+	mov	DWORD PTR [edi-8],eax
+	mov	eax, DWORD PTR 4[esp]
+	xor	eax, ebx
+	mov	ebx, DWORD PTR 8[esp]
+	mov	DWORD PTR [edi-4],eax
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	cmp	esi, ebx
+	jle	L001start
+$L000end:
+	; Round 0
+	add	ebx, 8
+	inc	esi
+	cmp	ebx, esi
+	jl	$L002finished
+	mov	DWORD PTR 8[esp],ebx
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	bh, BYTE PTR [esi-1]
+	xor	bl, bh
+	mov	BYTE PTR [edi], bl
+	; Round 1
+	mov	ebx, DWORD PTR 8[esp]
+	cmp	ebx, esi
+	jle	$L002finished
+	inc	esi
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	bh, BYTE PTR [esi-1]
+	xor	bl, bh
+	mov	BYTE PTR 1[edi],bl
+	; Round 2
+	mov	ebx, DWORD PTR 8[esp]
+	cmp	ebx, esi
+	jle	$L002finished
+	inc	esi
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	bh, BYTE PTR [esi-1]
+	xor	bl, bh
+	mov	BYTE PTR 2[edi],bl
+	; Round 3
+	mov	ebx, DWORD PTR 8[esp]
+	cmp	ebx, esi
+	jle	$L002finished
+	inc	esi
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	bh, BYTE PTR [esi-1]
+	xor	bl, bh
+	mov	BYTE PTR 3[edi],bl
+	; Round 4
+	mov	ebx, DWORD PTR 8[esp]
+	cmp	ebx, esi
+	jle	$L002finished
+	inc	esi
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	bh, BYTE PTR [esi-1]
+	xor	bl, bh
+	mov	BYTE PTR 4[edi],bl
+	; Round 5
+	mov	ebx, DWORD PTR 8[esp]
+	cmp	ebx, esi
+	jle	$L002finished
+	inc	esi
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	eax, DWORD PTR [ecx*4+ebp]
+	mov	bh, BYTE PTR [esi-1]
+	xor	bl, bh
+	mov	BYTE PTR 5[edi],bl
+	; Round 6
+	mov	ebx, DWORD PTR 8[esp]
+	cmp	ebx, esi
+	jle	$L002finished
+	inc	esi
+	add	edx, eax
+	and	edx, 255
+	inc	ecx
+	mov	ebx, DWORD PTR [edx*4+ebp]
+	mov	DWORD PTR [ecx*4+ebp-4],ebx
+	add	ebx, eax
+	and	ecx, 255
+	and	ebx, 255
+	mov	DWORD PTR [edx*4+ebp],eax
+	nop
+	mov	ebx, DWORD PTR [ebx*4+ebp]
+	mov	bh, BYTE PTR [esi-1]
+	xor	bl, bh
+	mov	BYTE PTR 6[edi],bl
+$L002finished:
+	dec	ecx
+	add	esp, 12
+	mov	DWORD PTR [ebp-4],edx
+	mov	BYTE PTR [ebp-8],cl
+	pop	edi
+	pop	esi
+	pop	ebx
+	pop	ebp
+	ret
+_RC4	ENDP
+_TEXT	ENDS
+END

diff --git a/src/lib/libcrypto/rc4/asm/rx86unix.cpp b/src/lib/libcrypto/rc4/asm/rx86unix.cpp
new file mode 100644
index 0000000000..ec1d72a110
--- /dev/null
+++ b/src/lib/libcrypto/rc4/asm/rx86unix.cpp
@@ -0,0 +1,358 @@
+/* Run the C pre-processor over this file with one of the following defined
+ * ELF - elf object files,
+ * OUT - a.out object files,
+ * BSDI - BSDI style a.out object files
+ * SOL - Solaris style elf
+ */
+
+#define TYPE(a,b)	.type	a,b
+#define SIZE(a,b)	.size	a,b
+
+#if defined(OUT) || defined(BSDI)
+#define RC4 _RC4
+
+#endif
+
+#ifdef OUT
+#define OK	1
+#define ALIGN	4
+#endif
+
+#ifdef BSDI
+#define OK	1
+#define ALIGN	4
+#undef SIZE
+#undef TYPE
+#define SIZE(a,b)
+#define TYPE(a,b)
+#endif
+
+#if defined(ELF) || defined(SOL)
+#define OK	1
+#define ALIGN	16
+#endif
+
+#ifndef OK
+You need to define one of
+ELF - elf systems - linux-elf, NetBSD and DG-UX
+OUT - a.out systems - linux-a.out and FreeBSD
+SOL - solaris systems, which are elf with strange comment lines
+BSDI - a.out with a very primative version of as.
+#endif
+
+/* Let the Assembler begin :-) */
+	/* Don't even think of reading this code */
+	/* It was automatically generated by rc4-586.pl */
+	/* Which is a perl program used to generate the x86 assember for */
+	/* any of elf, a.out, BSDI,Win32, or Solaris */
+	/* eric */
+
+	.file	"rc4-586.s"
+	.version	"01.01"
+gcc2_compiled.:
+.text
+	.align ALIGN
+.globl RC4
+	TYPE(RC4,@function)
+RC4:
+
+	pushl	%ebp
+	pushl	%ebx
+	movl	12(%esp), %ebp
+	movl	16(%esp), %ebx
+	pushl	%esi
+	pushl	%edi
+	movl	(%ebp), %ecx
+	movl	4(%ebp), %edx
+	movl	28(%esp), %esi
+	incl	%ecx
+	subl	$12, %esp
+	addl	$8, %ebp
+	andl	$255, %ecx
+	leal	-8(%ebx,%esi,), %ebx
+	movl	44(%esp), %edi
+	movl	%ebx, 8(%esp)
+	movl	(%ebp,%ecx,4), %eax
+	cmpl	%esi, %ebx
+	jl	.L000end
+.L001start:
+	addl	$8, %esi
+	/* Round 0 */
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	%bl, (%esp)
+	/* Round 1 */
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	%bl, 1(%esp)
+	/* Round 2 */
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	%bl, 2(%esp)
+	/* Round 3 */
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	%bl, 3(%esp)
+	/* Round 4 */
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	%bl, 4(%esp)
+	/* Round 5 */
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	%bl, 5(%esp)
+	/* Round 6 */
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	%bl, 6(%esp)
+	/* Round 7 */
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	addl	$8, %edi
+	movb	%bl, 7(%esp)
+	/* apply the cipher text */
+	movl	(%esp), %eax
+	movl	-8(%esi), %ebx
+	xorl	%ebx, %eax
+	movl	-4(%esi), %ebx
+	movl	%eax, -8(%edi)
+	movl	4(%esp), %eax
+	xorl	%ebx, %eax
+	movl	8(%esp), %ebx
+	movl	%eax, -4(%edi)
+	movl	(%ebp,%ecx,4), %eax
+	cmpl	%ebx, %esi
+	jle	.L001start
+.L000end:
+	/* Round 0 */
+	addl	$8, %ebx
+	incl	%esi
+	cmpl	%esi, %ebx
+	jl	.L002finished
+	movl	%ebx, 8(%esp)
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	-1(%esi), %bh
+	xorb	%bh, %bl
+	movb	%bl, (%edi)
+	/* Round 1 */
+	movl	8(%esp), %ebx
+	cmpl	%esi, %ebx
+	jle	.L002finished
+	incl	%esi
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	-1(%esi), %bh
+	xorb	%bh, %bl
+	movb	%bl, 1(%edi)
+	/* Round 2 */
+	movl	8(%esp), %ebx
+	cmpl	%esi, %ebx
+	jle	.L002finished
+	incl	%esi
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	-1(%esi), %bh
+	xorb	%bh, %bl
+	movb	%bl, 2(%edi)
+	/* Round 3 */
+	movl	8(%esp), %ebx
+	cmpl	%esi, %ebx
+	jle	.L002finished
+	incl	%esi
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	-1(%esi), %bh
+	xorb	%bh, %bl
+	movb	%bl, 3(%edi)
+	/* Round 4 */
+	movl	8(%esp), %ebx
+	cmpl	%esi, %ebx
+	jle	.L002finished
+	incl	%esi
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	-1(%esi), %bh
+	xorb	%bh, %bl
+	movb	%bl, 4(%edi)
+	/* Round 5 */
+	movl	8(%esp), %ebx
+	cmpl	%esi, %ebx
+	jle	.L002finished
+	incl	%esi
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movl	(%ebp,%ecx,4), %eax
+	movb	-1(%esi), %bh
+	xorb	%bh, %bl
+	movb	%bl, 5(%edi)
+	/* Round 6 */
+	movl	8(%esp), %ebx
+	cmpl	%esi, %ebx
+	jle	.L002finished
+	incl	%esi
+	addl	%eax, %edx
+	andl	$255, %edx
+	incl	%ecx
+	movl	(%ebp,%edx,4), %ebx
+	movl	%ebx, -4(%ebp,%ecx,4)
+	addl	%eax, %ebx
+	andl	$255, %ecx
+	andl	$255, %ebx
+	movl	%eax, (%ebp,%edx,4)
+	nop
+	movl	(%ebp,%ebx,4), %ebx
+	movb	-1(%esi), %bh
+	xorb	%bh, %bl
+	movb	%bl, 6(%edi)
+.L002finished:
+	decl	%ecx
+	addl	$12, %esp
+	movl	%edx, -4(%ebp)
+	movb	%cl, -8(%ebp)
+	popl	%edi
+	popl	%esi
+	popl	%ebx
+	popl	%ebp
+	ret
+.RC4_end:
+	SIZE(RC4,.RC4_end-RC4)
+.ident	"RC4"
-- 
cgit v1.2.3-55-g6feb
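Note on what the two generated files compute: both are the same RC4 byte-stream routine, operating on a key structure whose first two words hold the x and y indices and whose remaining 256 words hold the permutation table stored one entry per 32-bit dword (which is why every table index in the assembly is scaled by 4). The main loop unrolls eight of these steps per pass and applies the XOR a dword at a time; the tail after $L000end/.L000end handles the remaining bytes one at a time. A minimal C sketch of one step of that loop follows, for orientation only; the struct layout, field names, and function signature below are assumptions modelled on the SSLeay rc4 code and are not part of this patch.

/* Illustrative sketch only -- assumed layout modelled on the SSLeay RC4
 * key structure; not part of the generated assembly above. */
typedef struct rc4_key_st {
	unsigned int x, y;        /* the two RC4 indices ([ebp] and 4[ebp] above) */
	unsigned int data[256];   /* permutation S, one dword per entry (ebp+8)   */
} RC4_KEY;

static void rc4_sketch(RC4_KEY *key, unsigned long len,
                       const unsigned char *in, unsigned char *out)
{
	unsigned int x = key->x, y = key->y, tx, ty;

	while (len-- > 0) {
		x = (x + 1) & 0xff;        /* i = i + 1          */
		tx = key->data[x];
		y = (y + tx) & 0xff;       /* j = j + S[i]       */
		ty = key->data[y];
		key->data[x] = ty;         /* swap S[i] and S[j] */
		key->data[y] = tx;
		/* keystream byte is S[(S[i] + S[j]) & 0xff] after the swap */
		*(out++) = *(in++) ^ (unsigned char)key->data[(tx + ty) & 0xff];
	}
	key->x = x;                        /* write the indices back */
	key->y = y;
}

The only difference between this scalar form and the assembly is the eight-way unrolling and the dword-at-a-time XOR against the input text.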