Diffstat (limited to 'src/lib/libcrypto/sha/asm/sha512-ia64.pl')
-rwxr-xr-x  src/lib/libcrypto/sha/asm/sha512-ia64.pl  672
1 files changed, 0 insertions, 672 deletions
diff --git a/src/lib/libcrypto/sha/asm/sha512-ia64.pl b/src/lib/libcrypto/sha/asm/sha512-ia64.pl
deleted file mode 100755
index 1c6ce56522..0000000000
--- a/src/lib/libcrypto/sha/asm/sha512-ia64.pl
+++ /dev/null
@@ -1,672 +0,0 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256/512_Transform for Itanium.
#
# sha512_block runs in 1003 cycles on Itanium 2, which is almost 50%
# faster than gcc and >60%(!) faster than code generated by the HP-UX
# compiler (yes, HP-UX generates slower code because, unlike gcc, it
# fails to deploy the "shift right pair" ('shrp') instruction, which
# substitutes for a 64-bit rotate).
#
# The 924-cycle sha256_block outperforms gcc by over a factor of 2(!)
# and the HP-UX compiler by >40% (yes, gcc won sha512_block, but lost
# this one big time). Note that "formally" 924 is about 100 cycles
# too much. I mean it's 64 32-bit rounds vs. 80 virtually identical
# 64-bit ones, and 1003*64/80 gives 802. The extra cycles, 2 per round,
# are spent on extra work to provide for 32-bit rotations. 32-bit
# rotations are still handled by the 'shrp' instruction, and for this
# reason the lower 32 bits are deposited into the upper half of a
# 64-bit register prior to the 'shrp' issue. And in order to minimize
# the amount of such operations, X[16] values are *maintained* with
# copies of the lower halves in the upper halves, which is why you'll
# spot such instructions as 'mux2', the "parallel 32-bit add" 'padd4'
# and the "parallel 32-bit unsigned right shift" 'pshr4.u' here.
#
# Rules of engagement.
#
# There is only one integer shifter, meaning that if I have two rotate,
# deposit or extract instructions in adjacent bundles, they shall
# split [at run time if they have to]. But note that variable and
# parallel shifts are performed by the multi-media ALU and *are*
# pairable with rotates [and alike]. On the downside the MMALU is
# rather slow: it takes 2 extra cycles before the result of an integer
# operation is available *to* the MMALU and 2(*) extra cycles before
# the result of an MM operation is available "back" *to* the integer
# ALU, not to mention that the MMALU itself has 2 cycles of latency.
# However! I explicitly scheduled these MM instructions to avoid MM
# stalls, so that all these extra latencies get "hidden" in
# instruction-level parallelism.
#
# (*) 2 cycles on Itanium 1 and 1 cycle on Itanium 2. But I schedule
#     for 2 in order to provide for best *overall* performance,
#     because on Itanium 1 a stall on an MM result is accompanied by
#     a pipeline flush, which takes 6 cycles:-(
#
# Resulting performance numbers for a 900MHz Itanium 2 system:
#
# The 'numbers' are in 1000s of bytes per second processed.
# type     16 bytes    64 bytes   256 bytes  1024 bytes  8192 bytes
# sha1(*)   6210.14k   20376.30k   52447.83k   85870.05k  105478.12k
# sha256    7476.45k   20572.05k   41538.34k   56062.29k   62093.18k
# sha512    4996.56k   20026.28k   47597.20k   85278.79k  111501.31k
#
# (*) SHA1 numbers are for the HP-UX compiler and are presented purely
#     for reference purposes. I bet it can be improved too...
#
# To generate code, pass the file name with either 256 or 512 in its
# name, plus compiler flags.
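#
# A small illustration of the rotate idiom relied upon below: the
# post-processing at the end of this file expands the '_rotr'
# pseudo-instruction into a funnel shift, e.g.
#
#     _rotr  r8=E,14      ->      shrp  r8=E,E,14
#
# i.e. (E>>14)|(E<<50), a 64-bit rotate right by 14. In the 32-bit
# SHA-256 rounds the operand is first replicated into the upper half
# with 'mux2 t0=A,0x44' ("copy lower half to upper"), so the very same
# 'shrp' then delivers a 32-bit rotation in the lower half.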
64 | |||
65 | $output=shift; | ||
66 | |||
67 | if ($output =~ /512.*\.[s|asm]/) { | ||
68 | $SZ=8; | ||
69 | $BITS=8*$SZ; | ||
70 | $LDW="ld8"; | ||
71 | $STW="st8"; | ||
72 | $ADD="add"; | ||
73 | $SHRU="shr.u"; | ||
74 | $TABLE="K512"; | ||
75 | $func="sha512_block_data_order"; | ||
76 | @Sigma0=(28,34,39); | ||
77 | @Sigma1=(14,18,41); | ||
78 | @sigma0=(1, 8, 7); | ||
79 | @sigma1=(19,61, 6); | ||
80 | $rounds=80; | ||
81 | } elsif ($output =~ /256.*\.[s|asm]/) { | ||
82 | $SZ=4; | ||
83 | $BITS=8*$SZ; | ||
84 | $LDW="ld4"; | ||
85 | $STW="st4"; | ||
86 | $ADD="padd4"; | ||
87 | $SHRU="pshr4.u"; | ||
88 | $TABLE="K256"; | ||
89 | $func="sha256_block_data_order"; | ||
90 | @Sigma0=( 2,13,22); | ||
91 | @Sigma1=( 6,11,25); | ||
92 | @sigma0=( 7,18, 3); | ||
93 | @sigma1=(17,19,10); | ||
94 | $rounds=64; | ||
95 | } else { die "nonsense $output"; } | ||
96 | |||
open STDOUT,">$output" or die "can't open $output: $!";

if ($^O eq "hpux") {
$ADDP="addp4";
for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); }
} else { $ADDP="add"; }
for (@ARGV) { $big_endian=1 if (/\-DB_ENDIAN/);
$big_endian=0 if (/\-DL_ENDIAN/); }
if (!defined($big_endian))
{ $big_endian=(unpack('L',pack('N',1))==1); }

$code=<<___;
.ident \"$output, version 1.1\"
.ident \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\"
.explicit
.text

pfssave=r2;
lcsave=r3;
prsave=r14;
K=r15;
A=r16; B=r17; C=r18; D=r19;
E=r20; F=r21; G=r22; H=r23;
T1=r24; T2=r25;
s0=r26; s1=r27; t0=r28; t1=r29;
Ktbl=r30;
ctx=r31; // 1st arg
input=r48; // 2nd arg
num=r49; // 3rd arg
sgm0=r50; sgm1=r51; // small constants
A_=r54; B_=r55; C_=r56; D_=r57;
E_=r58; F_=r59; G_=r60; H_=r61;

// void $func (SHA_CTX *ctx, const void *in,size_t num[,int host])
.global $func#
.proc $func#
.align 32
$func:
.prologue
.save ar.pfs,pfssave
{ .mmi; alloc pfssave=ar.pfs,3,27,0,16
$ADDP ctx=0,r32 // 1st arg
.save ar.lc,lcsave
mov lcsave=ar.lc }
{ .mmi; $ADDP input=0,r33 // 2nd arg
mov num=r34 // 3rd arg
.save pr,prsave
mov prsave=pr };;

.body
{ .mib; add r8=0*$SZ,ctx
add r9=1*$SZ,ctx
brp.loop.imp .L_first16,.L_first16_end-16 }
{ .mib; add r10=2*$SZ,ctx
add r11=3*$SZ,ctx
brp.loop.imp .L_rest,.L_rest_end-16 };;

// load A-H
.Lpic_point:
{ .mmi; $LDW A_=[r8],4*$SZ
$LDW B_=[r9],4*$SZ
mov Ktbl=ip }
{ .mmi; $LDW C_=[r10],4*$SZ
$LDW D_=[r11],4*$SZ
mov sgm0=$sigma0[2] };;
{ .mmi; $LDW E_=[r8]
$LDW F_=[r9]
add Ktbl=($TABLE#-.Lpic_point),Ktbl }
{ .mmi; $LDW G_=[r10]
$LDW H_=[r11]
cmp.ne p0,p16=0,r0 };; // used in sha256_block
___
$code.=<<___ if ($BITS==64);
{ .mii; and r8=7,input
and input=~7,input;;
cmp.eq p9,p0=1,r8 }
{ .mmi; cmp.eq p10,p0=2,r8
cmp.eq p11,p0=3,r8
cmp.eq p12,p0=4,r8 }
{ .mmi; cmp.eq p13,p0=5,r8
cmp.eq p14,p0=6,r8
cmp.eq p15,p0=7,r8 };;
___
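# The block above (64-bit only) rounds 'input' down to an 8-byte
# boundary and records the original misalignment, input&7, in
# predicates p9-p15. Each 128-byte message block is then fetched with
# aligned loads only; a misalignment of k bytes branches to the
# corresponding .L1byte-.L7byte path, where neighbouring words are
# re-spliced with 'shrp ...,(64-8*k)' to recover the unaligned byte
# stream (on little-endian hosts the post-processing regex swaps the
# shrp operands and complements the shift count).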
$code.=<<___;
.L_outer:
.rotr X[16]
{ .mmi; mov A=A_
mov B=B_
mov ar.lc=14 }
{ .mmi; mov C=C_
mov D=D_
mov E=E_ }
{ .mmi; mov F=F_
mov G=G_
mov ar.ec=2 }
{ .mmi; ld1 X[15]=[input],$SZ // eliminated in 64-bit
mov H=H_
mov sgm1=$sigma1[2] };;

___
$t0="t0", $t1="t1", $code.=<<___ if ($BITS==32);
.align 32
.L_first16:
{ .mmi; add r9=1-$SZ,input
add r10=2-$SZ,input
add r11=3-$SZ,input };;
{ .mmi; ld1 r9=[r9]
ld1 r10=[r10]
dep.z $t1=E,32,32 }
{ .mmi; $LDW K=[Ktbl],$SZ
ld1 r11=[r11]
zxt4 E=E };;
{ .mii; or $t1=$t1,E
dep X[15]=X[15],r9,8,8
dep r11=r10,r11,8,8 };;
{ .mmi; and T1=F,E
and T2=A,B
dep X[15]=X[15],r11,16,16 }
{ .mmi; andcm r8=G,E
and r9=A,C
mux2 $t0=A,0x44 };; // copy lower half to upper
{ .mmi; (p16) ld1 X[15-1]=[input],$SZ // prefetch
xor T1=T1,r8 // T1=((e & f) ^ (~e & g))
_rotr r11=$t1,$Sigma1[0] } // ROTR(e,14)
{ .mib; and r10=B,C
xor T2=T2,r9 };;
___
$t0="A", $t1="E", $code.=<<___ if ($BITS==64);
// in 64-bit mode I load whole X[16] at once and take care of alignment...
{ .mmi; add r8=1*$SZ,input
add r9=2*$SZ,input
add r10=3*$SZ,input };;
{ .mmb; $LDW X[15]=[input],4*$SZ
$LDW X[14]=[r8],4*$SZ
(p9) br.cond.dpnt.many .L1byte };;
{ .mmb; $LDW X[13]=[r9],4*$SZ
$LDW X[12]=[r10],4*$SZ
(p10) br.cond.dpnt.many .L2byte };;
{ .mmb; $LDW X[11]=[input],4*$SZ
$LDW X[10]=[r8],4*$SZ
(p11) br.cond.dpnt.many .L3byte };;
{ .mmb; $LDW X[ 9]=[r9],4*$SZ
$LDW X[ 8]=[r10],4*$SZ
(p12) br.cond.dpnt.many .L4byte };;
{ .mmb; $LDW X[ 7]=[input],4*$SZ
$LDW X[ 6]=[r8],4*$SZ
(p13) br.cond.dpnt.many .L5byte };;
{ .mmb; $LDW X[ 5]=[r9],4*$SZ
$LDW X[ 4]=[r10],4*$SZ
(p14) br.cond.dpnt.many .L6byte };;
{ .mmb; $LDW X[ 3]=[input],4*$SZ
$LDW X[ 2]=[r8],4*$SZ
(p15) br.cond.dpnt.many .L7byte };;
{ .mmb; $LDW X[ 1]=[r9],4*$SZ
$LDW X[ 0]=[r10],4*$SZ
br.many .L_first16 };;
.L1byte:
{ .mmi; $LDW X[13]=[r9],4*$SZ
$LDW X[12]=[r10],4*$SZ
shrp X[15]=X[15],X[14],56 };;
{ .mmi; $LDW X[11]=[input],4*$SZ
$LDW X[10]=[r8],4*$SZ
shrp X[14]=X[14],X[13],56 }
{ .mmi; $LDW X[ 9]=[r9],4*$SZ
$LDW X[ 8]=[r10],4*$SZ
shrp X[13]=X[13],X[12],56 };;
{ .mmi; $LDW X[ 7]=[input],4*$SZ
$LDW X[ 6]=[r8],4*$SZ
shrp X[12]=X[12],X[11],56 }
{ .mmi; $LDW X[ 5]=[r9],4*$SZ
$LDW X[ 4]=[r10],4*$SZ
shrp X[11]=X[11],X[10],56 };;
{ .mmi; $LDW X[ 3]=[input],4*$SZ
$LDW X[ 2]=[r8],4*$SZ
shrp X[10]=X[10],X[ 9],56 }
{ .mmi; $LDW X[ 1]=[r9],4*$SZ
$LDW X[ 0]=[r10],4*$SZ
shrp X[ 9]=X[ 9],X[ 8],56 };;
{ .mii; $LDW T1=[input]
shrp X[ 8]=X[ 8],X[ 7],56
shrp X[ 7]=X[ 7],X[ 6],56 }
{ .mii; shrp X[ 6]=X[ 6],X[ 5],56
shrp X[ 5]=X[ 5],X[ 4],56 };;
{ .mii; shrp X[ 4]=X[ 4],X[ 3],56
shrp X[ 3]=X[ 3],X[ 2],56 }
{ .mii; shrp X[ 2]=X[ 2],X[ 1],56
shrp X[ 1]=X[ 1],X[ 0],56 }
{ .mib; shrp X[ 0]=X[ 0],T1,56
br.many .L_first16 };;
.L2byte:
{ .mmi; $LDW X[11]=[input],4*$SZ
$LDW X[10]=[r8],4*$SZ
shrp X[15]=X[15],X[14],48 }
{ .mmi; $LDW X[ 9]=[r9],4*$SZ
$LDW X[ 8]=[r10],4*$SZ
shrp X[14]=X[14],X[13],48 };;
{ .mmi; $LDW X[ 7]=[input],4*$SZ
$LDW X[ 6]=[r8],4*$SZ
shrp X[13]=X[13],X[12],48 }
{ .mmi; $LDW X[ 5]=[r9],4*$SZ
$LDW X[ 4]=[r10],4*$SZ
shrp X[12]=X[12],X[11],48 };;
{ .mmi; $LDW X[ 3]=[input],4*$SZ
$LDW X[ 2]=[r8],4*$SZ
shrp X[11]=X[11],X[10],48 }
{ .mmi; $LDW X[ 1]=[r9],4*$SZ
$LDW X[ 0]=[r10],4*$SZ
shrp X[10]=X[10],X[ 9],48 };;
{ .mii; $LDW T1=[input]
shrp X[ 9]=X[ 9],X[ 8],48
shrp X[ 8]=X[ 8],X[ 7],48 }
{ .mii; shrp X[ 7]=X[ 7],X[ 6],48
shrp X[ 6]=X[ 6],X[ 5],48 };;
{ .mii; shrp X[ 5]=X[ 5],X[ 4],48
shrp X[ 4]=X[ 4],X[ 3],48 }
{ .mii; shrp X[ 3]=X[ 3],X[ 2],48
shrp X[ 2]=X[ 2],X[ 1],48 }
{ .mii; shrp X[ 1]=X[ 1],X[ 0],48
shrp X[ 0]=X[ 0],T1,48 }
{ .mfb; br.many .L_first16 };;
.L3byte:
{ .mmi; $LDW X[ 9]=[r9],4*$SZ
$LDW X[ 8]=[r10],4*$SZ
shrp X[15]=X[15],X[14],40 };;
{ .mmi; $LDW X[ 7]=[input],4*$SZ
$LDW X[ 6]=[r8],4*$SZ
shrp X[14]=X[14],X[13],40 }
{ .mmi; $LDW X[ 5]=[r9],4*$SZ
$LDW X[ 4]=[r10],4*$SZ
shrp X[13]=X[13],X[12],40 };;
{ .mmi; $LDW X[ 3]=[input],4*$SZ
$LDW X[ 2]=[r8],4*$SZ
shrp X[12]=X[12],X[11],40 }
{ .mmi; $LDW X[ 1]=[r9],4*$SZ
$LDW X[ 0]=[r10],4*$SZ
shrp X[11]=X[11],X[10],40 };;
{ .mii; $LDW T1=[input]
shrp X[10]=X[10],X[ 9],40
shrp X[ 9]=X[ 9],X[ 8],40 }
{ .mii; shrp X[ 8]=X[ 8],X[ 7],40
shrp X[ 7]=X[ 7],X[ 6],40 };;
{ .mii; shrp X[ 6]=X[ 6],X[ 5],40
shrp X[ 5]=X[ 5],X[ 4],40 }
{ .mii; shrp X[ 4]=X[ 4],X[ 3],40
shrp X[ 3]=X[ 3],X[ 2],40 }
{ .mii; shrp X[ 2]=X[ 2],X[ 1],40
shrp X[ 1]=X[ 1],X[ 0],40 }
{ .mib; shrp X[ 0]=X[ 0],T1,40
br.many .L_first16 };;
.L4byte:
{ .mmi; $LDW X[ 7]=[input],4*$SZ
$LDW X[ 6]=[r8],4*$SZ
shrp X[15]=X[15],X[14],32 }
{ .mmi; $LDW X[ 5]=[r9],4*$SZ
$LDW X[ 4]=[r10],4*$SZ
shrp X[14]=X[14],X[13],32 };;
{ .mmi; $LDW X[ 3]=[input],4*$SZ
$LDW X[ 2]=[r8],4*$SZ
shrp X[13]=X[13],X[12],32 }
{ .mmi; $LDW X[ 1]=[r9],4*$SZ
$LDW X[ 0]=[r10],4*$SZ
shrp X[12]=X[12],X[11],32 };;
{ .mii; $LDW T1=[input]
shrp X[11]=X[11],X[10],32
shrp X[10]=X[10],X[ 9],32 }
{ .mii; shrp X[ 9]=X[ 9],X[ 8],32
shrp X[ 8]=X[ 8],X[ 7],32 };;
{ .mii; shrp X[ 7]=X[ 7],X[ 6],32
shrp X[ 6]=X[ 6],X[ 5],32 }
{ .mii; shrp X[ 5]=X[ 5],X[ 4],32
shrp X[ 4]=X[ 4],X[ 3],32 }
{ .mii; shrp X[ 3]=X[ 3],X[ 2],32
shrp X[ 2]=X[ 2],X[ 1],32 }
{ .mii; shrp X[ 1]=X[ 1],X[ 0],32
shrp X[ 0]=X[ 0],T1,32 }
{ .mfb; br.many .L_first16 };;
.L5byte:
{ .mmi; $LDW X[ 5]=[r9],4*$SZ
$LDW X[ 4]=[r10],4*$SZ
shrp X[15]=X[15],X[14],24 };;
{ .mmi; $LDW X[ 3]=[input],4*$SZ
$LDW X[ 2]=[r8],4*$SZ
shrp X[14]=X[14],X[13],24 }
{ .mmi; $LDW X[ 1]=[r9],4*$SZ
$LDW X[ 0]=[r10],4*$SZ
shrp X[13]=X[13],X[12],24 };;
{ .mii; $LDW T1=[input]
shrp X[12]=X[12],X[11],24
shrp X[11]=X[11],X[10],24 }
{ .mii; shrp X[10]=X[10],X[ 9],24
shrp X[ 9]=X[ 9],X[ 8],24 };;
{ .mii; shrp X[ 8]=X[ 8],X[ 7],24
shrp X[ 7]=X[ 7],X[ 6],24 }
{ .mii; shrp X[ 6]=X[ 6],X[ 5],24
shrp X[ 5]=X[ 5],X[ 4],24 }
{ .mii; shrp X[ 4]=X[ 4],X[ 3],24
shrp X[ 3]=X[ 3],X[ 2],24 }
{ .mii; shrp X[ 2]=X[ 2],X[ 1],24
shrp X[ 1]=X[ 1],X[ 0],24 }
{ .mib; shrp X[ 0]=X[ 0],T1,24
br.many .L_first16 };;
.L6byte:
{ .mmi; $LDW X[ 3]=[input],4*$SZ
$LDW X[ 2]=[r8],4*$SZ
shrp X[15]=X[15],X[14],16 }
{ .mmi; $LDW X[ 1]=[r9],4*$SZ
$LDW X[ 0]=[r10],4*$SZ
shrp X[14]=X[14],X[13],16 };;
{ .mii; $LDW T1=[input]
shrp X[13]=X[13],X[12],16
shrp X[12]=X[12],X[11],16 }
{ .mii; shrp X[11]=X[11],X[10],16
shrp X[10]=X[10],X[ 9],16 };;
{ .mii; shrp X[ 9]=X[ 9],X[ 8],16
shrp X[ 8]=X[ 8],X[ 7],16 }
{ .mii; shrp X[ 7]=X[ 7],X[ 6],16
shrp X[ 6]=X[ 6],X[ 5],16 }
{ .mii; shrp X[ 5]=X[ 5],X[ 4],16
shrp X[ 4]=X[ 4],X[ 3],16 }
{ .mii; shrp X[ 3]=X[ 3],X[ 2],16
shrp X[ 2]=X[ 2],X[ 1],16 }
{ .mii; shrp X[ 1]=X[ 1],X[ 0],16
shrp X[ 0]=X[ 0],T1,16 }
{ .mfb; br.many .L_first16 };;
.L7byte:
{ .mmi; $LDW X[ 1]=[r9],4*$SZ
$LDW X[ 0]=[r10],4*$SZ
shrp X[15]=X[15],X[14],8 };;
{ .mii; $LDW T1=[input]
shrp X[14]=X[14],X[13],8
shrp X[13]=X[13],X[12],8 }
{ .mii; shrp X[12]=X[12],X[11],8
shrp X[11]=X[11],X[10],8 };;
{ .mii; shrp X[10]=X[10],X[ 9],8
shrp X[ 9]=X[ 9],X[ 8],8 }
{ .mii; shrp X[ 8]=X[ 8],X[ 7],8
shrp X[ 7]=X[ 7],X[ 6],8 }
{ .mii; shrp X[ 6]=X[ 6],X[ 5],8
shrp X[ 5]=X[ 5],X[ 4],8 }
{ .mii; shrp X[ 4]=X[ 4],X[ 3],8
shrp X[ 3]=X[ 3],X[ 2],8 }
{ .mii; shrp X[ 2]=X[ 2],X[ 1],8
shrp X[ 1]=X[ 1],X[ 0],8 }
{ .mib; shrp X[ 0]=X[ 0],T1,8
br.many .L_first16 };;

.align 32
.L_first16:
{ .mmi; $LDW K=[Ktbl],$SZ
and T1=F,E
and T2=A,B }
{ .mmi; //$LDW X[15]=[input],$SZ // X[i]=*input++
andcm r8=G,E
and r9=A,C };;
{ .mmi; xor T1=T1,r8 //T1=((e & f) ^ (~e & g))
and r10=B,C
_rotr r11=$t1,$Sigma1[0] } // ROTR(e,14)
{ .mmi; xor T2=T2,r9
mux1 X[15]=X[15],\@rev };; // eliminated in big-endian
___
$code.=<<___;
{ .mib; add T1=T1,H // T1=Ch(e,f,g)+h
_rotr r8=$t1,$Sigma1[1] } // ROTR(e,18)
{ .mib; xor T2=T2,r10 // T2=((a & b) ^ (a & c) ^ (b & c))
mov H=G };;
{ .mib; xor r11=r8,r11
_rotr r9=$t1,$Sigma1[2] } // ROTR(e,41)
{ .mib; mov G=F
mov F=E };;
{ .mib; xor r9=r9,r11 // r9=Sigma1(e)
_rotr r10=$t0,$Sigma0[0] } // ROTR(a,28)
{ .mib; add T1=T1,K // T1=Ch(e,f,g)+h+K512[i]
mov E=D };;
{ .mib; add T1=T1,r9 // T1+=Sigma1(e)
_rotr r11=$t0,$Sigma0[1] } // ROTR(a,34)
{ .mib; mov D=C
mov C=B };;
{ .mib; add T1=T1,X[15] // T1+=X[i]
_rotr r8=$t0,$Sigma0[2] } // ROTR(a,39)
{ .mib; xor r10=r10,r11
mux2 X[15]=X[15],0x44 };; // eliminated in 64-bit
{ .mmi; xor r10=r8,r10 // r10=Sigma0(a)
mov B=A
add A=T1,T2 };;
{ .mib; add E=E,T1
add A=A,r10 // T2=Maj(a,b,c)+Sigma0(a)
br.ctop.sptk .L_first16 };;
.L_first16_end:

{ .mii; mov ar.lc=$rounds-17
mov ar.ec=1 };;

.align 32
.L_rest:
.rotr X[16]
{ .mib; $LDW K=[Ktbl],$SZ
_rotr r8=X[15-1],$sigma0[0] } // ROTR(s0,1)
{ .mib; $ADD X[15]=X[15],X[15-9] // X[i&0xF]+=X[(i+9)&0xF]
$SHRU s0=X[15-1],sgm0 };; // s0=X[(i+1)&0xF]>>7
{ .mib; and T1=F,E
_rotr r9=X[15-1],$sigma0[1] } // ROTR(s0,8)
{ .mib; andcm r10=G,E
$SHRU s1=X[15-14],sgm1 };; // s1=X[(i+14)&0xF]>>6
{ .mmi; xor T1=T1,r10 // T1=((e & f) ^ (~e & g))
xor r9=r8,r9
_rotr r10=X[15-14],$sigma1[0] };;// ROTR(s1,19)
{ .mib; and T2=A,B
_rotr r11=X[15-14],$sigma1[1] }// ROTR(s1,61)
{ .mib; and r8=A,C };;
___
$t0="t0", $t1="t1", $code.=<<___ if ($BITS==32);
// I adhere to mmi; in order to hold Itanium 1 back and avoid 6 cycle
// pipeline flush in last bundle. Note that even on Itanium2 the
// latter stalls for one clock cycle...
{ .mmi; xor s0=s0,r9 // s0=sigma0(X[(i+1)&0xF])
dep.z $t1=E,32,32 }
{ .mmi; xor r10=r11,r10
zxt4 E=E };;
{ .mmi; or $t1=$t1,E
xor s1=s1,r10 // s1=sigma1(X[(i+14)&0xF])
mux2 $t0=A,0x44 };; // copy lower half to upper
{ .mmi; xor T2=T2,r8
_rotr r9=$t1,$Sigma1[0] } // ROTR(e,14)
{ .mmi; and r10=B,C
add T1=T1,H // T1=Ch(e,f,g)+h
$ADD X[15]=X[15],s0 };; // X[i&0xF]+=sigma0(X[(i+1)&0xF])
___
$t0="A", $t1="E", $code.=<<___ if ($BITS==64);
{ .mib; xor s0=s0,r9 // s0=sigma0(X[(i+1)&0xF])
_rotr r9=$t1,$Sigma1[0] } // ROTR(e,14)
{ .mib; xor r10=r11,r10
xor T2=T2,r8 };;
{ .mib; xor s1=s1,r10 // s1=sigma1(X[(i+14)&0xF])
add T1=T1,H }
{ .mib; and r10=B,C
$ADD X[15]=X[15],s0 };; // X[i&0xF]+=sigma0(X[(i+1)&0xF])
___
$code.=<<___;
{ .mmi; xor T2=T2,r10 // T2=((a & b) ^ (a & c) ^ (b & c))
mov H=G
_rotr r8=$t1,$Sigma1[1] };; // ROTR(e,18)
{ .mmi; xor r11=r8,r9
$ADD X[15]=X[15],s1 // X[i&0xF]+=sigma1(X[(i+14)&0xF])
_rotr r9=$t1,$Sigma1[2] } // ROTR(e,41)
{ .mmi; mov G=F
mov F=E };;
{ .mib; xor r9=r9,r11 // r9=Sigma1(e)
_rotr r10=$t0,$Sigma0[0] } // ROTR(a,28)
{ .mib; add T1=T1,K // T1=Ch(e,f,g)+h+K512[i]
mov E=D };;
{ .mib; add T1=T1,r9 // T1+=Sigma1(e)
_rotr r11=$t0,$Sigma0[1] } // ROTR(a,34)
{ .mib; mov D=C
mov C=B };;
{ .mmi; add T1=T1,X[15] // T1+=X[i]
xor r10=r10,r11
_rotr r8=$t0,$Sigma0[2] };; // ROTR(a,39)
{ .mmi; xor r10=r8,r10 // r10=Sigma0(a)
mov B=A
add A=T1,T2 };;
{ .mib; add E=E,T1
add A=A,r10 // T2=Maj(a,b,c)+Sigma0(a)
br.ctop.sptk .L_rest };;
.L_rest_end:

{ .mmi; add A_=A_,A
add B_=B_,B
add C_=C_,C }
{ .mmi; add D_=D_,D
add E_=E_,E
cmp.ltu p16,p0=1,num };;
{ .mmi; add F_=F_,F
add G_=G_,G
add H_=H_,H }
{ .mmb; add Ktbl=-$SZ*$rounds,Ktbl
(p16) add num=-1,num
(p16) br.dptk.many .L_outer };;

{ .mib; add r8=0*$SZ,ctx
add r9=1*$SZ,ctx }
{ .mib; add r10=2*$SZ,ctx
add r11=3*$SZ,ctx };;
{ .mmi; $STW [r8]=A_,4*$SZ
$STW [r9]=B_,4*$SZ
mov ar.lc=lcsave }
{ .mmi; $STW [r10]=C_,4*$SZ
$STW [r11]=D_,4*$SZ
mov pr=prsave,0x1ffff };;
{ .mmb; $STW [r8]=E_
$STW [r9]=F_ }
{ .mmb; $STW [r10]=G_
$STW [r11]=H_
br.ret.sptk.many b0 };;
.endp $func#
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/_rotr(\s+)([^=]+)=([^,]+),([0-9]+)/shrp$1$2=$3,$3,$4/gm;
if ($BITS==64) {
$code =~ s/mux2(\s+)\S+/nop.i$1 0x0/gm;
$code =~ s/mux1(\s+)\S+/nop.i$1 0x0/gm if ($big_endian);
$code =~ s/(shrp\s+X\[[^=]+)=([^,]+),([^,]+),([1-9]+)/$1=$3,$2,64-$4/gm
if (!$big_endian);
$code =~ s/ld1(\s+)X\[\S+/nop.m$1 0x0/gm;
}
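# To recap the substitutions above: '_rotr reg=src,n' becomes
# 'shrp reg=src,src,n', i.e. a rotate right by n bits; in the 64-bit
# build the 'mux2' half-replication and the byte-wise 'ld1 X[...]'
# loads degenerate to nops ('mux1' as well on big-endian hosts), and
# on little-endian hosts the operands of the alignment 'shrp X[...]'
# are swapped and the shift count replaced by 64 minus the original.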
602 | |||
603 | print $code; | ||
604 | |||
605 | print<<___ if ($BITS==32); | ||
606 | .align 64 | ||
607 | .type K256#,\@object | ||
608 | K256: data4 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 | ||
609 | data4 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 | ||
610 | data4 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 | ||
611 | data4 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 | ||
612 | data4 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc | ||
613 | data4 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da | ||
614 | data4 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 | ||
615 | data4 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 | ||
616 | data4 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 | ||
617 | data4 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 | ||
618 | data4 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 | ||
619 | data4 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 | ||
620 | data4 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 | ||
621 | data4 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 | ||
622 | data4 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 | ||
623 | data4 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 | ||
624 | .size K256#,$SZ*$rounds | ||
625 | stringz "SHA256 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>" | ||
626 | ___ | ||
627 | print<<___ if ($BITS==64); | ||
628 | .align 64 | ||
629 | .type K512#,\@object | ||
630 | K512: data8 0x428a2f98d728ae22,0x7137449123ef65cd | ||
631 | data8 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc | ||
632 | data8 0x3956c25bf348b538,0x59f111f1b605d019 | ||
633 | data8 0x923f82a4af194f9b,0xab1c5ed5da6d8118 | ||
634 | data8 0xd807aa98a3030242,0x12835b0145706fbe | ||
635 | data8 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 | ||
636 | data8 0x72be5d74f27b896f,0x80deb1fe3b1696b1 | ||
637 | data8 0x9bdc06a725c71235,0xc19bf174cf692694 | ||
638 | data8 0xe49b69c19ef14ad2,0xefbe4786384f25e3 | ||
639 | data8 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 | ||
640 | data8 0x2de92c6f592b0275,0x4a7484aa6ea6e483 | ||
641 | data8 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 | ||
642 | data8 0x983e5152ee66dfab,0xa831c66d2db43210 | ||
643 | data8 0xb00327c898fb213f,0xbf597fc7beef0ee4 | ||
644 | data8 0xc6e00bf33da88fc2,0xd5a79147930aa725 | ||
645 | data8 0x06ca6351e003826f,0x142929670a0e6e70 | ||
646 | data8 0x27b70a8546d22ffc,0x2e1b21385c26c926 | ||
647 | data8 0x4d2c6dfc5ac42aed,0x53380d139d95b3df | ||
648 | data8 0x650a73548baf63de,0x766a0abb3c77b2a8 | ||
649 | data8 0x81c2c92e47edaee6,0x92722c851482353b | ||
650 | data8 0xa2bfe8a14cf10364,0xa81a664bbc423001 | ||
651 | data8 0xc24b8b70d0f89791,0xc76c51a30654be30 | ||
652 | data8 0xd192e819d6ef5218,0xd69906245565a910 | ||
653 | data8 0xf40e35855771202a,0x106aa07032bbd1b8 | ||
654 | data8 0x19a4c116b8d2d0c8,0x1e376c085141ab53 | ||
655 | data8 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 | ||
656 | data8 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb | ||
657 | data8 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 | ||
658 | data8 0x748f82ee5defb2fc,0x78a5636f43172f60 | ||
659 | data8 0x84c87814a1f0ab72,0x8cc702081a6439ec | ||
660 | data8 0x90befffa23631e28,0xa4506cebde82bde9 | ||
661 | data8 0xbef9a3f7b2c67915,0xc67178f2e372532b | ||
662 | data8 0xca273eceea26619c,0xd186b8c721c0c207 | ||
663 | data8 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 | ||
664 | data8 0x06f067aa72176fba,0x0a637dc5a2c898a6 | ||
665 | data8 0x113f9804bef90dae,0x1b710b35131c471b | ||
666 | data8 0x28db77f523047d84,0x32caab7b40c72493 | ||
667 | data8 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c | ||
668 | data8 0x4cc5d4becb3e42b6,0x597f299cfc657e2a | ||
669 | data8 0x5fcb6fab3ad6faec,0x6c44198c4a475817 | ||
670 | .size K512#,$SZ*$rounds | ||
671 | stringz "SHA512 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>" | ||
672 | ___ | ||