summaryrefslogtreecommitdiff
path: root/src/lib/libcrypto/aes/asm/aes-s390x.pl
diff options
context:
space:
mode:
Diffstat (limited to 'src/lib/libcrypto/aes/asm/aes-s390x.pl')
-rw-r--r--  src/lib/libcrypto/aes/asm/aes-s390x.pl | 95
1 file changed, 39 insertions(+), 56 deletions(-)
diff --git a/src/lib/libcrypto/aes/asm/aes-s390x.pl b/src/lib/libcrypto/aes/asm/aes-s390x.pl
index 445a1e6762..e75dcd0315 100644
--- a/src/lib/libcrypto/aes/asm/aes-s390x.pl
+++ b/src/lib/libcrypto/aes/asm/aes-s390x.pl
@@ -1598,11 +1598,11 @@ $code.=<<___ if(1);
1598 lghi $s1,0x7f 1598 lghi $s1,0x7f
1599 nr $s1,%r0 1599 nr $s1,%r0
1600 lghi %r0,0 # query capability vector 1600 lghi %r0,0 # query capability vector
1601 la %r1,2*$SIZE_T($sp) 1601 la %r1,$tweak-16($sp)
1602 .long 0xb92e0042 # km %r4,%r2 1602 .long 0xb92e0042 # km %r4,%r2
1603 llihh %r1,0x8000 1603 llihh %r1,0x8000
1604 srlg %r1,%r1,32($s1) # check for 32+function code 1604 srlg %r1,%r1,32($s1) # check for 32+function code
1605 ng %r1,2*$SIZE_T($sp) 1605 ng %r1,$tweak-16($sp)
1606 lgr %r0,$s0 # restore the function code 1606 lgr %r0,$s0 # restore the function code
1607 la %r1,0($key1) # restore $key1 1607 la %r1,0($key1) # restore $key1
1608 jz .Lxts_km_vanilla 1608 jz .Lxts_km_vanilla
@@ -1628,7 +1628,7 @@ $code.=<<___ if(1);
1628 1628
1629 lrvg $s0,$tweak+0($sp) # load the last tweak 1629 lrvg $s0,$tweak+0($sp) # load the last tweak
1630 lrvg $s1,$tweak+8($sp) 1630 lrvg $s1,$tweak+8($sp)
1631 stmg %r0,%r3,$tweak-32(%r1) # wipe copy of the key 1631 stmg %r0,%r3,$tweak-32($sp) # wipe copy of the key
1632 1632
1633 nill %r0,0xffdf # switch back to original function code 1633 nill %r0,0xffdf # switch back to original function code
1634 la %r1,0($key1) # restore pointer to $key1 1634 la %r1,0($key1) # restore pointer to $key1
@@ -1684,11 +1684,9 @@ $code.=<<___;
1684 lghi $i1,0x87 1684 lghi $i1,0x87
1685 srag $i2,$s1,63 # broadcast upper bit 1685 srag $i2,$s1,63 # broadcast upper bit
1686 ngr $i1,$i2 # rem 1686 ngr $i1,$i2 # rem
1687 srlg $i2,$s0,63 # carry bit from lower half 1687 algr $s0,$s0
1688 sllg $s0,$s0,1 1688 alcgr $s1,$s1
1689 sllg $s1,$s1,1
1690 xgr $s0,$i1 1689 xgr $s0,$i1
1691 ogr $s1,$i2
1692.Lxts_km_start: 1690.Lxts_km_start:
1693 lrvgr $i1,$s0 # flip byte order 1691 lrvgr $i1,$s0 # flip byte order
1694 lrvgr $i2,$s1 1692 lrvgr $i2,$s1
@@ -1745,11 +1743,9 @@ $code.=<<___;
1745 lghi $i1,0x87 1743 lghi $i1,0x87
1746 srag $i2,$s1,63 # broadcast upper bit 1744 srag $i2,$s1,63 # broadcast upper bit
1747 ngr $i1,$i2 # rem 1745 ngr $i1,$i2 # rem
1748 srlg $i2,$s0,63 # carry bit from lower half 1746 algr $s0,$s0
1749 sllg $s0,$s0,1 1747 alcgr $s1,$s1
1750 sllg $s1,$s1,1
1751 xgr $s0,$i1 1748 xgr $s0,$i1
1752 ogr $s1,$i2
1753 1749
1754 ltr $len,$len # clear zero flag 1750 ltr $len,$len # clear zero flag
1755 br $ra 1751 br $ra
@@ -1781,8 +1777,8 @@ $code.=<<___ if (!$softonly);
1781 clr %r0,%r1 1777 clr %r0,%r1
1782 jl .Lxts_enc_software 1778 jl .Lxts_enc_software
1783 1779
1780 st${g} $ra,5*$SIZE_T($sp)
1784 stm${g} %r6,$s3,6*$SIZE_T($sp) 1781 stm${g} %r6,$s3,6*$SIZE_T($sp)
1785 st${g} $ra,14*$SIZE_T($sp)
1786 1782
1787 sllg $len,$len,4 # $len&=~15 1783 sllg $len,$len,4 # $len&=~15
1788 slgr $out,$inp 1784 slgr $out,$inp
@@ -1830,9 +1826,9 @@ $code.=<<___ if (!$softonly);
1830 stg $i2,8($i3) 1826 stg $i2,8($i3)
1831 1827
1832.Lxts_enc_km_done: 1828.Lxts_enc_km_done:
1833 l${g} $ra,14*$SIZE_T($sp) 1829 stg $sp,$tweak+0($sp) # wipe tweak
1834 st${g} $sp,$tweak($sp) # wipe tweak 1830 stg $sp,$tweak+8($sp)
1835 st${g} $sp,$tweak($sp) 1831 l${g} $ra,5*$SIZE_T($sp)
1836 lm${g} %r6,$s3,6*$SIZE_T($sp) 1832 lm${g} %r6,$s3,6*$SIZE_T($sp)
1837 br $ra 1833 br $ra
1838.align 16 1834.align 16
@@ -1843,12 +1839,11 @@ $code.=<<___;
1843 1839
1844 slgr $out,$inp 1840 slgr $out,$inp
1845 1841
1846 xgr $s0,$s0 # clear upper half 1842 l${g} $s3,$stdframe($sp) # ivp
1847 xgr $s1,$s1 1843 llgf $s0,0($s3) # load iv
1848 lrv $s0,$stdframe+4($sp) # load secno 1844 llgf $s1,4($s3)
1849 lrv $s1,$stdframe+0($sp) 1845 llgf $s2,8($s3)
1850 xgr $s2,$s2 1846 llgf $s3,12($s3)
1851 xgr $s3,$s3
1852 stm${g} %r2,%r5,2*$SIZE_T($sp) 1847 stm${g} %r2,%r5,2*$SIZE_T($sp)
1853 la $key,0($key2) 1848 la $key,0($key2)
1854 larl $tbl,AES_Te 1849 larl $tbl,AES_Te
@@ -1864,11 +1859,9 @@ $code.=<<___;
1864 lghi %r1,0x87 1859 lghi %r1,0x87
1865 srag %r0,$s3,63 # broadcast upper bit 1860 srag %r0,$s3,63 # broadcast upper bit
1866 ngr %r1,%r0 # rem 1861 ngr %r1,%r0 # rem
1867 srlg %r0,$s1,63 # carry bit from lower half 1862 algr $s1,$s1
1868 sllg $s1,$s1,1 1863 alcgr $s3,$s3
1869 sllg $s3,$s3,1
1870 xgr $s1,%r1 1864 xgr $s1,%r1
1871 ogr $s3,%r0
1872 lrvgr $s1,$s1 # flip byte order 1865 lrvgr $s1,$s1 # flip byte order
1873 lrvgr $s3,$s3 1866 lrvgr $s3,$s3
1874 srlg $s0,$s1,32 # smash the tweak to 4x32-bits 1867 srlg $s0,$s1,32 # smash the tweak to 4x32-bits
@@ -1917,11 +1910,9 @@ $code.=<<___;
1917 lghi %r1,0x87 1910 lghi %r1,0x87
1918 srag %r0,$s3,63 # broadcast upper bit 1911 srag %r0,$s3,63 # broadcast upper bit
1919 ngr %r1,%r0 # rem 1912 ngr %r1,%r0 # rem
1920 srlg %r0,$s1,63 # carry bit from lower half 1913 algr $s1,$s1
1921 sllg $s1,$s1,1 1914 alcgr $s3,$s3
1922 sllg $s3,$s3,1
1923 xgr $s1,%r1 1915 xgr $s1,%r1
1924 ogr $s3,%r0
1925 lrvgr $s1,$s1 # flip byte order 1916 lrvgr $s1,$s1 # flip byte order
1926 lrvgr $s3,$s3 1917 lrvgr $s3,$s3
1927 srlg $s0,$s1,32 # smash the tweak to 4x32-bits 1918 srlg $s0,$s1,32 # smash the tweak to 4x32-bits
@@ -1956,7 +1947,8 @@ $code.=<<___;
1956.size AES_xts_encrypt,.-AES_xts_encrypt 1947.size AES_xts_encrypt,.-AES_xts_encrypt
1957___ 1948___
1958# void AES_xts_decrypt(const char *inp,char *out,size_t len, 1949# void AES_xts_decrypt(const char *inp,char *out,size_t len,
1959# const AES_KEY *key1, const AES_KEY *key2,u64 secno); 1950# const AES_KEY *key1, const AES_KEY *key2,
1951# const unsigned char iv[16]);
1960# 1952#
1961$code.=<<___; 1953$code.=<<___;
1962.globl AES_xts_decrypt 1954.globl AES_xts_decrypt
@@ -1988,8 +1980,8 @@ $code.=<<___ if (!$softonly);
1988 clr %r0,%r1 1980 clr %r0,%r1
1989 jl .Lxts_dec_software 1981 jl .Lxts_dec_software
1990 1982
1983 st${g} $ra,5*$SIZE_T($sp)
1991 stm${g} %r6,$s3,6*$SIZE_T($sp) 1984 stm${g} %r6,$s3,6*$SIZE_T($sp)
1992 st${g} $ra,14*$SIZE_T($sp)
1993 1985
1994 nill $len,0xfff0 # $len&=~15 1986 nill $len,0xfff0 # $len&=~15
1995 slgr $out,$inp 1987 slgr $out,$inp
@@ -2028,11 +2020,9 @@ $code.=<<___ if (!$softonly);
2028 lghi $i1,0x87 2020 lghi $i1,0x87
2029 srag $i2,$s1,63 # broadcast upper bit 2021 srag $i2,$s1,63 # broadcast upper bit
2030 ngr $i1,$i2 # rem 2022 ngr $i1,$i2 # rem
2031 srlg $i2,$s0,63 # carry bit from lower half 2023 algr $s0,$s0
2032 sllg $s0,$s0,1 2024 alcgr $s1,$s1
2033 sllg $s1,$s1,1
2034 xgr $s0,$i1 2025 xgr $s0,$i1
2035 ogr $s1,$i2
2036 lrvgr $i1,$s0 # flip byte order 2026 lrvgr $i1,$s0 # flip byte order
2037 lrvgr $i2,$s1 2027 lrvgr $i2,$s1
2038 2028
@@ -2075,9 +2065,9 @@ $code.=<<___ if (!$softonly);
2075 stg $s2,0($i3) 2065 stg $s2,0($i3)
2076 stg $s3,8($i3) 2066 stg $s3,8($i3)
2077.Lxts_dec_km_done: 2067.Lxts_dec_km_done:
2078 l${g} $ra,14*$SIZE_T($sp) 2068 stg $sp,$tweak+0($sp) # wipe tweak
2079 st${g} $sp,$tweak($sp) # wipe tweak 2069 stg $sp,$tweak+8($sp)
2080 st${g} $sp,$tweak($sp) 2070 l${g} $ra,5*$SIZE_T($sp)
2081 lm${g} %r6,$s3,6*$SIZE_T($sp) 2071 lm${g} %r6,$s3,6*$SIZE_T($sp)
2082 br $ra 2072 br $ra
2083.align 16 2073.align 16
@@ -2089,12 +2079,11 @@ $code.=<<___;
2089 srlg $len,$len,4 2079 srlg $len,$len,4
2090 slgr $out,$inp 2080 slgr $out,$inp
2091 2081
2092 xgr $s0,$s0 # clear upper half 2082 l${g} $s3,$stdframe($sp) # ivp
2093 xgr $s1,$s1 2083 llgf $s0,0($s3) # load iv
2094 lrv $s0,$stdframe+4($sp) # load secno 2084 llgf $s1,4($s3)
2095 lrv $s1,$stdframe+0($sp) 2085 llgf $s2,8($s3)
2096 xgr $s2,$s2 2086 llgf $s3,12($s3)
2097 xgr $s3,$s3
2098 stm${g} %r2,%r5,2*$SIZE_T($sp) 2087 stm${g} %r2,%r5,2*$SIZE_T($sp)
2099 la $key,0($key2) 2088 la $key,0($key2)
2100 larl $tbl,AES_Te 2089 larl $tbl,AES_Te
@@ -2113,11 +2102,9 @@ $code.=<<___;
2113 lghi %r1,0x87 2102 lghi %r1,0x87
2114 srag %r0,$s3,63 # broadcast upper bit 2103 srag %r0,$s3,63 # broadcast upper bit
2115 ngr %r1,%r0 # rem 2104 ngr %r1,%r0 # rem
2116 srlg %r0,$s1,63 # carry bit from lower half 2105 algr $s1,$s1
2117 sllg $s1,$s1,1 2106 alcgr $s3,$s3
2118 sllg $s3,$s3,1
2119 xgr $s1,%r1 2107 xgr $s1,%r1
2120 ogr $s3,%r0
2121 lrvgr $s1,$s1 # flip byte order 2108 lrvgr $s1,$s1 # flip byte order
2122 lrvgr $s3,$s3 2109 lrvgr $s3,$s3
2123 srlg $s0,$s1,32 # smash the tweak to 4x32-bits 2110 srlg $s0,$s1,32 # smash the tweak to 4x32-bits
@@ -2156,11 +2143,9 @@ $code.=<<___;
2156 lghi %r1,0x87 2143 lghi %r1,0x87
2157 srag %r0,$s3,63 # broadcast upper bit 2144 srag %r0,$s3,63 # broadcast upper bit
2158 ngr %r1,%r0 # rem 2145 ngr %r1,%r0 # rem
2159 srlg %r0,$s1,63 # carry bit from lower half 2146 algr $s1,$s1
2160 sllg $s1,$s1,1 2147 alcgr $s3,$s3
2161 sllg $s3,$s3,1
2162 xgr $s1,%r1 2148 xgr $s1,%r1
2163 ogr $s3,%r0
2164 lrvgr $i2,$s1 # flip byte order 2149 lrvgr $i2,$s1 # flip byte order
2165 lrvgr $i3,$s3 2150 lrvgr $i3,$s3
2166 stmg $i2,$i3,$tweak($sp) # save the 1st tweak 2151 stmg $i2,$i3,$tweak($sp) # save the 1st tweak
@@ -2176,11 +2161,9 @@ $code.=<<___;
2176 lghi %r1,0x87 2161 lghi %r1,0x87
2177 srag %r0,$s3,63 # broadcast upper bit 2162 srag %r0,$s3,63 # broadcast upper bit
2178 ngr %r1,%r0 # rem 2163 ngr %r1,%r0 # rem
2179 srlg %r0,$s1,63 # carry bit from lower half 2164 algr $s1,$s1
2180 sllg $s1,$s1,1 2165 alcgr $s3,$s3
2181 sllg $s3,$s3,1
2182 xgr $s1,%r1 2166 xgr $s1,%r1
2183 ogr $s3,%r0
2184 lrvgr $s1,$s1 # flip byte order 2167 lrvgr $s1,$s1 # flip byte order
2185 lrvgr $s3,$s3 2168 lrvgr $s3,$s3
2186 srlg $s0,$s1,32 # smash the tweak to 4x32-bits 2169 srlg $s0,$s1,32 # smash the tweak to 4x32-bits