// crypto.cc (13168:4965381c122d -> 13169:eb3b2bea4231)
/*
 * Copyright (c) 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Matt Horsnell
 *          Prakash Ramrakhyani
 */

#include <cstdio>
#include <iostream>
#include <string>

#include "crypto.hh"

namespace ArmISA {

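// AES (Rijndael) byte-substitution tables: aesSBOX is the forward S-box
// and aesInvSBOX is its inverse permutation.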
const uint8_t
Crypto::aesSBOX[256] = {
    0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b,
    0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
    0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,
    0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
    0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,
    0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
    0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,
    0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
    0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,
    0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
    0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
    0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
    0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,
    0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
    0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
    0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
    0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f,
    0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
    0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11,
    0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
    0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,
    0xb0, 0x54, 0xbb, 0x16
};

const uint8_t
Crypto::aesInvSBOX[256] = {
    0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e,
    0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
    0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32,
    0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
    0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49,
    0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
    0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50,
    0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
    0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05,
    0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
    0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41,
    0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
    0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8,
    0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
    0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b,
    0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
    0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59,
    0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
    0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d,
    0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
    0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63,
    0x55, 0x21, 0x0c, 0x7d
};

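// Discrete log/antilog tables over GF(2^8) with the AES reduction
// polynomial (0x11b): aesFFEXP[i] = 0x03^i, and aesFFLOG is its inverse.
// They are used by aesFFMul() below to multiply field elements.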
const uint8_t
Crypto::aesFFLOG[256] = {
    0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7, 0x1b, 0x68,
    0x33, 0xee, 0xdf, 0x03, 0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef,
    0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1, 0x7d, 0xc2, 0x1d, 0xb5,
    0xf9, 0xb9, 0x27, 0x6a, 0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78,
    0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24, 0x12, 0xf0, 0x82, 0x45,
    0x35, 0x93, 0xda, 0x8e, 0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94,
    0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38, 0x66, 0xdd, 0xfd, 0x30,
    0xbf, 0x06, 0x8b, 0x62, 0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10,
    0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, 0x3a, 0x6b, 0x28, 0x54,
    0xfa, 0x85, 0x3d, 0xba, 0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca,
    0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57, 0xaf, 0x58, 0xa8, 0x50,
    0xf4, 0xea, 0xd6, 0x74, 0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8,
    0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, 0x59, 0xcb, 0x5f, 0xb0,
    0x9c, 0xa9, 0x51, 0xa0, 0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec,
    0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7, 0xcc, 0xbb, 0x3e, 0x5a,
    0xfb, 0x60, 0xb1, 0x86, 0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d,
    0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, 0xbc, 0x95, 0xcf, 0xcd,
    0x37, 0x3f, 0x5b, 0xd1, 0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47,
    0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab, 0x44, 0x11, 0x92, 0xd9,
    0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
    0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80,
    0xc0, 0xf7, 0x70, 0x07
};

const uint8_t
Crypto::aesFFEXP[256] = {
    0x01, 0x03, 0x05, 0x0f, 0x11, 0x33, 0x55, 0xff, 0x1a, 0x2e, 0x72, 0x96,
    0xa1, 0xf8, 0x13, 0x35, 0x5f, 0xe1, 0x38, 0x48, 0xd8, 0x73, 0x95, 0xa4,
    0xf7, 0x02, 0x06, 0x0a, 0x1e, 0x22, 0x66, 0xaa, 0xe5, 0x34, 0x5c, 0xe4,
    0x37, 0x59, 0xeb, 0x26, 0x6a, 0xbe, 0xd9, 0x70, 0x90, 0xab, 0xe6, 0x31,
    0x53, 0xf5, 0x04, 0x0c, 0x14, 0x3c, 0x44, 0xcc, 0x4f, 0xd1, 0x68, 0xb8,
    0xd3, 0x6e, 0xb2, 0xcd, 0x4c, 0xd4, 0x67, 0xa9, 0xe0, 0x3b, 0x4d, 0xd7,
    0x62, 0xa6, 0xf1, 0x08, 0x18, 0x28, 0x78, 0x88, 0x83, 0x9e, 0xb9, 0xd0,
    0x6b, 0xbd, 0xdc, 0x7f, 0x81, 0x98, 0xb3, 0xce, 0x49, 0xdb, 0x76, 0x9a,
    0xb5, 0xc4, 0x57, 0xf9, 0x10, 0x30, 0x50, 0xf0, 0x0b, 0x1d, 0x27, 0x69,
    0xbb, 0xd6, 0x61, 0xa3, 0xfe, 0x19, 0x2b, 0x7d, 0x87, 0x92, 0xad, 0xec,
    0x2f, 0x71, 0x93, 0xae, 0xe9, 0x20, 0x60, 0xa0, 0xfb, 0x16, 0x3a, 0x4e,
    0xd2, 0x6d, 0xb7, 0xc2, 0x5d, 0xe7, 0x32, 0x56, 0xfa, 0x15, 0x3f, 0x41,
    0xc3, 0x5e, 0xe2, 0x3d, 0x47, 0xc9, 0x40, 0xc0, 0x5b, 0xed, 0x2c, 0x74,
    0x9c, 0xbf, 0xda, 0x75, 0x9f, 0xba, 0xd5, 0x64, 0xac, 0xef, 0x2a, 0x7e,
    0x82, 0x9d, 0xbc, 0xdf, 0x7a, 0x8e, 0x89, 0x80, 0x9b, 0xb6, 0xc1, 0x58,
    0xe8, 0x23, 0x65, 0xaf, 0xea, 0x25, 0x6f, 0xb1, 0xc8, 0x43, 0xc5, 0x54,
    0xfc, 0x1f, 0x21, 0x63, 0xa5, 0xf4, 0x07, 0x09, 0x1b, 0x2d, 0x77, 0x99,
    0xb0, 0xcb, 0x46, 0xca, 0x45, 0xcf, 0x4a, 0xde, 0x79, 0x8b, 0x86, 0x91,
    0xa8, 0xe3, 0x3e, 0x42, 0xc6, 0x51, 0xf3, 0x0e, 0x12, 0x36, 0x5a, 0xee,
    0x29, 0x7b, 0x8d, 0x8c, 0x8f, 0x8a, 0x85, 0x94, 0xa7, 0xf2, 0x0d, 0x17,
    0x39, 0x4b, 0xdd, 0x7c, 0x84, 0x97, 0xa2, 0xfd, 0x1c, 0x24, 0x6c, 0xb4,
    0xc7, 0x52, 0xf6, 0x01
};

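// Byte permutations for ShiftRows/InvShiftRows: entry i gives the index
// of the input byte that moves to output position i of the column-major
// 4x4 state.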
const uint8_t
Crypto::aesSHIFT[16] = {
    0, 5, 10, 15, 4, 9, 14, 3,
    8, 13, 2, 7, 12, 1, 6, 11
};

const uint8_t
Crypto::aesINVSHIFT[16] = {
    0, 13, 10, 7, 4, 1, 14, 11,
    8, 5, 2, 15, 12, 9, 6, 3
};

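// Multiplication in GF(2^8) via the log/antilog tables:
// a * b = aesFFEXP[(aesFFLOG[a] + aesFFLOG[b]) mod 255], with 0 as an
// absorbing element. For example, aesFFMul(0x02, 0x80) == 0x1b, since
// x * x^7 = x^8 = x^4 + x^3 + x + 1 (mod the AES polynomial).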
uint8_t
Crypto::aesFFMul(uint8_t a, uint8_t b)
{
    unsigned int log_prod;

    if ((a == 0) || (b == 0)) return 0;

    log_prod = (aesFFLOG[a] + aesFFLOG[b]);

    if (log_prod > 0xff)
        log_prod = log_prod - 0xff;

    return aesFFEXP[log_prod];
}

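// SubBytes/InvSubBytes: apply the (inverse) S-box to each of the 16
// state bytes independently.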
void
Crypto::aesSubBytes(uint8_t *output, uint8_t *input)
{
    for (int i = 0; i < 16; ++i) {
        output[i] = aesSBOX[input[i]];
    }
}

void
Crypto::aesInvSubBytes(uint8_t *output, uint8_t *input)
{
    for (int i = 0; i < 16; ++i) {
        output[i] = aesInvSBOX[input[i]];
    }
}

void
Crypto::aesShiftRows(uint8_t *output, uint8_t *input)
{
    for (int i = 0; i < 16; ++i) {
        output[i] = input[aesSHIFT[i]];
    }
}

void
Crypto::aesInvShiftRows(uint8_t *output, uint8_t *input)
{
    for (int i = 0; i < 16; ++i) {
        output[i] = input[aesINVSHIFT[i]];
    }
}

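// AddRoundKey: XOR the 128-bit round key into the state.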
void
Crypto::aesAddRoundKey(uint8_t *output, uint8_t *input,
                       uint8_t *key)
{
    for (int i = 0; i < 16; ++i) {
        output[i] = input[i] ^ key[i];
    }
}

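// MixColumns: multiply each state column by the circulant matrix
// {02 03 01 01} over GF(2^8). The code uses the identity 03*a = 02*a ^ a,
// so each output byte is the column XOR (t1) plus one doubling
// (aesFFMul2, presumably provided by crypto.hh). InvMixColumns applies
// the inverse matrix {0e 0b 0d 09} directly via aesFFMul.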
void
Crypto::aesMixColumns(uint8_t *output, uint8_t *input)
{
    for (int j = 0; j < 4; ++j) {
        int row0 = (j * 4);
        int row1 = row0 + 1;
        int row2 = row0 + 2;
        int row3 = row0 + 3;
        uint8_t t1 = input[row0] ^ input[row1] ^
                     input[row2] ^ input[row3];

        output[row1] = input[row1] ^ t1 ^ aesFFMul2(input[row1] ^ input[row2]);
        output[row2] = input[row2] ^ t1 ^ aesFFMul2(input[row2] ^ input[row3]);
        output[row3] = input[row3] ^ t1 ^ aesFFMul2(input[row3] ^ input[row0]);
        output[row0] = input[row0] ^ t1 ^ aesFFMul2(input[row0] ^ input[row1]);
    }
}

void
Crypto::aesInvMixColumns(uint8_t *output, uint8_t *input)
{
    for (int j = 0; j < 4; ++j) {
        for (int i = 0; i < 4; ++i) {
            int index0 = (j * 4) + i;
            int index1 = (j * 4) + ((i + 1) % 4);
            int index2 = (j * 4) + ((i + 2) % 4);
            int index3 = (j * 4) + ((i + 3) % 4);
            output[index0] =
                aesFFMul(0x0e, input[index0]) ^ aesFFMul(0x0b, input[index1]) ^
                aesFFMul(0x0d, input[index2]) ^ aesFFMul(0x09, input[index3]);
        }
    }
}

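// Single AES round step as exposed by the Arm AESE/AESD instructions:
// AddRoundKey, then (Inv)ShiftRows, then (Inv)SubBytes. The MixColumns
// step is handled separately (AESMC/AESIMC).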
void
Crypto::aesEncrypt(uint8_t *output, uint8_t *input,
                   uint8_t *key)
{
    uint8_t temp1[16];
    uint8_t temp2[16];
    aesAddRoundKey(&temp1[0], input, key);
    aesShiftRows(&temp2[0], &temp1[0]);
    aesSubBytes(output, &temp2[0]);
}

void
Crypto::aesDecrypt(uint8_t *output, uint8_t *input,
                   uint8_t *key)
{
    uint8_t temp1[16];
    uint8_t temp2[16];
    aesAddRoundKey(&temp1[0], input, key);
    aesInvShiftRows(&temp2[0], &temp1[0]);
    aesInvSubBytes(output, &temp2[0]);
}

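// Four rounds of the SHA-256 hash update. X and Y hold the two 128-bit
// halves of the working state and Z the four round inputs (message word
// plus round constant), which the caller supplies. The choose, majority,
// sigma0 and sigma1 helpers come from crypto.hh.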
void
Crypto::sha256Op(
    uint32_t *X,
    uint32_t *Y,
    uint32_t *Z)
{
    uint32_t T0, T1, T2, T3;
    for (int i = 0; i < 4; ++i) {
        T0 = choose(Y[0], Y[1], Y[2]);
        T1 = majority(X[0], X[1], X[2]);
        T2 = Y[3] + sigma1(Y[0]) + T0 + Z[i];
        X[3] = T2 + X[3];
        Y[3] = T2 + sigma0(X[0]) + T1;
        // Rotate
        T3 = Y[3];
        Y[3] = Y[2]; Y[2] = Y[1]; Y[1] = Y[0]; Y[0] = X[3];
        X[3] = X[2]; X[2] = X[1]; X[1] = X[0]; X[0] = T3;
    }
}

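// Four rounds of the SHA-1 hash update; op selects the round function
// (choose, parity, or majority). Z holds the four round inputs (message
// word plus round constant); ror(x, 27) is the standard rotate-left by 5.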
void
Crypto::_sha1Op(
    uint32_t *X,
    uint32_t *Y,
    uint32_t *Z,
    SHAOp op)
{
    uint32_t T1, T2;

    for (int i = 0; i < 4; ++i) {
        switch (op) {
          case CHOOSE: T1 = choose(X[1], X[2], X[3]); break;
          case PARITY: T1 = parity(X[1], X[2], X[3]); break;
          case MAJORITY: T1 = majority(X[1], X[2], X[3]); break;
          default: return;
        }
        Y[0] += ror(X[0], 27) + T1 + Z[i];
        X[1] = ror(X[1], 2);
        T2 = Y[0];
        Y[0] = X[3];
        X[3] = X[2]; X[2] = X[1]; X[1] = X[0]; X[0] = T2;
    }
}

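// sha256H/sha256H2 (SHA256H/SHA256H2 instructions): both run the same
// four rounds; they differ only in which half of the working state the
// destination register supplies and receives.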
void
Crypto::sha256H(
    uint8_t *output,
    uint8_t *input,
    uint8_t *input2)
{
    uint32_t X[4], Y[4], Z[4];
    load3Reg(&X[0], &Y[0], &Z[0], output, input, input2);
    sha256Op(&X[0], &Y[0], &Z[0]);
    store1Reg(output, &X[0]);
}

void
Crypto::sha256H2(
    uint8_t *output,
    uint8_t *input,
    uint8_t *input2)
{
    uint32_t X[4], Y[4], Z[4];
    load3Reg(&X[0], &Y[0], &Z[0], output, input, input2);
    sha256Op(&Y[0], &X[0], &Z[0]);
    store1Reg(output, &X[0]);
}

344
345void
346Crypto::sha256Su0(uint8_t *output, uint8_t *input)
347{
348 uint32_t X[4], Y[4];
349 uint32_t T[4];
350
351 load2Reg(&X[0], &Y[0], output, input);
352
353 T[3] = Y[0]; T[2] = X[3]; T[1] = X[2]; T[0] = X[1];
354
355 T[3] = ror(T[3], 7) ^ ror(T[3], 18) ^ (T[3] >> 3);
356 T[2] = ror(T[2], 7) ^ ror(T[2], 18) ^ (T[2] >> 3);
357 T[1] = ror(T[1], 7) ^ ror(T[1], 18) ^ (T[1] >> 3);
358 T[0] = ror(T[0], 7) ^ ror(T[0], 18) ^ (T[0] >> 3);
359
360 X[3] += T[3];
361 X[2] += T[2];
362 X[1] += T[1];
363 X[0] += T[0];
364
365 store1Reg(output, &X[0]);
366}
367
368void
369Crypto::sha256Su1(
370 uint8_t *output,
371 uint8_t *input,
372 uint8_t *input2)
373{
374 uint32_t X[4], Y[4], Z[4];
375 uint32_t T0[4], T1[4], T2[4], T3[4];
376
377 load3Reg(&X[0], &Y[0], &Z[0], output, input, input2);
378
379 T0[3] = Z[0]; T0[2] = Y[3]; T0[1] = Y[2]; T0[0] = Y[1];
380 T1[1] = Z[3]; T1[0] = Z[2];
381 T1[1] = ror(T1[1], 17) ^ ror(T1[1], 19) ^ (T1[1] >> 10);
382 T1[0] = ror(T1[0], 17) ^ ror(T1[0], 19) ^ (T1[0] >> 10);
383 T3[1] = X[1] + T0[1]; T3[0] = X[0] + T0[0];
384 T1[1] = T3[1] + T1[1]; T1[0] = T3[0] + T1[0];
385 T2[1] = ror(T1[1], 17) ^ ror(T1[1], 19) ^ (T1[1] >> 10);
386 T2[0] = ror(T1[0], 17) ^ ror(T1[0], 19) ^ (T1[0] >> 10);
387 T3[1] = X[3] + T0[3]; T3[0] = X[2] + T0[2];
388 X[3] = T3[1] + T2[1];
389 X[2] = T3[0] + T2[0];
390 X[1] = T1[1]; X[0] = T1[0];
391
392 store1Reg(output, &X[0]);
393}
394
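// sha1C, sha1P and sha1M (SHA1C/SHA1P/SHA1M) share the common sha1Op
// wrapper and differ only in the round function passed to _sha1Op.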
void
Crypto::sha1Op(
    uint8_t *output,
    uint8_t *input,
    uint8_t *input2,
    SHAOp op)
{
    uint32_t X[4], Y[4], Z[4];
    load3Reg(&X[0], &Y[0], &Z[0], output, input, input2);
    _sha1Op(&X[0], &Y[0], &Z[0], op);
    store1Reg(output, &X[0]);
}

void
Crypto::sha1C(
    uint8_t *output,
    uint8_t *input,
    uint8_t *input2)
{
    sha1Op(output, input, input2, CHOOSE);
}

void
Crypto::sha1P(
    uint8_t *output,
    uint8_t *input,
    uint8_t *input2)
{
    sha1Op(output, input, input2, PARITY);
}

void
Crypto::sha1M(
    uint8_t *output,
    uint8_t *input,
    uint8_t *input2)
{
    sha1Op(output, input, input2, MAJORITY);
}

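// sha1H (SHA1H): fixed rotate of the low word of the source, left by 30
// bits (ror 2), written to the low word of the destination.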
void
Crypto::sha1H(uint8_t *output, uint8_t *input)
{
    uint32_t X[4], Y[4];
    load2Reg(&X[0], &Y[0], output, input);
    X[0] = ror(Y[0], 2);
    store1Reg(output, &X[0]);
}

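// SHA-1 message schedule update (SHA1SU0/SHA1SU1): sha1Su0 performs the
// initial XOR of the schedule inputs; sha1Su1 completes the update with
// the one-bit rotation (ror 31) of each word.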
void
Crypto::sha1Su0(
    uint8_t *output,
    uint8_t *input,
    uint8_t *input2)
{
    uint32_t X[4], Y[4], Z[4], T[4];
    load3Reg(&X[0], &Y[0], &Z[0], output, input, input2);

    T[3] = Y[1]; T[2] = Y[0]; T[1] = X[3]; T[0] = X[2];
    X[3] = T[3] ^ X[3] ^ Z[3];
    X[2] = T[2] ^ X[2] ^ Z[2];
    X[1] = T[1] ^ X[1] ^ Z[1];
    X[0] = T[0] ^ X[0] ^ Z[0];

    store1Reg(output, &X[0]);
}

void
Crypto::sha1Su1(uint8_t *output, uint8_t *input)
{
    uint32_t X[4], Y[4], T[4];
    load2Reg(&X[0], &Y[0], output, input);

    T[3] = X[3] ^ 0x0;
    T[2] = X[2] ^ Y[3];
    T[1] = X[1] ^ Y[2];
    T[0] = X[0] ^ Y[1];
    X[2] = ror(T[2], 31); X[1] = ror(T[1], 31); X[0] = ror(T[0], 31);
    X[3] = ror(T[3], 31) ^ ror(T[0], 30);

    store1Reg(output, &X[0]);
}

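// Helpers to move the 16-byte instruction operands to and from arrays of
// four 32-bit working words. The loads reinterpret the byte buffers as
// uint32_t in host order; store1Reg writes the result back one byte at a
// time, least-significant byte first.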
void
Crypto::load2Reg(
    uint32_t *X,
    uint32_t *Y,
    uint8_t *output,
    uint8_t *input)
{
    for (int i = 0; i < 4; ++i) {
        X[i] = *((uint32_t *)&output[i*4]);
        Y[i] = *((uint32_t *)&input[i*4]);
    }
}

void
Crypto::load3Reg(
    uint32_t *X,
    uint32_t *Y,
    uint32_t *Z,
    uint8_t *output,
    uint8_t *input,
    uint8_t *input2)
{
    for (int i = 0; i < 4; ++i) {
        X[i] = *((uint32_t *)&output[i*4]);
        Y[i] = *((uint32_t *)&input[i*4]);
        Z[i] = *((uint32_t *)&input2[i*4]);
    }
}

void
Crypto::store1Reg(uint8_t *output, uint32_t *X)
{
    for (int i = 0; i < 4; ++i) {
        output[i*4] = (uint8_t)(X[i]);
        output[i*4+1] = (uint8_t)(X[i] >> 8);
        output[i*4+2] = (uint8_t)(X[i] >> 16);
        output[i*4+3] = (uint8_t)(X[i] >> 24);
    }
}

} // namespace ArmISA