sph_sha2.c
/* $Id: sha2.c 227 2010-06-16 17:28:38Z tp $ */
/*
 * SHA-224 / SHA-256 implementation.
 *
 * ==========================(LICENSE BEGIN)============================
 *
 * Copyright (c) 2007-2010  Projet RNRT SAPHIR
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * ===========================(LICENSE END)=============================
 *
 * @author   Thomas Pornin <thomas.pornin@cryptolog.com>
 */

#include <stddef.h>
#include <string.h>

#include "sph_sha2.h"

#ifdef __cplusplus
extern "C" {
#endif

#if SPH_SMALL_FOOTPRINT && !defined SPH_SMALL_FOOTPRINT_SHA2
#define SPH_SMALL_FOOTPRINT_SHA2   1
#endif

#define CH(X, Y, Z)    ((((Y) ^ (Z)) & (X)) ^ (Z))
#define MAJ(X, Y, Z)   (((Y) & (Z)) | (((Y) | (Z)) & (X)))
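/*
 * These are operation-reduced rewrites of the textbook FIPS 180-4
 * functions Ch(x,y,z) = (x AND y) XOR ((NOT x) AND z) and
 * Maj(x,y,z) = (x AND y) XOR (x AND z) XOR (y AND z); each form is
 * bitwise equivalent to its textbook counterpart but needs one fewer
 * logical operation. A minimal, illustrative single-bit self-check
 * (not part of the original file) is sketched below.
 */
#if 0
#include <assert.h>
static void
check_ch_maj(void)
{
    unsigned x, y, z;

    for (x = 0; x <= 1; x ++)
        for (y = 0; y <= 1; y ++)
            for (z = 0; z <= 1; z ++) {
                /* textbook choice function */
                assert(CH(x, y, z) == ((x & y) ^ (~x & z & 1U)));
                /* textbook majority function */
                assert(MAJ(x, y, z) == ((x & y) ^ (x & z) ^ (y & z)));
            }
}
#endif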

#define ROTR   SPH_ROTR32

#define BSG2_0(x)   (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define BSG2_1(x)   (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define SSG2_0(x)   (ROTR(x, 7) ^ ROTR(x, 18) ^ SPH_T32((x) >> 3))
#define SSG2_1(x)   (ROTR(x, 17) ^ ROTR(x, 19) ^ SPH_T32((x) >> 10))
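/*
 * In FIPS 180-4 notation, BSG2_0 and BSG2_1 are the "big" sigma
 * functions Sigma0 and Sigma1 applied to the working variables in each
 * round, while SSG2_0 and SSG2_1 are the "small" sigma functions
 * sigma0 and sigma1 used to expand the 16 input words into the 64-word
 * message schedule.
 */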

static const sph_u32 H224[8] = {
    SPH_C32(0xC1059ED8), SPH_C32(0x367CD507), SPH_C32(0x3070DD17),
    SPH_C32(0xF70E5939), SPH_C32(0xFFC00B31), SPH_C32(0x68581511),
    SPH_C32(0x64F98FA7), SPH_C32(0xBEFA4FA4)
};

static const sph_u32 H256[8] = {
    SPH_C32(0x6A09E667), SPH_C32(0xBB67AE85), SPH_C32(0x3C6EF372),
    SPH_C32(0xA54FF53A), SPH_C32(0x510E527F), SPH_C32(0x9B05688C),
    SPH_C32(0x1F83D9AB), SPH_C32(0x5BE0CD19)
};

/*
 * The SHA2_ROUND_BODY macro defines the body of a SHA-224 / SHA-256
 * compression function implementation. The "in" parameter should
 * evaluate, when applied to a numerical input parameter from 0 to 15,
 * to an expression which yields the corresponding input block word.
 * The "r" parameter should evaluate to an array or pointer expression
 * designating the array of 8 words which contains the input and output
 * of the compression function.
 */
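/*
 * For example (taken from the functions later in this file), "in" can
 * be a macro decoding big-endian 32-bit words from a byte buffer:
 *
 *   #define SHA2_IN(x)   sph_dec32be_aligned(data + (4 * (x)))
 *   SHA2_ROUND_BODY(SHA2_IN, r);
 *
 * or a plain array access such as "msg[x]", as in sph_sha224_comp().
 */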

#if SPH_SMALL_FOOTPRINT_SHA2

static const sph_u32 K[64] = {
    SPH_C32(0x428A2F98), SPH_C32(0x71374491),
    SPH_C32(0xB5C0FBCF), SPH_C32(0xE9B5DBA5),
    SPH_C32(0x3956C25B), SPH_C32(0x59F111F1),
    SPH_C32(0x923F82A4), SPH_C32(0xAB1C5ED5),
    SPH_C32(0xD807AA98), SPH_C32(0x12835B01),
    SPH_C32(0x243185BE), SPH_C32(0x550C7DC3),
    SPH_C32(0x72BE5D74), SPH_C32(0x80DEB1FE),
    SPH_C32(0x9BDC06A7), SPH_C32(0xC19BF174),
    SPH_C32(0xE49B69C1), SPH_C32(0xEFBE4786),
    SPH_C32(0x0FC19DC6), SPH_C32(0x240CA1CC),
    SPH_C32(0x2DE92C6F), SPH_C32(0x4A7484AA),
    SPH_C32(0x5CB0A9DC), SPH_C32(0x76F988DA),
    SPH_C32(0x983E5152), SPH_C32(0xA831C66D),
    SPH_C32(0xB00327C8), SPH_C32(0xBF597FC7),
    SPH_C32(0xC6E00BF3), SPH_C32(0xD5A79147),
    SPH_C32(0x06CA6351), SPH_C32(0x14292967),
    SPH_C32(0x27B70A85), SPH_C32(0x2E1B2138),
    SPH_C32(0x4D2C6DFC), SPH_C32(0x53380D13),
    SPH_C32(0x650A7354), SPH_C32(0x766A0ABB),
    SPH_C32(0x81C2C92E), SPH_C32(0x92722C85),
    SPH_C32(0xA2BFE8A1), SPH_C32(0xA81A664B),
    SPH_C32(0xC24B8B70), SPH_C32(0xC76C51A3),
    SPH_C32(0xD192E819), SPH_C32(0xD6990624),
    SPH_C32(0xF40E3585), SPH_C32(0x106AA070),
    SPH_C32(0x19A4C116), SPH_C32(0x1E376C08),
    SPH_C32(0x2748774C), SPH_C32(0x34B0BCB5),
    SPH_C32(0x391C0CB3), SPH_C32(0x4ED8AA4A),
    SPH_C32(0x5B9CCA4F), SPH_C32(0x682E6FF3),
    SPH_C32(0x748F82EE), SPH_C32(0x78A5636F),
    SPH_C32(0x84C87814), SPH_C32(0x8CC70208),
    SPH_C32(0x90BEFFFA), SPH_C32(0xA4506CEB),
    SPH_C32(0xBEF9A3F7), SPH_C32(0xC67178F2)
};
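/*
 * Per FIPS 180-4, K[i] is the first 32 bits of the fractional part of
 * the cube root of the i-th prime (2, 3, 5, ...). The generator below
 * is a minimal, illustrative sketch (not part of the original file)
 * that reproduces the table above from that definition; link with -lm.
 */
#if 0
#include <math.h>
#include <stdio.h>

static int
is_prime(unsigned n)
{
    unsigned d;

    for (d = 2; d * d <= n; d ++)
        if (n % d == 0)
            return 0;
    return n > 1;
}

int
main(void)
{
    unsigned p = 1;
    int i;

    for (i = 0; i < 64; i ++) {
        double f;

        do {
            p ++;
        } while (!is_prime(p));
        f = cbrt((double)p);
        /* keep the top 32 bits of the fractional part */
        printf("SPH_C32(0x%08lX)\n",
            (unsigned long)((f - floor(f)) * 4294967296.0));
    }
    return 0;
}
#endif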

#define SHA2_MEXP1(in, pc)   do { \
        W[pc] = in(pc); \
    } while (0)

#define SHA2_MEXP2(in, pc)   do { \
        W[(pc) & 0x0F] = SPH_T32(SSG2_1(W[((pc) - 2) & 0x0F]) \
            + W[((pc) - 7) & 0x0F] \
            + SSG2_0(W[((pc) - 15) & 0x0F]) + W[(pc) & 0x0F]); \
    } while (0)
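/*
 * The "& 0x0F" indexing makes W[] a 16-word circular buffer: each new
 * schedule word overwrites the oldest one, so the 64-word message
 * schedule is never stored in full. That rolling window is the main
 * space saving of the small-footprint variant.
 */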

#define SHA2_STEPn(n, a, b, c, d, e, f, g, h, in, pc)   do { \
        sph_u32 t1, t2; \
        SHA2_MEXP ## n(in, pc); \
        t1 = SPH_T32(h + BSG2_1(e) + CH(e, f, g) \
            + K[pcount + (pc)] + W[(pc) & 0x0F]); \
        t2 = SPH_T32(BSG2_0(a) + MAJ(a, b, c)); \
        d = SPH_T32(d + t1); \
        h = SPH_T32(t1 + t2); \
    } while (0)

#define SHA2_STEP1(a, b, c, d, e, f, g, h, in, pc) \
    SHA2_STEPn(1, a, b, c, d, e, f, g, h, in, pc)
#define SHA2_STEP2(a, b, c, d, e, f, g, h, in, pc) \
    SHA2_STEPn(2, a, b, c, d, e, f, g, h, in, pc)

#define SHA2_ROUND_BODY(in, r)   do { \
        sph_u32 A, B, C, D, E, F, G, H; \
        sph_u32 W[16]; \
        unsigned pcount; \
        \
        A = (r)[0]; \
        B = (r)[1]; \
        C = (r)[2]; \
        D = (r)[3]; \
        E = (r)[4]; \
        F = (r)[5]; \
        G = (r)[6]; \
        H = (r)[7]; \
        pcount = 0; \
        SHA2_STEP1(A, B, C, D, E, F, G, H, in,  0); \
        SHA2_STEP1(H, A, B, C, D, E, F, G, in,  1); \
        SHA2_STEP1(G, H, A, B, C, D, E, F, in,  2); \
        SHA2_STEP1(F, G, H, A, B, C, D, E, in,  3); \
        SHA2_STEP1(E, F, G, H, A, B, C, D, in,  4); \
        SHA2_STEP1(D, E, F, G, H, A, B, C, in,  5); \
        SHA2_STEP1(C, D, E, F, G, H, A, B, in,  6); \
        SHA2_STEP1(B, C, D, E, F, G, H, A, in,  7); \
        SHA2_STEP1(A, B, C, D, E, F, G, H, in,  8); \
        SHA2_STEP1(H, A, B, C, D, E, F, G, in,  9); \
        SHA2_STEP1(G, H, A, B, C, D, E, F, in, 10); \
        SHA2_STEP1(F, G, H, A, B, C, D, E, in, 11); \
        SHA2_STEP1(E, F, G, H, A, B, C, D, in, 12); \
        SHA2_STEP1(D, E, F, G, H, A, B, C, in, 13); \
        SHA2_STEP1(C, D, E, F, G, H, A, B, in, 14); \
        SHA2_STEP1(B, C, D, E, F, G, H, A, in, 15); \
        for (pcount = 16; pcount < 64; pcount += 16) { \
            SHA2_STEP2(A, B, C, D, E, F, G, H, in,  0); \
            SHA2_STEP2(H, A, B, C, D, E, F, G, in,  1); \
            SHA2_STEP2(G, H, A, B, C, D, E, F, in,  2); \
            SHA2_STEP2(F, G, H, A, B, C, D, E, in,  3); \
            SHA2_STEP2(E, F, G, H, A, B, C, D, in,  4); \
            SHA2_STEP2(D, E, F, G, H, A, B, C, in,  5); \
            SHA2_STEP2(C, D, E, F, G, H, A, B, in,  6); \
            SHA2_STEP2(B, C, D, E, F, G, H, A, in,  7); \
            SHA2_STEP2(A, B, C, D, E, F, G, H, in,  8); \
            SHA2_STEP2(H, A, B, C, D, E, F, G, in,  9); \
            SHA2_STEP2(G, H, A, B, C, D, E, F, in, 10); \
            SHA2_STEP2(F, G, H, A, B, C, D, E, in, 11); \
            SHA2_STEP2(E, F, G, H, A, B, C, D, in, 12); \
            SHA2_STEP2(D, E, F, G, H, A, B, C, in, 13); \
            SHA2_STEP2(C, D, E, F, G, H, A, B, in, 14); \
            SHA2_STEP2(B, C, D, E, F, G, H, A, in, 15); \
        } \
        (r)[0] = SPH_T32((r)[0] + A); \
        (r)[1] = SPH_T32((r)[1] + B); \
        (r)[2] = SPH_T32((r)[2] + C); \
        (r)[3] = SPH_T32((r)[3] + D); \
        (r)[4] = SPH_T32((r)[4] + E); \
        (r)[5] = SPH_T32((r)[5] + F); \
        (r)[6] = SPH_T32((r)[6] + G); \
        (r)[7] = SPH_T32((r)[7] + H); \
    } while (0)

#else

#define SHA2_ROUND_BODY(in, r)   do { \
        sph_u32 A, B, C, D, E, F, G, H, T1, T2; \
        sph_u32 W00, W01, W02, W03, W04, W05, W06, W07; \
        sph_u32 W08, W09, W10, W11, W12, W13, W14, W15; \
        int i; \
        \
/* for (i=0;i<8;i++) {printf("in[%d]=%08x in[%d]=%08x \n",2*i,in(2*i),2*i+1,in(2*i+1));} */ \
        A = (r)[0]; \
        B = (r)[1]; \
        C = (r)[2]; \
        D = (r)[3]; \
        E = (r)[4]; \
        F = (r)[5]; \
        G = (r)[6]; \
        H = (r)[7]; \
        W00 = in(0); \
        T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
            + SPH_C32(0x428A2F98) + W00); \
        T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
        D = SPH_T32(D + T1); \
        H = SPH_T32(T1 + T2); \
        W01 = in(1); \
        T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
            + SPH_C32(0x71374491) + W01); \
        T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
        C = SPH_T32(C + T1); \
        G = SPH_T32(T1 + T2); \
        W02 = in(2); \
        T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
            + SPH_C32(0xB5C0FBCF) + W02); \
        T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
        B = SPH_T32(B + T1); \
        F = SPH_T32(T1 + T2); \
        W03 = in(3); \
        T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
            + SPH_C32(0xE9B5DBA5) + W03); \
        T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
        A = SPH_T32(A + T1); \
        E = SPH_T32(T1 + T2); \
        W04 = in(4); \
        T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
            + SPH_C32(0x3956C25B) + W04); \
        T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
        H = SPH_T32(H + T1); \
        D = SPH_T32(T1 + T2); \
        W05 = in(5); \
        T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
            + SPH_C32(0x59F111F1) + W05); \
        T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
        G = SPH_T32(G + T1); \
        C = SPH_T32(T1 + T2); \
        W06 = in(6); \
        T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
            + SPH_C32(0x923F82A4) + W06); \
        T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
        F = SPH_T32(F + T1); \
        B = SPH_T32(T1 + T2); \
        W07 = in(7); \
        T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
            + SPH_C32(0xAB1C5ED5) + W07); \
        T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
        E = SPH_T32(E + T1); \
        A = SPH_T32(T1 + T2); \
        W08 = in(8); \
        T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
            + SPH_C32(0xD807AA98) + W08); \
        T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
        D = SPH_T32(D + T1); \
        H = SPH_T32(T1 + T2); \
        W09 = in(9); \
        T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
            + SPH_C32(0x12835B01) + W09); \
        T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
        C = SPH_T32(C + T1); \
        G = SPH_T32(T1 + T2); \
        W10 = in(10); \
        T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
            + SPH_C32(0x243185BE) + W10); \
        T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
        B = SPH_T32(B + T1); \
        F = SPH_T32(T1 + T2); \
        W11 = in(11); \
        T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
            + SPH_C32(0x550C7DC3) + W11); \
        T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
        A = SPH_T32(A + T1); \
        E = SPH_T32(T1 + T2); \
        W12 = in(12); \
        T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
            + SPH_C32(0x72BE5D74) + W12); \
        T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
        H = SPH_T32(H + T1); \
        D = SPH_T32(T1 + T2); \
        W13 = in(13); \
        T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
            + SPH_C32(0x80DEB1FE) + W13); \
        T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
        G = SPH_T32(G + T1); \
        C = SPH_T32(T1 + T2); \
        W14 = in(14); \
        T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
            + SPH_C32(0x9BDC06A7) + W14); \
        T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
        F = SPH_T32(F + T1); \
        B = SPH_T32(T1 + T2); \
        W15 = in(15); \
        T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
            + SPH_C32(0xC19BF174) + W15); \
        T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
        E = SPH_T32(E + T1); \
        A = SPH_T32(T1 + T2); \
        W00 = SPH_T32(SSG2_1(W14) + W09 + SSG2_0(W01) + W00); \
        T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
            + SPH_C32(0xE49B69C1) + W00); \
        T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
        D = SPH_T32(D + T1); \
        H = SPH_T32(T1 + T2); \
        W01 = SPH_T32(SSG2_1(W15) + W10 + SSG2_0(W02) + W01); \
        T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
            + SPH_C32(0xEFBE4786) + W01); \
        T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
        C = SPH_T32(C + T1); \
        G = SPH_T32(T1 + T2); \
        W02 = SPH_T32(SSG2_1(W00) + W11 + SSG2_0(W03) + W02); \
        T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
            + SPH_C32(0x0FC19DC6) + W02); \
        T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
        B = SPH_T32(B + T1); \
        F = SPH_T32(T1 + T2); \
        W03 = SPH_T32(SSG2_1(W01) + W12 + SSG2_0(W04) + W03); \
        T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
            + SPH_C32(0x240CA1CC) + W03); \
        T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
        A = SPH_T32(A + T1); \
        E = SPH_T32(T1 + T2); \
        W04 = SPH_T32(SSG2_1(W02) + W13 + SSG2_0(W05) + W04); \
        T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
            + SPH_C32(0x2DE92C6F) + W04); \
        T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
        H = SPH_T32(H + T1); \
        D = SPH_T32(T1 + T2); \
        W05 = SPH_T32(SSG2_1(W03) + W14 + SSG2_0(W06) + W05); \
        T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
            + SPH_C32(0x4A7484AA) + W05); \
        T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
        G = SPH_T32(G + T1); \
        C = SPH_T32(T1 + T2); \
        W06 = SPH_T32(SSG2_1(W04) + W15 + SSG2_0(W07) + W06); \
        T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
            + SPH_C32(0x5CB0A9DC) + W06); \
        T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
        F = SPH_T32(F + T1); \
        B = SPH_T32(T1 + T2); \
        W07 = SPH_T32(SSG2_1(W05) + W00 + SSG2_0(W08) + W07); \
        T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
            + SPH_C32(0x76F988DA) + W07); \
        T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
        E = SPH_T32(E + T1); \
        A = SPH_T32(T1 + T2); \
        W08 = SPH_T32(SSG2_1(W06) + W01 + SSG2_0(W09) + W08); \
        T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
            + SPH_C32(0x983E5152) + W08); \
        T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
        D = SPH_T32(D + T1); \
        H = SPH_T32(T1 + T2); \
        W09 = SPH_T32(SSG2_1(W07) + W02 + SSG2_0(W10) + W09); \
        T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
            + SPH_C32(0xA831C66D) + W09); \
        T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
        C = SPH_T32(C + T1); \
        G = SPH_T32(T1 + T2); \
        W10 = SPH_T32(SSG2_1(W08) + W03 + SSG2_0(W11) + W10); \
        T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
            + SPH_C32(0xB00327C8) + W10); \
        T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
        B = SPH_T32(B + T1); \
        F = SPH_T32(T1 + T2); \
        W11 = SPH_T32(SSG2_1(W09) + W04 + SSG2_0(W12) + W11); \
        T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
            + SPH_C32(0xBF597FC7) + W11); \
        T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
        A = SPH_T32(A + T1); \
        E = SPH_T32(T1 + T2); \
        W12 = SPH_T32(SSG2_1(W10) + W05 + SSG2_0(W13) + W12); \
        T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
            + SPH_C32(0xC6E00BF3) + W12); \
        T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
        H = SPH_T32(H + T1); \
        D = SPH_T32(T1 + T2); \
        W13 = SPH_T32(SSG2_1(W11) + W06 + SSG2_0(W14) + W13); \
        T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
            + SPH_C32(0xD5A79147) + W13); \
        T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
        G = SPH_T32(G + T1); \
        C = SPH_T32(T1 + T2); \
        W14 = SPH_T32(SSG2_1(W12) + W07 + SSG2_0(W15) + W14); \
        T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
            + SPH_C32(0x06CA6351) + W14); \
        T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
        F = SPH_T32(F + T1); \
        B = SPH_T32(T1 + T2); \
        W15 = SPH_T32(SSG2_1(W13) + W08 + SSG2_0(W00) + W15); \
        T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
            + SPH_C32(0x14292967) + W15); \
        T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
        E = SPH_T32(E + T1); \
        A = SPH_T32(T1 + T2); \
        W00 = SPH_T32(SSG2_1(W14) + W09 + SSG2_0(W01) + W00); \
        T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
            + SPH_C32(0x27B70A85) + W00); \
        T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
        D = SPH_T32(D + T1); \
        H = SPH_T32(T1 + T2); \
        W01 = SPH_T32(SSG2_1(W15) + W10 + SSG2_0(W02) + W01); \
        T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
            + SPH_C32(0x2E1B2138) + W01); \
        T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
        C = SPH_T32(C + T1); \
        G = SPH_T32(T1 + T2); \
        W02 = SPH_T32(SSG2_1(W00) + W11 + SSG2_0(W03) + W02); \
        T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
            + SPH_C32(0x4D2C6DFC) + W02); \
        T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
        B = SPH_T32(B + T1); \
        F = SPH_T32(T1 + T2); \
        W03 = SPH_T32(SSG2_1(W01) + W12 + SSG2_0(W04) + W03); \
        T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
            + SPH_C32(0x53380D13) + W03); \
        T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
        A = SPH_T32(A + T1); \
        E = SPH_T32(T1 + T2); \
        W04 = SPH_T32(SSG2_1(W02) + W13 + SSG2_0(W05) + W04); \
        T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
            + SPH_C32(0x650A7354) + W04); \
        T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
        H = SPH_T32(H + T1); \
        D = SPH_T32(T1 + T2); \
        W05 = SPH_T32(SSG2_1(W03) + W14 + SSG2_0(W06) + W05); \
        T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
            + SPH_C32(0x766A0ABB) + W05); \
        T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
        G = SPH_T32(G + T1); \
        C = SPH_T32(T1 + T2); \
        W06 = SPH_T32(SSG2_1(W04) + W15 + SSG2_0(W07) + W06); \
        T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
            + SPH_C32(0x81C2C92E) + W06); \
        T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
        F = SPH_T32(F + T1); \
        B = SPH_T32(T1 + T2); \
        W07 = SPH_T32(SSG2_1(W05) + W00 + SSG2_0(W08) + W07); \
        T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
            + SPH_C32(0x92722C85) + W07); \
        T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
        E = SPH_T32(E + T1); \
        A = SPH_T32(T1 + T2); \
        W08 = SPH_T32(SSG2_1(W06) + W01 + SSG2_0(W09) + W08); \
        T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
            + SPH_C32(0xA2BFE8A1) + W08); \
        T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
        D = SPH_T32(D + T1); \
        H = SPH_T32(T1 + T2); \
        W09 = SPH_T32(SSG2_1(W07) + W02 + SSG2_0(W10) + W09); \
        T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
            + SPH_C32(0xA81A664B) + W09); \
        T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
        C = SPH_T32(C + T1); \
        G = SPH_T32(T1 + T2); \
        W10 = SPH_T32(SSG2_1(W08) + W03 + SSG2_0(W11) + W10); \
        T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
            + SPH_C32(0xC24B8B70) + W10); \
        T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
        B = SPH_T32(B + T1); \
        F = SPH_T32(T1 + T2); \
        W11 = SPH_T32(SSG2_1(W09) + W04 + SSG2_0(W12) + W11); \
        T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
            + SPH_C32(0xC76C51A3) + W11); \
        T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
        A = SPH_T32(A + T1); \
        E = SPH_T32(T1 + T2); \
        W12 = SPH_T32(SSG2_1(W10) + W05 + SSG2_0(W13) + W12); \
        T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
            + SPH_C32(0xD192E819) + W12); \
        T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
        H = SPH_T32(H + T1); \
        D = SPH_T32(T1 + T2); \
        W13 = SPH_T32(SSG2_1(W11) + W06 + SSG2_0(W14) + W13); \
        T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
            + SPH_C32(0xD6990624) + W13); \
        T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
        G = SPH_T32(G + T1); \
        C = SPH_T32(T1 + T2); \
        W14 = SPH_T32(SSG2_1(W12) + W07 + SSG2_0(W15) + W14); \
        T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
            + SPH_C32(0xF40E3585) + W14); \
        T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
        F = SPH_T32(F + T1); \
        B = SPH_T32(T1 + T2); \
        W15 = SPH_T32(SSG2_1(W13) + W08 + SSG2_0(W00) + W15); \
        T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
            + SPH_C32(0x106AA070) + W15); \
        T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
        E = SPH_T32(E + T1); \
        A = SPH_T32(T1 + T2); \
        W00 = SPH_T32(SSG2_1(W14) + W09 + SSG2_0(W01) + W00); \
        T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
            + SPH_C32(0x19A4C116) + W00); \
        T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
        D = SPH_T32(D + T1); \
        H = SPH_T32(T1 + T2); \
        W01 = SPH_T32(SSG2_1(W15) + W10 + SSG2_0(W02) + W01); \
        T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
            + SPH_C32(0x1E376C08) + W01); \
        T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
        C = SPH_T32(C + T1); \
        G = SPH_T32(T1 + T2); \
        W02 = SPH_T32(SSG2_1(W00) + W11 + SSG2_0(W03) + W02); \
        T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
            + SPH_C32(0x2748774C) + W02); \
        T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
        B = SPH_T32(B + T1); \
        F = SPH_T32(T1 + T2); \
        W03 = SPH_T32(SSG2_1(W01) + W12 + SSG2_0(W04) + W03); \
        T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
            + SPH_C32(0x34B0BCB5) + W03); \
        T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
        A = SPH_T32(A + T1); \
        E = SPH_T32(T1 + T2); \
        W04 = SPH_T32(SSG2_1(W02) + W13 + SSG2_0(W05) + W04); \
        T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
            + SPH_C32(0x391C0CB3) + W04); \
        T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
        H = SPH_T32(H + T1); \
        D = SPH_T32(T1 + T2); \
        W05 = SPH_T32(SSG2_1(W03) + W14 + SSG2_0(W06) + W05); \
        T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
            + SPH_C32(0x4ED8AA4A) + W05); \
        T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
        G = SPH_T32(G + T1); \
        C = SPH_T32(T1 + T2); \
        W06 = SPH_T32(SSG2_1(W04) + W15 + SSG2_0(W07) + W06); \
        T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
            + SPH_C32(0x5B9CCA4F) + W06); \
        T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
        F = SPH_T32(F + T1); \
        B = SPH_T32(T1 + T2); \
        W07 = SPH_T32(SSG2_1(W05) + W00 + SSG2_0(W08) + W07); \
        T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
            + SPH_C32(0x682E6FF3) + W07); \
        T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
        E = SPH_T32(E + T1); \
        A = SPH_T32(T1 + T2); \
        W08 = SPH_T32(SSG2_1(W06) + W01 + SSG2_0(W09) + W08); \
        T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
            + SPH_C32(0x748F82EE) + W08); \
        T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
        D = SPH_T32(D + T1); \
        H = SPH_T32(T1 + T2); \
        W09 = SPH_T32(SSG2_1(W07) + W02 + SSG2_0(W10) + W09); \
        T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
            + SPH_C32(0x78A5636F) + W09); \
        T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
        C = SPH_T32(C + T1); \
        G = SPH_T32(T1 + T2); \
        W10 = SPH_T32(SSG2_1(W08) + W03 + SSG2_0(W11) + W10); \
        T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
            + SPH_C32(0x84C87814) + W10); \
        T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
        B = SPH_T32(B + T1); \
        F = SPH_T32(T1 + T2); \
        W11 = SPH_T32(SSG2_1(W09) + W04 + SSG2_0(W12) + W11); \
        T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
            + SPH_C32(0x8CC70208) + W11); \
        T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
        A = SPH_T32(A + T1); \
        E = SPH_T32(T1 + T2); \
        W12 = SPH_T32(SSG2_1(W10) + W05 + SSG2_0(W13) + W12); \
        T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
            + SPH_C32(0x90BEFFFA) + W12); \
        T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
        H = SPH_T32(H + T1); \
        D = SPH_T32(T1 + T2); \
        W13 = SPH_T32(SSG2_1(W11) + W06 + SSG2_0(W14) + W13); \
        T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
            + SPH_C32(0xA4506CEB) + W13); \
        T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
        G = SPH_T32(G + T1); \
        C = SPH_T32(T1 + T2); \
        W14 = SPH_T32(SSG2_1(W12) + W07 + SSG2_0(W15) + W14); \
        T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
            + SPH_C32(0xBEF9A3F7) + W14); \
        T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
        F = SPH_T32(F + T1); \
        B = SPH_T32(T1 + T2); \
        W15 = SPH_T32(SSG2_1(W13) + W08 + SSG2_0(W00) + W15); \
        T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
            + SPH_C32(0xC67178F2) + W15); \
        T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
        E = SPH_T32(E + T1); \
        A = SPH_T32(T1 + T2); \
        (r)[0] = SPH_T32((r)[0] + A); \
        (r)[1] = SPH_T32((r)[1] + B); \
        (r)[2] = SPH_T32((r)[2] + C); \
        (r)[3] = SPH_T32((r)[3] + D); \
        (r)[4] = SPH_T32((r)[4] + E); \
        (r)[5] = SPH_T32((r)[5] + F); \
        (r)[6] = SPH_T32((r)[6] + G); \
        (r)[7] = SPH_T32((r)[7] + H); \
/* for (i=0;i<4;i++) {printf("r[%d]=%08x r[%d]=%08x\n",2*i,(r)[2*i],2*i+1,(r)[2*i+1]);} */ \
    } while (0)

#endif

/*
 * One round of SHA-224 / SHA-256. The data must be aligned for 32-bit access.
 */
static void
sha2_round(const unsigned char *data, sph_u32 r[8])
{
#define SHA2_IN(x)   sph_dec32be_aligned(data + (4 * (x)))
    SHA2_ROUND_BODY(SHA2_IN, r);
#undef SHA2_IN
}

/* see sph_sha2.h */
void
sph_sha224_init(void *cc)
{
    sph_sha224_context *sc;

    sc = cc;
    memcpy(sc->val, H224, sizeof H224);
#if SPH_64
    sc->count = 0;
#else
    sc->count_high = sc->count_low = 0;
#endif
}

/* see sph_sha2.h */
void
sph_sha256_init(void *cc)
{
    sph_sha256_context *sc;

    sc = cc;
    memcpy(sc->val, H256, sizeof H256);
#if SPH_64
    sc->count = 0;
#else
    sc->count_high = sc->count_low = 0;
#endif
}

#define RFUN   sha2_round
#define HASH   sha224
#define BE32   1
#include "md_helper.c"
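/*
 * Including md_helper.c with RFUN, HASH and BE32 defined instantiates
 * the shared Merkle-Damgard buffering and padding code for this hash:
 * the static helpers sha224_close() and sha224_addbits_and_close()
 * invoked below come from that expansion (a description based on how
 * they are used here), parameterized for big-endian 32-bit word
 * encoding.
 */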

/* see sph_sha2.h */
void
sph_sha224_close(void *cc, void *dst)
{
    sha224_close(cc, dst, 7);
//  sph_sha224_init(cc);
}

/* see sph_sha2.h */
void
sph_sha224_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
    sha224_addbits_and_close(cc, ub, n, dst, 7);
//  sph_sha224_init(cc);
}

/* see sph_sha2.h */
void
sph_sha256_close(void *cc, void *dst)
{
    sha224_close(cc, dst, 8);
//  sph_sha256_init(cc);
}

/* see sph_sha2.h */
void
sph_sha256_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
    sha224_addbits_and_close(cc, ub, n, dst, 8);
//  sph_sha256_init(cc);
}
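/*
 * Usage sketch (illustrative, not part of the original file): hashing
 * a buffer with the streaming API declared in sph_sha2.h, assuming
 * sph_sha256() is the update function declared there. Since the close
 * functions above leave the context re-initialization commented out,
 * a context must be explicitly re-initialized before reuse.
 */
#if 0
static void
example_sha256(const void *data, size_t len, unsigned char out[32])
{
    sph_sha256_context cc;

    sph_sha256_init(&cc);
    sph_sha256(&cc, data, len);
    sph_sha256_close(&cc, out);
}
#endif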

/* see sph_sha2.h */
void
sph_sha224_comp(const sph_u32 msg[16], sph_u32 val[8])
{
#define SHA2_IN(x)   msg[x]
    SHA2_ROUND_BODY(SHA2_IN, val);
#undef SHA2_IN
}

#ifdef __cplusplus
}
#endif