ASCON Suite
ascon-x3-c32.c
/*
 * Copyright (C) 2022 Southern Storm Software, Pty Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "ascon-masked-state.h"
#include "ascon-masked-backend.h"
#include "core/ascon-util.h"

#if defined(ASCON_MASKED_X3_BACKEND_C32) && ASCON_MASKED_MAX_SHARES >= 3
/* Computes x ^= (~y & z) with a 3-share masked word representation,
 * where shares 1 and 2 are stored rotated with respect to share 0.
 * The suffix w selects the even (e) or odd (o) half of the word. */
#define and_not_xor(x, y, z, w) \
    do { \
        x##_a##w ^= (~(y##_a##w) & z##_a##w); \
        x##_a##w ^= ((y##_a##w) & ascon_mask32_unrotate_share1_0(z##_b##w)); \
        x##_a##w ^= (y##_a##w & ascon_mask32_unrotate_share2_0(z##_c##w)); \
        \
        x##_b##w ^= (y##_b##w & ascon_mask32_rotate_share1_0(z##_a##w)); \
        x##_b##w ^= ((~y##_b##w) & z##_b##w); \
        x##_b##w ^= (y##_b##w & ascon_mask32_unrotate_share2_1(z##_c##w)); \
        \
        x##_c##w ^= (y##_c##w & ascon_mask32_rotate_share2_0(~z##_a##w)); \
        x##_c##w ^= (y##_c##w & ascon_mask32_rotate_share2_1(z##_b##w)); \
        x##_c##w ^= (y##_c##w | z##_c##w); \
    } while (0)

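To see why the nine masked AND/OR terms above recombine to the intended value, here is a hedged standalone sketch (not part of this file): the unmasked Chi step that each and_not_xor() call implements, next to a naive 3-share expansion with the share rotations dropped for readability. chi_step, chi_step_masked, and the test values are illustrative names only; XOR-ing the three output shares reproduces the unmasked result.

#include <assert.h>
#include <stdint.h>

/* Unmasked reference: the operation each and_not_xor() call implements. */
static uint32_t chi_step(uint32_t x, uint32_t y, uint32_t z)
{
    return x ^ (~y & z);
}

/* Naive 3-share expansion of the same step, without the rotations that
 * the real macro applies to shares 1 and 2.  The NOT is folded into one
 * term per share so that the XOR of the three shares stays correct. */
static void chi_step_masked(uint32_t x[3], const uint32_t y[3], const uint32_t z[3])
{
    x[0] ^= (~y[0] & z[0]) ^ (y[0] & z[1]) ^ (y[0] & z[2]);
    x[1] ^= (y[1] & z[0]) ^ (~y[1] & z[1]) ^ (y[1] & z[2]);
    x[2] ^= (y[2] & ~z[0]) ^ (y[2] & z[1]) ^ (y[2] | z[2]);
}

int main(void)
{
    /* Arbitrary values split into three shares each */
    uint32_t x[3] = { 0x12345678, 0x9ABCDEF0, 0x0F1E2D3C };
    uint32_t y[3] = { 0x11111111, 0x22222222, 0x44444444 };
    uint32_t z[3] = { 0xA5A5A5A5, 0x5A5A5A5A, 0xFFFF0000 };
    uint32_t xv = x[0] ^ x[1] ^ x[2];
    uint32_t yv = y[0] ^ y[1] ^ y[2];
    uint32_t zv = z[0] ^ z[1] ^ z[2];
    chi_step_masked(x, y, z);
    assert((x[0] ^ x[1] ^ x[2]) == chi_step(xv, yv, zv));
    return 0;
}
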
/* Applies the linear diffusion layer to one share (w = a, b, or c)
 * of the bit-interleaved state, even and odd halves together. */
#define linear(w) \
    do { \
        t0 = x0_##w##e ^ rightRotate4(x0_##w##o); \
        t1 = x0_##w##o ^ rightRotate5(x0_##w##e); \
        t2 = x1_##w##e ^ rightRotate11(x1_##w##e); \
        t3 = x1_##w##o ^ rightRotate11(x1_##w##o); \
        t4 = x2_##w##e ^ rightRotate2(x2_##w##o); \
        t5 = x2_##w##o ^ rightRotate3(x2_##w##e); \
        t6 = x3_##w##e ^ rightRotate3(x3_##w##o); \
        t7 = x3_##w##o ^ rightRotate4(x3_##w##e); \
        t8 = x4_##w##e ^ rightRotate17(x4_##w##e); \
        t9 = x4_##w##o ^ rightRotate17(x4_##w##o); \
        x0_##w##e ^= rightRotate9(t1); \
        x0_##w##o ^= rightRotate10(t0); \
        x1_##w##e ^= rightRotate19(t3); \
        x1_##w##o ^= rightRotate20(t2); \
        x2_##w##e ^= t5; \
        x2_##w##o ^= rightRotate1(t4); \
        x3_##w##e ^= rightRotate5(t6); \
        x3_##w##o ^= rightRotate5(t7); \
        x4_##w##e ^= rightRotate3(t9); \
        x4_##w##o ^= rightRotate4(t8); \
    } while (0)

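The even/odd suffixes exist because this backend keeps each 64-bit ASCON word bit-interleaved across two 32-bit words: a 64-bit rotation by 2k becomes two 32-bit rotations by k, while a rotation by 2k+1 swaps the halves with rotations by k and k+1. A hedged standalone sketch of that identity follows (the helper names are illustrative, not from this file):

#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t x, unsigned n) { return (x >> n) | (x << (32u - n)); }
static uint64_t ror64(uint64_t x, unsigned n) { return (x >> n) | (x << (64u - n)); }

/* Split a 64-bit word into its even-indexed and odd-indexed bits. */
static void bit_interleave(uint64_t x, uint32_t *e, uint32_t *o)
{
    uint32_t ev = 0, od = 0;
    for (unsigned i = 0; i < 32; ++i) {
        ev |= (uint32_t)((x >> (2 * i)) & 1) << i;
        od |= (uint32_t)((x >> (2 * i + 1)) & 1) << i;
    }
    *e = ev;
    *o = od;
}

int main(void)
{
    uint64_t x = 0x0123456789ABCDEFull;
    uint32_t e, o, re, ro;
    bit_interleave(x, &e, &o);
    /* ror64(x, 19), with 19 = 2*9 + 1, in the interleaved domain:
     * even half <- ror32(odd, 9), odd half <- ror32(even, 10),
     * matching the rightRotate9/rightRotate10 pair used for x0. */
    bit_interleave(ror64(x, 19), &re, &ro);
    printf("even ok: %d, odd ok: %d\n", re == ror32(o, 9), ro == ror32(e, 10));
    return 0;
}
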
/* Generate a pair of pre-inverted round constants so that we can
 * avoid NOT'ing x2 in the S-box during the rounds */
#define ROUND_CONSTANT_PAIR(rc1, rc2) \
    (~((uint32_t)(rc1))), (~((uint32_t)(rc2)))

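The identity behind the pre-inversion is simply that NOT is XOR with all-ones: ~(x2 ^ rc) == x2 ^ ~rc, so inverting the constant once at compile time absorbs the S-box's x2 = ~x2 into the XOR that happens anyway. A quick standalone check with illustrative values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t x2 = 0xDEADBEEF, rc = 12;
    /* ~(x2 ^ rc) == x2 ^ ~rc: inverting the constant once
     * replaces a NOT of x2 in every round. */
    assert((uint32_t)~(x2 ^ rc) == (x2 ^ (uint32_t)~rc));
    return 0;
}
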
void ascon_x3_permute
    (ascon_masked_state_t *state, uint8_t first_round, uint64_t *preserve)
{
    static const uint32_t RC[12 * 2] = {
        ROUND_CONSTANT_PAIR(12, 12),
        ROUND_CONSTANT_PAIR( 9, 12),
        ROUND_CONSTANT_PAIR(12, 9),
        ROUND_CONSTANT_PAIR( 9, 9),
        ROUND_CONSTANT_PAIR( 6, 12),
        ROUND_CONSTANT_PAIR( 3, 12),
        ROUND_CONSTANT_PAIR( 6, 9),
        ROUND_CONSTANT_PAIR( 3, 9),
        ROUND_CONSTANT_PAIR(12, 6),
        ROUND_CONSTANT_PAIR( 9, 6),
        ROUND_CONSTANT_PAIR(12, 3),
        ROUND_CONSTANT_PAIR( 9, 3)
    };
    const uint32_t *rc = RC + first_round * 2;
    uint32_t x0_ae, x1_ae, x2_ae, x3_ae, x4_ae;
    uint32_t x0_ao, x1_ao, x2_ao, x3_ao, x4_ao;
    uint32_t x0_be, x1_be, x2_be, x3_be, x4_be;
    uint32_t x0_bo, x1_bo, x2_bo, x3_bo, x4_bo;
    uint32_t x0_ce, x1_ce, x2_ce, x3_ce, x4_ce;
    uint32_t x0_co, x1_co, x2_co, x3_co, x4_co;
    uint32_t t0_ao, t0_bo, t0_co, t1_ao, t1_bo, t1_co;
    uint32_t t0_ae, t0_be, t0_ce, t1_ae, t1_be, t1_ce;
    uint32_t t8, t9;

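    /* Naming scheme for the locals: x<row>_<share><half>, where the
     * share is a, b, or c and the half is e (even bits) or o (odd bits)
     * of the bit-interleaved 64-bit row, so each masked row occupies
     * the six 32-bit words W[0..5]. */
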
    /* Start with the randomness that the caller provided */
    t0_ae = ((uint32_t *)preserve)[0];
    t0_ao = ((uint32_t *)preserve)[1];
    t0_be = ((uint32_t *)preserve)[2];
    t0_bo = ((uint32_t *)preserve)[3];

    /* Load the state into local variables */
    x0_ae = state->M[0].W[0];
    x0_ao = state->M[0].W[1];
    x0_be = state->M[0].W[2];
    x0_bo = state->M[0].W[3];
    x0_ce = state->M[0].W[4];
    x0_co = state->M[0].W[5];
    x1_ae = state->M[1].W[0];
    x1_ao = state->M[1].W[1];
    x1_be = state->M[1].W[2];
    x1_bo = state->M[1].W[3];
    x1_ce = state->M[1].W[4];
    x1_co = state->M[1].W[5];
    x2_ae = state->M[2].W[0];
    x2_ao = state->M[2].W[1];
    x2_be = state->M[2].W[2];
    x2_bo = state->M[2].W[3];
    x2_ce = state->M[2].W[4];
    x2_co = state->M[2].W[5];
    x3_ae = state->M[3].W[0];
    x3_ao = state->M[3].W[1];
    x3_be = state->M[3].W[2];
    x3_bo = state->M[3].W[3];
    x3_ce = state->M[3].W[4];
    x3_co = state->M[3].W[5];
    x4_ae = state->M[4].W[0];
    x4_ao = state->M[4].W[1];
    x4_be = state->M[4].W[2];
    x4_bo = state->M[4].W[3];
    x4_ce = state->M[4].W[4];
    x4_co = state->M[4].W[5];

    /* The round constants invert x2 as part of the rounds so that we
     * don't need an explicit "x2 = ~x2" step in the S-box. Pre-invert
     * x2 before the first round to compensate. */
    x2_ae = ~x2_ae;
    x2_ao = ~x2_ao;

    /* Perform all encryption rounds */
    while (first_round++ < 12) {
        /* Substitution layer, even words */

        /* Add the inverted round constant to x2 */
        x2_ae ^= *rc++;

        /* Start of the substitution layer, first share */
        x0_ae ^= x4_ae;
        x4_ae ^= x3_ae;
        x2_ae ^= x1_ae;
        t1_ae = x0_ae;

        /* Start of the substitution layer, second share */
        x0_be ^= x4_be;
        x4_be ^= x3_be;
        x2_be ^= x1_be;
        t1_be = x0_be;

        /* Start of the substitution layer, third share */
        x0_ce ^= x4_ce;
        x4_ce ^= x3_ce;
        x2_ce ^= x1_ce;
        t1_ce = x0_ce;

        /* Middle part of the substitution layer, Chi5 */
        t0_ce = ascon_mask32_rotate_share2_0(t0_ae) ^ /* t0 = random shares */
                ascon_mask32_rotate_share2_1(t0_be);
        and_not_xor(t0, x0, x1, e);         /* t0 ^= (~x0) & x1; */
        and_not_xor(x0, x1, x2, e);         /* x0 ^= (~x1) & x2; */
        and_not_xor(x1, x2, x3, e);         /* x1 ^= (~x2) & x3; */
        and_not_xor(x2, x3, x4, e);         /* x2 ^= (~x3) & x4; */
        and_not_xor(x3, x4, t1, e);         /* x3 ^= (~x4) & t1; */
        x4_ae ^= t0_ae;                     /* x4 ^= t0; */
        x4_be ^= t0_be;
        x4_ce ^= t0_ce;

        /* End of the substitution layer */
        x1_ae ^= x0_ae;
        x0_ae ^= x4_ae;
        x3_ae ^= x2_ae;
        x1_be ^= x0_be;
        x0_be ^= x4_be;
        x3_be ^= x2_be;
        x1_ce ^= x0_ce;
        x0_ce ^= x4_ce;
        x3_ce ^= x2_ce;

        /* NOT'ing x2 is done as part of the next round constant */
        /* x2_ae = ~x2_ae; */

        /* Substitution layer, odd words */

        /* Add the inverted round constant to x2 */
        x2_ao ^= *rc++;

        /* Start of the substitution layer, first share */
        x0_ao ^= x4_ao;
        x4_ao ^= x3_ao;
        x2_ao ^= x1_ao;
        t1_ao = x0_ao;

        /* Start of the substitution layer, second share */
        x0_bo ^= x4_bo;
        x4_bo ^= x3_bo;
        x2_bo ^= x1_bo;
        t1_bo = x0_bo;

        /* Start of the substitution layer, third share */
        x0_co ^= x4_co;
        x4_co ^= x3_co;
        x2_co ^= x1_co;
        t1_co = x0_co;

        /* Middle part of the substitution layer, Chi5 */
        t0_co = ascon_mask32_rotate_share2_0(t0_ao) ^ /* t0 = random shares */
                ascon_mask32_rotate_share2_1(t0_bo);
        and_not_xor(t0, x0, x1, o);         /* t0 ^= (~x0) & x1; */
        and_not_xor(x0, x1, x2, o);         /* x0 ^= (~x1) & x2; */
        and_not_xor(x1, x2, x3, o);         /* x1 ^= (~x2) & x3; */
        and_not_xor(x2, x3, x4, o);         /* x2 ^= (~x3) & x4; */
        and_not_xor(x3, x4, t1, o);         /* x3 ^= (~x4) & t1; */
        x4_ao ^= t0_ao;                     /* x4 ^= t0; */
        x4_bo ^= t0_bo;
        x4_co ^= t0_co;

        /* End of the substitution layer */
        x1_ao ^= x0_ao;
        x0_ao ^= x4_ao;
        x3_ao ^= x2_ao;
        x1_bo ^= x0_bo;
        x0_bo ^= x4_bo;
        x3_bo ^= x2_bo;
        x1_co ^= x0_co;
        x0_co ^= x4_co;
        x3_co ^= x2_co;

        /* NOT'ing x2 is done as part of the next round constant */
        /* x2_ao = ~x2_ao; */

        /* Linear diffusion layer on each of the shares. Reuse some of
         * the temporaries from substitution that we no longer require. */
        #define t0 t0_ce
        #define t1 t0_co
        #define t2 t1_ao
        #define t3 t1_bo
        #define t4 t1_co
        #define t5 t1_ae
        #define t6 t1_be
        #define t7 t1_ce
        linear(c);
        linear(b);
        linear(a);

        /* Rotate the randomness in t0 before the next round */
        t0_ae = rightRotate7(t0_ae);
        t0_ao = rightRotate7(t0_ao);
        t0_be = rightRotate13(t0_be);
        t0_bo = rightRotate13(t0_bo);
    }

    /* Return the final randomness to the caller to preserve it */
    ((uint32_t *)preserve)[0] = t0_ae;
    ((uint32_t *)preserve)[1] = t0_ao;
    ((uint32_t *)preserve)[2] = t0_be;
    ((uint32_t *)preserve)[3] = t0_bo;

    /* Store the local variables back to the state with a final invert of x2 */
    state->M[0].W[0] = x0_ae;
    state->M[0].W[1] = x0_ao;
    state->M[0].W[2] = x0_be;
    state->M[0].W[3] = x0_bo;
    state->M[0].W[4] = x0_ce;
    state->M[0].W[5] = x0_co;
    state->M[1].W[0] = x1_ae;
    state->M[1].W[1] = x1_ao;
    state->M[1].W[2] = x1_be;
    state->M[1].W[3] = x1_bo;
    state->M[1].W[4] = x1_ce;
    state->M[1].W[5] = x1_co;
    state->M[2].W[0] = ~x2_ae;
    state->M[2].W[1] = ~x2_ao;
    state->M[2].W[2] = x2_be;
    state->M[2].W[3] = x2_bo;
    state->M[2].W[4] = x2_ce;
    state->M[2].W[5] = x2_co;
    state->M[3].W[0] = x3_ae;
    state->M[3].W[1] = x3_ao;
    state->M[3].W[2] = x3_be;
    state->M[3].W[3] = x3_bo;
    state->M[3].W[4] = x3_ce;
    state->M[3].W[5] = x3_co;
    state->M[4].W[0] = x4_ae;
    state->M[4].W[1] = x4_ao;
    state->M[4].W[2] = x4_be;
    state->M[4].W[3] = x4_bo;
    state->M[4].W[4] = x4_ce;
    state->M[4].W[5] = x4_co;
}

#endif /* ASCON_MASKED_X3_BACKEND_C32 */
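
Usage note (a hedged sketch, not from this file): callers seed preserve with fresh randomness before the call, since the permutation consumes four 32-bit words from it and writes the rotated randomness back for a subsequent call. fill_random() below is a hypothetical placeholder for the platform's RNG; the ascon_x3_permute signature itself matches the definition above.

#include "ascon-masked-state.h"
#include <stddef.h>
#include <stdint.h>

extern void fill_random(void *buf, size_t len); /* hypothetical platform RNG */

static void permute_all_rounds(ascon_masked_state_t *st)
{
    uint64_t preserve[2]; /* four 32-bit words, as consumed above */
    fill_random(preserve, sizeof(preserve));
    ascon_x3_permute(st, 0, preserve); /* first_round = 0 runs all 12 rounds */
    /* preserve now holds the rotated randomness; it may be reused
     * for the next call or discarded */
}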