ASCON Suite
ascon-x2-c32.c
/*
 * Copyright (C) 2022 Southern Storm Software, Pty Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "ascon-masked-state.h"
#include "ascon-masked-backend.h"
#include "core/ascon-util.h"

#if defined(ASCON_MASKED_X2_BACKEND_C32)
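
/* Computes x ^= (~y & z) on one word of the 2-share masked representation.
 * The "w" argument selects the even ("e") or odd ("o") word of each share,
 * and the cross terms rotate or unrotate the second share, which is stored
 * rotated with respect to the first, before the shares are combined. */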
#define and_not_xor(x, y, z, w) \
    do { \
        x##_a##w ^= ((~y##_a##w) & ascon_mask32_unrotate_share1_0(z##_b##w)); \
        x##_a##w ^= ((~y##_a##w) & z##_a##w); \
        x##_b##w ^= (y##_b##w & z##_b##w); \
        x##_b##w ^= (y##_b##w & ascon_mask32_rotate_share1_0(z##_a##w)); \
    } while (0)

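/* Applies the linear diffusion layer to one share of the state. The "w"
 * argument selects the share ("a" or "b"); each row of the share is held
 * as two bit-interleaved 32-bit words ("e" = even bits, "o" = odd bits),
 * so each 64-bit rotation becomes a pair of 32-bit rotations. */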
#define linear(w) \
    do { \
        t0 = x0_##w##e ^ rightRotate4(x0_##w##o); \
        t1 = x0_##w##o ^ rightRotate5(x0_##w##e); \
        t2 = x1_##w##e ^ rightRotate11(x1_##w##e); \
        t3 = x1_##w##o ^ rightRotate11(x1_##w##o); \
        t4 = x2_##w##e ^ rightRotate2(x2_##w##o); \
        t5 = x2_##w##o ^ rightRotate3(x2_##w##e); \
        t6 = x3_##w##e ^ rightRotate3(x3_##w##o); \
        t7 = x3_##w##o ^ rightRotate4(x3_##w##e); \
        t8 = x4_##w##e ^ rightRotate17(x4_##w##e); \
        t9 = x4_##w##o ^ rightRotate17(x4_##w##o); \
        x0_##w##e ^= rightRotate9(t1); \
        x0_##w##o ^= rightRotate10(t0); \
        x1_##w##e ^= rightRotate19(t3); \
        x1_##w##o ^= rightRotate20(t2); \
        x2_##w##e ^= t5; \
        x2_##w##o ^= rightRotate1(t4); \
        x3_##w##e ^= rightRotate5(t6); \
        x3_##w##o ^= rightRotate5(t7); \
        x4_##w##e ^= rightRotate3(t9); \
        x4_##w##o ^= rightRotate4(t8); \
    } while (0)

/* Generate a pair of pre-inverted round constants so that we can
 * avoid NOT'ing x2 in the S-box during the rounds */
#define ROUND_CONSTANT_PAIR(rc1, rc2) \
    (~((uint32_t)(rc1))), (~((uint32_t)(rc2)))

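/* Permutes the ASCON-x2 masked state, running the rounds from "first_round"
 * to round 11. The caller supplies 64 bits of randomness in "preserve"; it
 * is used as the random share material in the substitution layer and the
 * rotated value is written back so it can be reused on the next call. */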
void ascon_x2_permute
    (ascon_masked_state_t *state, uint8_t first_round, uint64_t *preserve)
{
    static const uint32_t RC[12 * 2] = {
        ROUND_CONSTANT_PAIR(12, 12),
        ROUND_CONSTANT_PAIR( 9, 12),
        ROUND_CONSTANT_PAIR(12, 9),
        ROUND_CONSTANT_PAIR( 9, 9),
        ROUND_CONSTANT_PAIR( 6, 12),
        ROUND_CONSTANT_PAIR( 3, 12),
        ROUND_CONSTANT_PAIR( 6, 9),
        ROUND_CONSTANT_PAIR( 3, 9),
        ROUND_CONSTANT_PAIR(12, 6),
        ROUND_CONSTANT_PAIR( 9, 6),
        ROUND_CONSTANT_PAIR(12, 3),
        ROUND_CONSTANT_PAIR( 9, 3)
    };
    const uint32_t *rc = RC + first_round * 2;
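    /* Local copies of the state words: "a" and "b" select the two shares,
     * "e" and "o" select the even or odd bit-interleaved 32-bit word */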
    uint32_t x0_ae, x1_ae, x2_ae, x3_ae, x4_ae;
    uint32_t x0_ao, x1_ao, x2_ao, x3_ao, x4_ao;
    uint32_t x0_be, x1_be, x2_be, x3_be, x4_be;
    uint32_t x0_bo, x1_bo, x2_bo, x3_bo, x4_bo;
    uint32_t t0_ao, t0_bo, t1_ao, t1_bo;
    uint32_t t0_ae, t0_be, t1_ae, t1_be;
    uint32_t t6, t7, t8, t9;

    /* Start with the randomness that the caller provided */
    t0_ae = ((uint32_t *)preserve)[0];
    t0_ao = ((uint32_t *)preserve)[1];

    /* Load the state into local variables */
    x0_ae = state->M[0].W[0];
    x0_ao = state->M[0].W[1];
    x0_be = state->M[0].W[2];
    x0_bo = state->M[0].W[3];
    x1_ae = state->M[1].W[0];
    x1_ao = state->M[1].W[1];
    x1_be = state->M[1].W[2];
    x1_bo = state->M[1].W[3];
    x2_ae = state->M[2].W[0];
    x2_ao = state->M[2].W[1];
    x2_be = state->M[2].W[2];
    x2_bo = state->M[2].W[3];
    x3_ae = state->M[3].W[0];
    x3_ao = state->M[3].W[1];
    x3_be = state->M[3].W[2];
    x3_bo = state->M[3].W[3];
    x4_ae = state->M[4].W[0];
    x4_ao = state->M[4].W[1];
    x4_be = state->M[4].W[2];
    x4_bo = state->M[4].W[3];

    /* The round constants invert x2 as part of the rounds so that we
     * don't need an explicit "x2 = ~x2" step in the S-box. Pre-invert
     * x2 before the first round to compensate. */
    x2_ae = ~x2_ae;
    x2_ao = ~x2_ao;

    /* Perform all encryption rounds */
    while (first_round++ < 12) {
        /* Substitution layer, even words */

        /* Add the inverted round constant to x2 */
        x2_ae ^= *rc++;

        /* Start of the substitution layer, first share */
        x0_ae ^= x4_ae;
        x4_ae ^= x3_ae;
        x2_ae ^= x1_ae;
        t1_ae = x0_ae;

        /* Start of the substitution layer, second share */
        x0_be ^= x4_be;
        x4_be ^= x3_be;
        x2_be ^= x1_be;
        t1_be = x0_be;

        /* Middle part of the substitution layer, Chi5 */
        t0_be = ascon_mask32_rotate_share1_0(t0_ae); /* t0 = random shares */
        and_not_xor(t0, x0, x1, e);     /* t0 ^= (~x0) & x1; */
        and_not_xor(x0, x1, x2, e);     /* x0 ^= (~x1) & x2; */
        and_not_xor(x1, x2, x3, e);     /* x1 ^= (~x2) & x3; */
        and_not_xor(x2, x3, x4, e);     /* x2 ^= (~x3) & x4; */
        and_not_xor(x3, x4, t1, e);     /* x3 ^= (~x4) & t1; */
        x4_ae ^= t0_ae;                 /* x4 ^= t0; */
        x4_be ^= t0_be;

        /* End of the substitution layer */
        x1_ae ^= x0_ae;
        x0_ae ^= x4_ae;
        x3_ae ^= x2_ae;
        x1_be ^= x0_be;
        x0_be ^= x4_be;
        x3_be ^= x2_be;

        /* NOT'ing x2 is done as part of the next round constant */
        /* x2_ae = ~x2_ae; */

        /* Substitution layer, odd words */

        /* Add the inverted round constant to x2 */
        x2_ao ^= *rc++;

        /* Start of the substitution layer, first share */
        x0_ao ^= x4_ao;
        x4_ao ^= x3_ao;
        x2_ao ^= x1_ao;
        t1_ao = x0_ao;

        /* Start of the substitution layer, second share */
        x0_bo ^= x4_bo;
        x4_bo ^= x3_bo;
        x2_bo ^= x1_bo;
        t1_bo = x0_bo;

        /* Middle part of the substitution layer, Chi5 */
        t0_bo = ascon_mask32_rotate_share1_0(t0_ao); /* t0 = random shares */
        and_not_xor(t0, x0, x1, o);     /* t0 ^= (~x0) & x1; */
        and_not_xor(x0, x1, x2, o);     /* x0 ^= (~x1) & x2; */
        and_not_xor(x1, x2, x3, o);     /* x1 ^= (~x2) & x3; */
        and_not_xor(x2, x3, x4, o);     /* x2 ^= (~x3) & x4; */
        and_not_xor(x3, x4, t1, o);     /* x3 ^= (~x4) & t1; */
        x4_ao ^= t0_ao;                 /* x4 ^= t0; */
        x4_bo ^= t0_bo;

        /* End of the substitution layer */
        x1_ao ^= x0_ao;
        x0_ao ^= x4_ao;
        x3_ao ^= x2_ao;
        x1_bo ^= x0_bo;
        x0_bo ^= x4_bo;
        x3_bo ^= x2_bo;

        /* NOT'ing x2 is done as part of the next round constant */
        /* x2_ao = ~x2_ao; */

        /* Linear diffusion layer on each of the shares. Reuse some of
         * the temporaries from substitution that we no longer require. */
        #define t0 t0_bo
        #define t1 t1_ao
        #define t2 t1_bo
        #define t3 t0_be
        #define t4 t1_ae
        #define t5 t1_be
        linear(b);
        linear(a);

        /* Rotate the randomness in t0 before the next round */
        t0_ae = rightRotate7(t0_ae);
        t0_ao = rightRotate7(t0_ao);
    }

    /* Return the final randomness to the caller to preserve it */
    ((uint32_t *)preserve)[0] = t0_ae;
    ((uint32_t *)preserve)[1] = t0_ao;

    /* Store the local variables back to the state with a final invert of x2 */
    state->M[0].W[0] = x0_ae;
    state->M[0].W[1] = x0_ao;
    state->M[0].W[2] = x0_be;
    state->M[0].W[3] = x0_bo;
    state->M[1].W[0] = x1_ae;
    state->M[1].W[1] = x1_ao;
    state->M[1].W[2] = x1_be;
    state->M[1].W[3] = x1_bo;
    state->M[2].W[0] = ~x2_ae;
    state->M[2].W[1] = ~x2_ao;
    state->M[2].W[2] = x2_be;
    state->M[2].W[3] = x2_bo;
    state->M[3].W[0] = x3_ae;
    state->M[3].W[1] = x3_ao;
    state->M[3].W[2] = x3_be;
    state->M[3].W[3] = x3_bo;
    state->M[4].W[0] = x4_ae;
    state->M[4].W[1] = x4_ao;
    state->M[4].W[2] = x4_be;
    state->M[4].W[3] = x4_bo;
}

#endif /* ASCON_MASKED_X2_BACKEND_C32 */
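
The sketch below is a minimal, illustrative caller for this backend, assuming that ascon_masked_state_t and the ascon_x2_permute prototype are exposed through ascon-masked-state.h as they are in this file. The zeroed state and the fixed preserve value are placeholders only; real code must load properly masked shares and draw the preserve randomness from the system random number generator, normally via the library's own masking helpers rather than by writing the words directly.

#include <stdint.h>
#include <string.h>
#include "ascon-masked-state.h"

/* Hypothetical driver, not part of the library. */
static void example_x2_permute(void)
{
    ascon_masked_state_t state;
    uint64_t preserve;

    /* Placeholder initialisation: a real caller loads masked shares into
     * the state and fresh TRNG output into preserve. */
    memset(&state, 0, sizeof(state));
    preserve = 0x0123456789ABCDEFULL;

    /* Run all 12 rounds (first_round = 0). The rotated randomness is
     * written back into preserve for use by the next call. */
    ascon_x2_permute(&state, 0, &preserve);
}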