ASCON Suite
ascon-x4-c64.c
1 /*
2  * Copyright (C) 2022 Southern Storm Software, Pty Ltd.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included
12  * in all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 
23 #include "ascon-masked-state.h"
24 #include "ascon-masked-backend.h"
25 #include "core/ascon-util.h"
26 
27 #if defined(ASCON_MASKED_X4_BACKEND_C64) && ASCON_MASKED_MAX_SHARES >= 4
28 
36 #define and_not_xor(x, y, z) \
37  do { \
38  x##_a ^= (~(y##_a) & z##_a); \
39  x##_a ^= (ascon_mask64_unrotate_share1_0(y##_b) & z##_a); \
40  x##_a ^= (ascon_mask64_unrotate_share2_0(y##_c) & z##_a); \
41  x##_a ^= (ascon_mask64_unrotate_share3_0(y##_d) & z##_a); \
42  \
43  x##_b ^= (ascon_mask64_rotate_share1_0(~(y##_a)) & z##_b); \
44  x##_b ^= (y##_b & z##_b); \
45  x##_b ^= (ascon_mask64_unrotate_share2_1(y##_c) & z##_b); \
46  x##_b ^= (ascon_mask64_unrotate_share3_1(y##_d) & z##_b); \
47  \
48  x##_c ^= (ascon_mask64_rotate_share2_0(~(y##_a)) & z##_c); \
49  x##_c ^= (ascon_mask64_rotate_share2_1(y##_b) & z##_c); \
50  x##_c ^= (y##_c & z##_c); \
51  x##_c ^= (ascon_mask64_unrotate_share3_2(y##_d) & z##_c); \
52  \
53  x##_d ^= (ascon_mask64_rotate_share3_0(~(y##_a)) & z##_d); \
54  x##_d ^= (ascon_mask64_rotate_share3_1(y##_b) & z##_d); \
55  x##_d ^= (ascon_mask64_rotate_share3_2(y##_c) & z##_d); \
56  x##_d ^= (y##_d & z##_d); \
57  } while (0)
58 
59 /* Generate a pre-inverted round constant so that we can
60  * avoid NOT'ing x2 in the S-box during the rounds */
61 #define ROUND_CONSTANT(round) \
62  (~(uint64_t)(((0x0F - (round)) << 4) | (round)))
63 
64 void ascon_x4_permute
65  (ascon_masked_state_t *state, uint8_t first_round, uint64_t preserve[3])
66 {
67  static const uint64_t RC[12] = {
68  ROUND_CONSTANT(0),
69  ROUND_CONSTANT(1),
70  ROUND_CONSTANT(2),
71  ROUND_CONSTANT(3),
72  ROUND_CONSTANT(4),
73  ROUND_CONSTANT(5),
74  ROUND_CONSTANT(6),
75  ROUND_CONSTANT(7),
76  ROUND_CONSTANT(8),
77  ROUND_CONSTANT(9),
78  ROUND_CONSTANT(10),
79  ROUND_CONSTANT(11)
80  };
81  uint64_t x0_a, x1_a, x2_a, x3_a, x4_a;
82  uint64_t x0_b, x1_b, x2_b, x3_b, x4_b;
83  uint64_t x0_c, x1_c, x2_c, x3_c, x4_c;
84  uint64_t x0_d, x1_d, x2_d, x3_d, x4_d;
85  uint64_t t0_a, t0_b, t0_c, t0_d;
86  uint64_t t1_a, t1_b, t1_c, t1_d;
87 
88  /* Start with the randomness that the caller provided */
89  t0_a = preserve[0];
90  t0_b = preserve[1];
91  t0_c = preserve[2];
92 
93  /* Load the state into local variables */
94 #if defined(ASCON_MASKED_WORD_BACKEND_DIRECT_XOR)
95  x0_a = be_load_word64(&(state->M[0].B[ 0]));
96  x0_b = be_load_word64(&(state->M[0].B[ 8]));
97  x0_c = be_load_word64(&(state->M[0].B[16]));
98  x0_d = be_load_word64(&(state->M[0].B[24]));
99  x1_a = be_load_word64(&(state->M[1].B[ 0]));
100  x1_b = be_load_word64(&(state->M[1].B[ 8]));
101  x1_c = be_load_word64(&(state->M[1].B[16]));
102  x1_d = be_load_word64(&(state->M[1].B[24]));
103  x2_a = be_load_word64(&(state->M[2].B[ 0]));
104  x2_b = be_load_word64(&(state->M[2].B[ 8]));
105  x2_c = be_load_word64(&(state->M[2].B[16]));
106  x2_d = be_load_word64(&(state->M[2].B[24]));
107  x3_a = be_load_word64(&(state->M[3].B[ 0]));
108  x3_b = be_load_word64(&(state->M[3].B[ 8]));
109  x3_c = be_load_word64(&(state->M[3].B[16]));
110  x3_d = be_load_word64(&(state->M[3].B[24]));
111  x4_a = be_load_word64(&(state->M[4].B[ 0]));
112  x4_b = be_load_word64(&(state->M[4].B[ 8]));
113  x4_c = be_load_word64(&(state->M[4].B[16]));
114  x4_d = be_load_word64(&(state->M[4].B[24]));
115 #else
116  x0_a = state->M[0].S[0];
117  x0_b = state->M[0].S[1];
118  x0_c = state->M[0].S[2];
119  x0_d = state->M[0].S[3];
120  x1_a = state->M[1].S[0];
121  x1_b = state->M[1].S[1];
122  x1_c = state->M[1].S[2];
123  x1_d = state->M[1].S[3];
124  x2_a = state->M[2].S[0];
125  x2_b = state->M[2].S[1];
126  x2_c = state->M[2].S[2];
127  x2_d = state->M[2].S[3];
128  x3_a = state->M[3].S[0];
129  x3_b = state->M[3].S[1];
130  x3_c = state->M[3].S[2];
131  x3_d = state->M[3].S[3];
132  x4_a = state->M[4].S[0];
133  x4_b = state->M[4].S[1];
134  x4_c = state->M[4].S[2];
135  x4_d = state->M[4].S[3];
136 #endif
137 
138  /* The round constants invert x2 as part of the rounds so that we
139  * don't need an explicit "x2 = ~x2" step in the S-box. Pre-invert
140  * x2 before the first round to compensate. */
141  x2_a = ~x2_a;
142 
143  /* Perform all of the requested permutation rounds */
144  while (first_round < 12) {
145  /* Add the inverted round constant to x2 */
146  x2_a ^= RC[first_round++];
147 
148  /* Start of the substitution layer, first share */
149  x0_a ^= x4_a;
150  x4_a ^= x3_a;
151  x2_a ^= x1_a;
152  t1_a = x0_a;
153 
154  /* Start of the substitution layer, second share */
155  x0_b ^= x4_b;
156  x4_b ^= x3_b;
157  x2_b ^= x1_b;
158  t1_b = x0_b;
159 
160  /* Start of the substitution layer, third share */
161  x0_c ^= x4_c;
162  x4_c ^= x3_c;
163  x2_c ^= x1_c;
164  t1_c = x0_c;
165 
166  /* Start of the substitution layer, fourth share */
167  x0_d ^= x4_d;
168  x4_d ^= x3_d;
169  x2_d ^= x1_d;
170  t1_d = x0_d;
171 
172  /* Middle part of the substitution layer, Chi5 */
173  t0_d = ascon_mask64_rotate_share3_0(t0_a) ^ /* t0 = random shares */
174  ascon_mask64_rotate_share3_1(t0_b) ^
175  ascon_mask64_rotate_share3_2(t0_c);
176  and_not_xor(t0, x0, x1); /* t0 ^= (~x0) & x1; */
177  and_not_xor(x0, x1, x2); /* x0 ^= (~x1) & x2; */
178  and_not_xor(x1, x2, x3); /* x1 ^= (~x2) & x3; */
179  and_not_xor(x2, x3, x4); /* x2 ^= (~x3) & x4; */
180  and_not_xor(x3, x4, t1); /* x3 ^= (~x4) & t1; */
181  x4_a ^= t0_a; /* x4 ^= t0; */
182  x4_b ^= t0_b;
183  x4_c ^= t0_c;
184  x4_d ^= t0_d;
185 
186  /* End of the substitution layer */
187  x1_a ^= x0_a;
188  x0_a ^= x4_a;
189  x3_a ^= x2_a;
190  x1_b ^= x0_b;
191  x0_b ^= x4_b;
192  x3_b ^= x2_b;
193  x1_c ^= x0_c;
194  x0_c ^= x4_c;
195  x3_c ^= x2_c;
196  x1_d ^= x0_d;
197  x0_d ^= x4_d;
198  x3_d ^= x2_d;
199 
200  /* NOT'ing x2 is done as part of the next round constant */
201  /* x2_a = ~x2_a; */
202 
203  /* Linear diffusion layer, fourth share */
204  x0_d ^= rightRotate19_64(x0_d) ^ rightRotate28_64(x0_d);
205  x1_d ^= rightRotate61_64(x1_d) ^ rightRotate39_64(x1_d);
206  x2_d ^= rightRotate1_64(x2_d) ^ rightRotate6_64(x2_d);
207  x3_d ^= rightRotate10_64(x3_d) ^ rightRotate17_64(x3_d);
208  x4_d ^= rightRotate7_64(x4_d) ^ rightRotate41_64(x4_d);
209 
210  /* Linear diffusion layer, third share */
211  x0_c ^= rightRotate19_64(x0_c) ^ rightRotate28_64(x0_c);
212  x1_c ^= rightRotate61_64(x1_c) ^ rightRotate39_64(x1_c);
213  x2_c ^= rightRotate1_64(x2_c) ^ rightRotate6_64(x2_c);
214  x3_c ^= rightRotate10_64(x3_c) ^ rightRotate17_64(x3_c);
215  x4_c ^= rightRotate7_64(x4_c) ^ rightRotate41_64(x4_c);
216 
217  /* Linear diffusion layer, second share */
218  x0_b ^= rightRotate19_64(x0_b) ^ rightRotate28_64(x0_b);
219  x1_b ^= rightRotate61_64(x1_b) ^ rightRotate39_64(x1_b);
220  x2_b ^= rightRotate1_64(x2_b) ^ rightRotate6_64(x2_b);
221  x3_b ^= rightRotate10_64(x3_b) ^ rightRotate17_64(x3_b);
222  x4_b ^= rightRotate7_64(x4_b) ^ rightRotate41_64(x4_b);
223 
224  /* Linear diffusion layer, first share */
225  x0_a ^= rightRotate19_64(x0_a) ^ rightRotate28_64(x0_a);
226  x1_a ^= rightRotate61_64(x1_a) ^ rightRotate39_64(x1_a);
227  x2_a ^= rightRotate1_64(x2_a) ^ rightRotate6_64(x2_a);
228  x3_a ^= rightRotate10_64(x3_a) ^ rightRotate17_64(x3_a);
229  x4_a ^= rightRotate7_64(x4_a) ^ rightRotate41_64(x4_a);
230 
231  /* Rotate the randomness in t0 before the next round */
232  t0_a = rightRotate13_64(t0_a);
233  t0_b = rightRotate29_64(t0_b);
234  t0_c = rightRotate59_64(t0_c);
235  }
236 
237  /* Return the final randomness to the caller to preserve it */
238  preserve[0] = t0_a;
239  preserve[1] = t0_b;
240  preserve[2] = t0_c;
241 
242  /* Store the local variables back to the state with a final invert of x2 */
243 #if defined(ASCON_MASKED_WORD_BACKEND_DIRECT_XOR)
244  be_store_word64(&(state->M[0].B[ 0]), x0_a);
245  be_store_word64(&(state->M[0].B[ 8]), x0_b);
246  be_store_word64(&(state->M[0].B[16]), x0_c);
247  be_store_word64(&(state->M[0].B[24]), x0_d);
248  be_store_word64(&(state->M[1].B[ 0]), x1_a);
249  be_store_word64(&(state->M[1].B[ 8]), x1_b);
250  be_store_word64(&(state->M[1].B[16]), x1_c);
251  be_store_word64(&(state->M[1].B[24]), x1_d);
252  be_store_word64(&(state->M[2].B[ 0]), ~x2_a);
253  be_store_word64(&(state->M[2].B[ 8]), x2_b);
254  be_store_word64(&(state->M[2].B[16]), x2_c);
255  be_store_word64(&(state->M[2].B[24]), x2_d);
256  be_store_word64(&(state->M[3].B[ 0]), x3_a);
257  be_store_word64(&(state->M[3].B[ 8]), x3_b);
258  be_store_word64(&(state->M[3].B[16]), x3_c);
259  be_store_word64(&(state->M[3].B[24]), x3_d);
260  be_store_word64(&(state->M[4].B[ 0]), x4_a);
261  be_store_word64(&(state->M[4].B[ 8]), x4_b);
262  be_store_word64(&(state->M[4].B[16]), x4_c);
263  be_store_word64(&(state->M[4].B[24]), x4_d);
264 #else
265  state->M[0].S[0] = x0_a;
266  state->M[0].S[1] = x0_b;
267  state->M[0].S[2] = x0_c;
268  state->M[0].S[3] = x0_d;
269  state->M[1].S[0] = x1_a;
270  state->M[1].S[1] = x1_b;
271  state->M[1].S[2] = x1_c;
272  state->M[1].S[3] = x1_d;
273  state->M[2].S[0] = ~x2_a;
274  state->M[2].S[1] = x2_b;
275  state->M[2].S[2] = x2_c;
276  state->M[2].S[3] = x2_d;
277  state->M[3].S[0] = x3_a;
278  state->M[3].S[1] = x3_b;
279  state->M[3].S[2] = x3_c;
280  state->M[3].S[3] = x3_d;
281  state->M[4].S[0] = x4_a;
282  state->M[4].S[1] = x4_b;
283  state->M[4].S[2] = x4_c;
284  state->M[4].S[3] = x4_d;
285 #endif
286 }
287 
288 #endif /* ASCON_MASKED_X4_BACKEND_C64 */
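The and_not_xor() macro implements the masked form of x ^= (~y & z): every output share accumulates the AND of each share of y (rotated into that share's domain) with the matching share of z, so the XOR of the four output shares still equals the plain result. The following is a minimal sketch of that recombination identity with the share rotations left out for readability; the real macro rotates shares to keep them statistically decorrelated, and the rand()-based splitting here is purely illustrative, not cryptographic masking.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Split a 64-bit value into four Boolean shares (toy randomness, for
 * illustration only). */
static void split4(uint64_t v, uint64_t s[4])
{
    s[1] = ((uint64_t)rand() << 32) ^ (uint64_t)rand();
    s[2] = ((uint64_t)rand() << 32) ^ (uint64_t)rand();
    s[3] = ((uint64_t)rand() << 32) ^ (uint64_t)rand();
    s[0] = v ^ s[1] ^ s[2] ^ s[3];
}

int main(void)
{
    uint64_t x = 0x0123456789abcdefULL;
    uint64_t y = 0xfedcba9876543210ULL;
    uint64_t z = 0x0f1e2d3c4b5a6978ULL;
    uint64_t xs[4], ys[4], zs[4];

    split4(x, xs); split4(y, ys); split4(z, zs);

    /* Share-wise x ^= (~y & z), following the structure of and_not_xor():
     * share i of x accumulates (~y_0 ^ y_1 ^ y_2 ^ y_3) & z_i, which equals
     * (~y) & z_i because complementing one share complements the XOR. */
    for (int i = 0; i < 4; ++i) {
        xs[i] ^= (~ys[0] & zs[i]);
        xs[i] ^= ( ys[1] & zs[i]);
        xs[i] ^= ( ys[2] & zs[i]);
        xs[i] ^= ( ys[3] & zs[i]);
    }

    /* Recombining the output shares gives the unmasked result. */
    uint64_t recombined = xs[0] ^ xs[1] ^ xs[2] ^ xs[3];
    assert(recombined == (x ^ (~y & z)));
    return 0;
}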
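ROUND_CONSTANT(round) evaluates to the bitwise complement of the usual ASCON round constant for that round; the S-box's x2 = ~x2 step is folded into this inverted constant addition, which is why x2 is inverted once before the loop and inverted again when the state is stored. A short self-check of the complement relationship against the published constant sequence 0xF0 .. 0x4B, as an illustrative sketch:

#include <assert.h>
#include <stdint.h>

#define ROUND_CONSTANT(round) \
    (~(uint64_t)(((0x0F - (round)) << 4) | (round)))

int main(void)
{
    /* Standard (non-inverted) ASCON round constants for rounds 0..11. */
    static const uint8_t std_rc[12] = {
        0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5,
        0x96, 0x87, 0x78, 0x69, 0x5a, 0x4b
    };
    for (int r = 0; r < 12; ++r) {
        /* The pre-inverted constant is just the complement of the usual one. */
        assert(ROUND_CONSTANT(r) == ~(uint64_t)std_rc[r]);
    }
    return 0;
}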
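The linear diffusion layer can be applied to each share independently because rotation and XOR are linear over GF(2): rotating the XOR of the shares equals the XOR of the rotated shares, so diffusing every share separately diffuses the recombined word. A one-line check of that property (ror64() is a local helper written for this sketch, not part of the library):

#include <assert.h>
#include <stdint.h>

static uint64_t ror64(uint64_t x, unsigned n)
{
    return (x >> n) | (x << (64 - n));
}

int main(void)
{
    uint64_t a = 0x0123456789abcdefULL, b = 0xfedcba9876543210ULL;
    /* Rotation distributes over XOR, so per-share diffusion is sound. */
    assert(ror64(a ^ b, 19) == (ror64(a, 19) ^ ror64(b, 19)));
    return 0;
}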
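ascon_x4_permute() expects three fresh 64-bit random words in preserve[]; they seed the t0 shares of the Chi5 gadget and come back rotated, so the caller can feed them into the next call instead of drawing new randomness every time. The sketch below shows that calling pattern under stated assumptions: fill_random() is a hypothetical platform RNG helper, the state is presumed to have been masked and loaded elsewhere, and first_round follows the convention visible in the loop (0 runs all 12 rounds, 4 runs the final 8).

#include <stddef.h>
#include <stdint.h>
#include "ascon-masked-state.h" /* assumed to declare ascon_masked_state_t
                                   and the ascon_x4_permute() prototype */

/* Hypothetical helper: fill a buffer with fresh random bytes from the
 * platform RNG.  Not part of this file. */
extern void fill_random(void *buf, size_t size);

void permute_twice(ascon_masked_state_t *state)
{
    uint64_t preserve[3];

    /* Fresh randomness for the first call... */
    fill_random(preserve, sizeof(preserve));
    ascon_x4_permute(state, 0, preserve);   /* first_round = 0: all 12 rounds */

    /* ...and the rotated randomness handed back in preserve[] can be fed
     * straight into the next call. */
    ascon_x4_permute(state, 4, preserve);   /* first_round = 4: last 8 rounds */
}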
Utility functions for operating on masked ASCON states with between 2 and 4 shares.
#define ascon_mask64_rotate_share3_1(x)
Rotates 64-bit masked share 3 with respect to share 1.
#define ascon_mask64_rotate_share3_0(x)
Rotates 64-bit masked share 3 with respect to share 0.
#define ascon_mask64_rotate_share3_2(x)
Rotates 64-bit masked share 3 with respect to share 2.
#define rightRotate39_64(a)
Definition: ascon-util.h:606
#define rightRotate59_64(a)
Definition: ascon-util.h:626
#define rightRotate61_64(a)
Definition: ascon-util.h:628
#define rightRotate41_64(a)
Definition: ascon-util.h:608
#define rightRotate1_64(a)
Definition: ascon-util.h:568
#define rightRotate29_64(a)
Definition: ascon-util.h:596
#define rightRotate10_64(a)
Definition: ascon-util.h:577
#define be_store_word64(ptr, x)
Definition: ascon-util.h:118
#define rightRotate19_64(a)
Definition: ascon-util.h:586
#define rightRotate6_64(a)
Definition: ascon-util.h:573
#define rightRotate7_64(a)
Definition: ascon-util.h:574
#define rightRotate28_64(a)
Definition: ascon-util.h:595
#define rightRotate17_64(a)
Definition: ascon-util.h:584
#define be_load_word64(ptr)
Definition: ascon-util.h:107
#define rightRotate13_64(a)
Definition: ascon-util.h:580
void ascon_x4_permute(ascon_masked_state_t *state, uint8_t first_round, uint64_t preserve[3])
Definition: ascon-x4-c64.c:65
#define and_not_xor(x, y, z)
Computes x ^= (~y & z) with a 4-share masked representation.
Definition: ascon-x4-c64.c:36
#define ROUND_CONSTANT(round)
Definition: ascon-x4-c64.c:61
ascon_masked_state_t
State of the ASCON permutation which has been masked with up to 4 shares.
uint64_t S[5]
Definition: permutation.h:64
uint8_t B[40]
Definition: permutation.h:66