ASCON Suite
ascon-x3-c64.c
/*
 * Copyright (C) 2022 Southern Storm Software, Pty Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "ascon-masked-state.h"
#include "ascon-masked-backend.h"
#include "core/ascon-util.h"

#if defined(ASCON_MASKED_X3_BACKEND_C64) && ASCON_MASKED_MAX_SHARES >= 3

/* Computes x ^= (~y & z) with a 3-share masked representation */
#define and_not_xor(x, y, z) \
    do { \
        x##_a ^= (~(y##_a) & z##_a); \
        x##_a ^= ((y##_a) & ascon_mask64_unrotate_share1_0(z##_b)); \
        x##_a ^= (y##_a & ascon_mask64_unrotate_share2_0(z##_c)); \
        \
        x##_b ^= (y##_b & ascon_mask64_rotate_share1_0(z##_a)); \
        x##_b ^= ((~y##_b) & z##_b); \
        x##_b ^= (y##_b & ascon_mask64_unrotate_share2_1(z##_c)); \
        \
        x##_c ^= (y##_c & ascon_mask64_rotate_share2_0(~z##_a)); \
        x##_c ^= (y##_c & ascon_mask64_rotate_share2_1(z##_b)); \
        x##_c ^= (y##_c | z##_c); \
    } while (0)
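
/* Note: the rotate/unrotate macros above compensate for the way this
 * backend stores masked words: shares 1 and 2 are kept rotated with
 * respect to share 0, so each cross-share term must be aligned to the
 * share it is folded into before the AND and XOR are applied. */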

/* Generate a pre-inverted round constant so that we can
 * avoid NOT'ing x2 in the S-box during the rounds */
#define ROUND_CONSTANT(round) \
        (~(uint64_t)(((0x0F - (round)) << 4) | (round)))
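
/* For example, ROUND_CONSTANT(0) evaluates to ~(uint64_t)0xF0, i.e.
 * 0xFFFFFFFFFFFFFF0F: the low byte is the complement of the usual ASCON
 * constant 0xF0 for the first of the 12 rounds, and the upper bits are
 * all ones because of the pre-inversion. */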

void ascon_x3_permute
    (ascon_masked_state_t *state, uint8_t first_round, uint64_t preserve[2])
{
    static const uint64_t RC[12] = {
        ROUND_CONSTANT(0),
        ROUND_CONSTANT(1),
        ROUND_CONSTANT(2),
        ROUND_CONSTANT(3),
        ROUND_CONSTANT(4),
        ROUND_CONSTANT(5),
        ROUND_CONSTANT(6),
        ROUND_CONSTANT(7),
        ROUND_CONSTANT(8),
        ROUND_CONSTANT(9),
        ROUND_CONSTANT(10),
        ROUND_CONSTANT(11)
    };
    uint64_t x0_a, x1_a, x2_a, x3_a, x4_a;
    uint64_t x0_b, x1_b, x2_b, x3_b, x4_b;
    uint64_t x0_c, x1_c, x2_c, x3_c, x4_c;
    uint64_t t0_a, t0_b, t0_c, t1_a, t1_b, t1_c;

    /* Start with the randomness that the caller provided */
    t0_a = preserve[0];
    t0_b = preserve[1];

    /* Load the state into local variables */
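    /* With the DIRECT_XOR word backend each share of a masked word is held
     * big-endian in the byte view (offsets 0, 8 and 16 of M[n].B); otherwise
     * the three shares are plain 64-bit words in M[n].S. */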
#if defined(ASCON_MASKED_WORD_BACKEND_DIRECT_XOR)
    x0_a = be_load_word64(&(state->M[0].B[0]));
    x0_b = be_load_word64(&(state->M[0].B[8]));
    x0_c = be_load_word64(&(state->M[0].B[16]));
    x1_a = be_load_word64(&(state->M[1].B[0]));
    x1_b = be_load_word64(&(state->M[1].B[8]));
    x1_c = be_load_word64(&(state->M[1].B[16]));
    x2_a = be_load_word64(&(state->M[2].B[0]));
    x2_b = be_load_word64(&(state->M[2].B[8]));
    x2_c = be_load_word64(&(state->M[2].B[16]));
    x3_a = be_load_word64(&(state->M[3].B[0]));
    x3_b = be_load_word64(&(state->M[3].B[8]));
    x3_c = be_load_word64(&(state->M[3].B[16]));
    x4_a = be_load_word64(&(state->M[4].B[0]));
    x4_b = be_load_word64(&(state->M[4].B[8]));
    x4_c = be_load_word64(&(state->M[4].B[16]));
#else
    x0_a = state->M[0].S[0];
    x0_b = state->M[0].S[1];
    x0_c = state->M[0].S[2];
    x1_a = state->M[1].S[0];
    x1_b = state->M[1].S[1];
    x1_c = state->M[1].S[2];
    x2_a = state->M[2].S[0];
    x2_b = state->M[2].S[1];
    x2_c = state->M[2].S[2];
    x3_a = state->M[3].S[0];
    x3_b = state->M[3].S[1];
    x3_c = state->M[3].S[2];
    x4_a = state->M[4].S[0];
    x4_b = state->M[4].S[1];
    x4_c = state->M[4].S[2];
#endif

    /* The round constants invert x2 as part of the rounds so that we
     * don't need an explicit "x2 = ~x2" step in the S-box. Pre-invert
     * x2 before the first round to compensate. */
    x2_a = ~x2_a;
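    /* Inverting a single share is enough: the recombined word is the XOR of
     * the (aligned) shares, so complementing x2_a complements x2 itself. */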

    /* Perform all encryption rounds */
    while (first_round < 12) {
        /* Add the inverted round constant to x2 */
        x2_a ^= RC[first_round++];

        /* Start of the substitution layer, first share */
        x0_a ^= x4_a;
        x4_a ^= x3_a;
        x2_a ^= x1_a;
        t1_a = x0_a;

        /* Start of the substitution layer, second share */
        x0_b ^= x4_b;
        x4_b ^= x3_b;
        x2_b ^= x1_b;
        t1_b = x0_b;

        /* Start of the substitution layer, third share */
        x0_c ^= x4_c;
        x4_c ^= x3_c;
        x2_c ^= x1_c;
        t1_c = x0_c;

        /* Middle part of the substitution layer, Chi5 */
        t0_c = ascon_mask64_rotate_share2_0(t0_a) ^ /* t0 = random shares */
               ascon_mask64_rotate_share2_1(t0_b);
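        /* At this point t0 is effectively a masked zero built from the
         * caller's random words; the first and_not_xor() below turns it
         * into a masked (~x0) & x1, and folding it into x4 afterwards also
         * refreshes x4's shares without changing the value they encode. */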
        and_not_xor(t0, x0, x1);        /* t0 ^= (~x0) & x1; */
        and_not_xor(x0, x1, x2);        /* x0 ^= (~x1) & x2; */
        and_not_xor(x1, x2, x3);        /* x1 ^= (~x2) & x3; */
        and_not_xor(x2, x3, x4);        /* x2 ^= (~x3) & x4; */
        and_not_xor(x3, x4, t1);        /* x3 ^= (~x4) & t1; */
        x4_a ^= t0_a;                   /* x4 ^= t0; */
        x4_b ^= t0_b;
        x4_c ^= t0_c;

        /* End of the substitution layer */
        x1_a ^= x0_a;
        x0_a ^= x4_a;
        x3_a ^= x2_a;
        x1_b ^= x0_b;
        x0_b ^= x4_b;
        x3_b ^= x2_b;
        x1_c ^= x0_c;
        x0_c ^= x4_c;
        x3_c ^= x2_c;

        /* NOT'ing x2 is done as part of the next round constant */
        /* x2_a = ~x2_a; */

        /* Linear diffusion layer, third share */
        x0_c ^= rightRotate19_64(x0_c) ^ rightRotate28_64(x0_c);
        x1_c ^= rightRotate61_64(x1_c) ^ rightRotate39_64(x1_c);
        x2_c ^= rightRotate1_64(x2_c) ^ rightRotate6_64(x2_c);
        x3_c ^= rightRotate10_64(x3_c) ^ rightRotate17_64(x3_c);
        x4_c ^= rightRotate7_64(x4_c) ^ rightRotate41_64(x4_c);

        /* Linear diffusion layer, second share */
        x0_b ^= rightRotate19_64(x0_b) ^ rightRotate28_64(x0_b);
        x1_b ^= rightRotate61_64(x1_b) ^ rightRotate39_64(x1_b);
        x2_b ^= rightRotate1_64(x2_b) ^ rightRotate6_64(x2_b);
        x3_b ^= rightRotate10_64(x3_b) ^ rightRotate17_64(x3_b);
        x4_b ^= rightRotate7_64(x4_b) ^ rightRotate41_64(x4_b);

        /* Linear diffusion layer, first share */
        x0_a ^= rightRotate19_64(x0_a) ^ rightRotate28_64(x0_a);
        x1_a ^= rightRotate61_64(x1_a) ^ rightRotate39_64(x1_a);
        x2_a ^= rightRotate1_64(x2_a) ^ rightRotate6_64(x2_a);
        x3_a ^= rightRotate10_64(x3_a) ^ rightRotate17_64(x3_a);
        x4_a ^= rightRotate7_64(x4_a) ^ rightRotate41_64(x4_a);

        /* Rotate the randomness in t0 before the next round */
        t0_a = rightRotate13_64(t0_a);
        t0_b = rightRotate29_64(t0_b);
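        /* Rotating the two random words, rather than drawing new ones,
         * gives each round a different-looking refresh value while the
         * caller's original entropy is retained and eventually handed
         * back through preserve[]. */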
    }

    /* Return the final randomness to the caller to preserve it */
    preserve[0] = t0_a;
    preserve[1] = t0_b;

    /* Store the local variables back to the state with a final invert of x2 */
#if defined(ASCON_MASKED_WORD_BACKEND_DIRECT_XOR)
    be_store_word64(&(state->M[0].B[0]), x0_a);
    be_store_word64(&(state->M[0].B[8]), x0_b);
    be_store_word64(&(state->M[0].B[16]), x0_c);
    be_store_word64(&(state->M[1].B[0]), x1_a);
    be_store_word64(&(state->M[1].B[8]), x1_b);
    be_store_word64(&(state->M[1].B[16]), x1_c);
    be_store_word64(&(state->M[2].B[0]), ~x2_a);
    be_store_word64(&(state->M[2].B[8]), x2_b);
    be_store_word64(&(state->M[2].B[16]), x2_c);
    be_store_word64(&(state->M[3].B[0]), x3_a);
    be_store_word64(&(state->M[3].B[8]), x3_b);
    be_store_word64(&(state->M[3].B[16]), x3_c);
    be_store_word64(&(state->M[4].B[0]), x4_a);
    be_store_word64(&(state->M[4].B[8]), x4_b);
    be_store_word64(&(state->M[4].B[16]), x4_c);
#else
    state->M[0].S[0] = x0_a;
    state->M[0].S[1] = x0_b;
    state->M[0].S[2] = x0_c;
    state->M[1].S[0] = x1_a;
    state->M[1].S[1] = x1_b;
    state->M[1].S[2] = x1_c;
    state->M[2].S[0] = ~x2_a;
    state->M[2].S[1] = x2_b;
    state->M[2].S[2] = x2_c;
    state->M[3].S[0] = x3_a;
    state->M[3].S[1] = x3_b;
    state->M[3].S[2] = x3_c;
    state->M[4].S[0] = x4_a;
    state->M[4].S[1] = x4_b;
    state->M[4].S[2] = x4_c;
#endif
}

#endif /* ASCON_MASKED_X3_BACKEND_C64 */
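
The listing above only defines the permutation itself. A minimal usage sketch follows; it is not part of the ASCON Suite sources. It assumes the masked state has already been initialised by the library's masking helpers, that the application can supply two fresh 64-bit random words, and that the prototype of ascon_x3_permute is visible through the masked-state headers. The helper name example_permute_all_rounds and its parameters are hypothetical.

#include "ascon-masked-state.h"
#include "ascon-masked-backend.h"

/* Hypothetical helper: run the full 12-round masked permutation once,
 * seeding the per-call randomness from two caller-supplied words. */
void example_permute_all_rounds
    (ascon_masked_state_t *state, uint64_t random_word0, uint64_t random_word1)
{
    uint64_t preserve[2];

    /* Fresh randomness obtained by the application, e.g. from its TRNG */
    preserve[0] = random_word0;
    preserve[1] = random_word1;

    /* first_round = 0 performs rounds 0..11; passing 4 instead would
     * perform only the last 8 rounds */
    ascon_x3_permute(state, 0, preserve);

    /* preserve[] now holds rotated randomness that may be reused to seed
     * a subsequent call instead of drawing new random words */
}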