ASCON Suite
ascon-x2-c64.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2022 Southern Storm Software, Pty Ltd.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included
12  * in all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 
23 #include "ascon-masked-state.h"
24 #include "ascon-masked-backend.h"
25 #include "core/ascon-util.h"
26 
27 #if defined(ASCON_MASKED_X2_BACKEND_C64)
28 
/*
 * and_not_xor(x, y, z): computes x ^= (~y) & z on 2-share masked 64-bit
 * words.  Each logical word w is represented by the shares w_a and w_b,
 * with share b stored rotated relative to share a (hence the
 * ascon_mask64_rotate_share1_0 / _unrotate_share1_0 conversions when a
 * term mixes one word's "a" share with another word's "b" share).
 *
 * Each output share absorbs one same-share term and one cross-share
 * term, so no expression ever combines both shares of the same word in
 * unmasked (un-rotated) form.
 *
 * NOTE(review): the interleaving/order of these four XOR-accumulations
 * appears deliberate for side-channel (leakage) reasons -- do not
 * reorder or "simplify" without a masking/leakage review.
 */
36 #define and_not_xor(x, y, z) \
37  do { \
38  x##_a ^= ((~y##_a) & ascon_mask64_unrotate_share1_0(z##_b)); \
39  x##_a ^= ((~y##_a) & z##_a); \
40  x##_b ^= (y##_b & z##_b); \
41  x##_b ^= (y##_b & ascon_mask64_rotate_share1_0(z##_a)); \
42  } while (0)
43 
/* Each ASCON round constant is stored pre-inverted (bitwise NOT) so the
 * S-box can skip the explicit "x2 = ~x2" step inside the round loop.
 *
 * The low byte of the plain constant for round r is
 * ((0x0F - r) << 4) | r.  Complementing flips each nibble within its
 * 4-bit range, giving (r << 4) | (0x0F - r), while the upper 56 bits of
 * the complement are all ones -- hence the OR with ~0xFF. */
#define ROUND_CONSTANT(round) \
    (((uint64_t)(((round) << 4) | (0x0F - (round)))) | ~(uint64_t)0xFF)
48 
/*
 * ascon_x2_permute() -- applies rounds [first_round, 12) of the ASCON
 * permutation to a 2-share masked state, one 64-bit word per state row.
 * (The "void ascon_x2_permute" signature line is rendered as a hyperlink
 * by Doxygen and is absent from this listing.)
 *
 * state        2-share masked state; shares are held either as raw bytes
 *              (M[i].B, big-endian, DIRECT_XOR backend) or as 64-bit
 *              words (M[i].S).
 * first_round  index of the first round to run; 12 - first_round rounds
 *              are performed in total.
 * preserve     in: caller-supplied randomness used to mask the Chi5 t0
 *              term; out: the rotated randomness so the caller can carry
 *              it across calls.
 */
50  (ascon_masked_state_t *state, uint8_t first_round, uint64_t *preserve)
51 {
52  static const uint64_t RC[12] = {
53  ROUND_CONSTANT(0),
54  ROUND_CONSTANT(1),
55  ROUND_CONSTANT(2),
56  ROUND_CONSTANT(3),
57  ROUND_CONSTANT(4),
58  ROUND_CONSTANT(5),
59  ROUND_CONSTANT(6),
60  ROUND_CONSTANT(7),
61  ROUND_CONSTANT(8),
62  ROUND_CONSTANT(9),
63  ROUND_CONSTANT(10),
64  ROUND_CONSTANT(11)
65  };
66  uint64_t x0_a, x1_a, x2_a, x3_a, x4_a;
67  uint64_t x0_b, x1_b, x2_b, x3_b, x4_b;
68  uint64_t t0_a, t0_b, t1_a, t1_b;
69 
70  /* Start with the randomness that the caller provided */
71  t0_a = *preserve;
72 
73  /* Load the state into local variables */
74 #if defined(ASCON_MASKED_WORD_BACKEND_DIRECT_XOR)
75  x0_a = be_load_word64(&(state->M[0].B[0]));
76  x0_b = be_load_word64(&(state->M[0].B[8]));
77  x1_a = be_load_word64(&(state->M[1].B[0]));
78  x1_b = be_load_word64(&(state->M[1].B[8]));
79  x2_a = be_load_word64(&(state->M[2].B[0]));
80  x2_b = be_load_word64(&(state->M[2].B[8]));
81  x3_a = be_load_word64(&(state->M[3].B[0]));
82  x3_b = be_load_word64(&(state->M[3].B[8]));
83  x4_a = be_load_word64(&(state->M[4].B[0]));
84  x4_b = be_load_word64(&(state->M[4].B[8]));
85 #else
86  x0_a = state->M[0].S[0];
87  x0_b = state->M[0].S[1];
88  x1_a = state->M[1].S[0];
89  x1_b = state->M[1].S[1];
90  x2_a = state->M[2].S[0];
91  x2_b = state->M[2].S[1];
92  x3_a = state->M[3].S[0];
93  x3_b = state->M[3].S[1];
94  x4_a = state->M[4].S[0];
95  x4_b = state->M[4].S[1];
96 #endif
97 
98  /* The round constants invert x2 as part of the rounds so that we
99  * don't need an explicit "x2 = ~x2" step in the S-box. Pre-invert
100  * x2 before the first round to compensate. */
101  x2_a = ~x2_a;
102 
103  /* Perform all encryption rounds */
104  while (first_round < 12) {
105  /* Add the inverted round constant to x2 */
106  x2_a ^= RC[first_round++];
107 
108  /* Start of the substitution layer, first share */
109  x0_a ^= x4_a;
110  x4_a ^= x3_a;
111  x2_a ^= x1_a;
112  t1_a = x0_a;
113 
114  /* Start of the substitution layer, second share */
115  x0_b ^= x4_b;
116  x4_b ^= x3_b;
117  x2_b ^= x1_b;
118  t1_b = x0_b;
119 
120  /* Middle part of the substitution layer, Chi5 */
/* t0 starts each round as a fresh pair of random shares; the masked
 * (~x0 & x1) term is accumulated into it before it is folded into x4. */
121  t0_b = ascon_mask64_rotate_share1_0(t0_a); /* t0 = random shares */
122  and_not_xor(t0, x0, x1); /* t0 ^= (~x0) & x1; */
123  and_not_xor(x0, x1, x2); /* x0 ^= (~x1) & x2; */
124  and_not_xor(x1, x2, x3); /* x1 ^= (~x2) & x3; */
125  and_not_xor(x2, x3, x4); /* x2 ^= (~x3) & x4; */
126  and_not_xor(x3, x4, t1); /* x3 ^= (~x4) & t1; */
127  x4_a ^= t0_a; /* x4 ^= t0; */
128  x4_b ^= t0_b;
129 
130  /* End of the substitution layer */
131  x1_a ^= x0_a;
132  x0_a ^= x4_a;
133  x3_a ^= x2_a;
134  x1_b ^= x0_b;
135  x0_b ^= x4_b;
136  x3_b ^= x2_b;
137 
138  /* NOT'ing x2 is done as part of the next round constant */
139  /* x2_a = ~x2_a; */
140 
/* NOTE(review): the second share is diffused before the first,
 * presumably to keep the two shares' computations separated in
 * time/registers -- confirm against the other masked backends before
 * reordering. */
141  /* Linear diffusion layer, second share */
142  x0_b ^= rightRotate19_64(x0_b) ^ rightRotate28_64(x0_b);
143  x1_b ^= rightRotate61_64(x1_b) ^ rightRotate39_64(x1_b);
144  x2_b ^= rightRotate1_64(x2_b) ^ rightRotate6_64(x2_b);
145  x3_b ^= rightRotate10_64(x3_b) ^ rightRotate17_64(x3_b);
146  x4_b ^= rightRotate7_64(x4_b) ^ rightRotate41_64(x4_b);
147 
148  /* Linear diffusion layer, first share */
149  x0_a ^= rightRotate19_64(x0_a) ^ rightRotate28_64(x0_a);
150  x1_a ^= rightRotate61_64(x1_a) ^ rightRotate39_64(x1_a);
151  x2_a ^= rightRotate1_64(x2_a) ^ rightRotate6_64(x2_a);
152  x3_a ^= rightRotate10_64(x3_a) ^ rightRotate17_64(x3_a);
153  x4_a ^= rightRotate7_64(x4_a) ^ rightRotate41_64(x4_a);
154 
155  /* Rotate the randomness in t0 before the next round */
156  t0_a = rightRotate13_64(t0_a);
157  }
158 
159  /* Return the final randomness to the caller to preserve it */
160  *preserve = t0_a;
161 
162  /* Store the local variables back to the state with a final invert of x2 */
163 #if defined(ASCON_MASKED_WORD_BACKEND_DIRECT_XOR)
164  be_store_word64(&(state->M[0].B[0]), x0_a);
165  be_store_word64(&(state->M[0].B[8]), x0_b);
166  be_store_word64(&(state->M[1].B[0]), x1_a);
167  be_store_word64(&(state->M[1].B[8]), x1_b);
168  be_store_word64(&(state->M[2].B[0]), ~x2_a);
169  be_store_word64(&(state->M[2].B[8]), x2_b);
170  be_store_word64(&(state->M[3].B[0]), x3_a);
171  be_store_word64(&(state->M[3].B[8]), x3_b);
172  be_store_word64(&(state->M[4].B[0]), x4_a);
173  be_store_word64(&(state->M[4].B[8]), x4_b);
174 #else
175  state->M[0].S[0] = x0_a;
176  state->M[0].S[1] = x0_b;
177  state->M[1].S[0] = x1_a;
178  state->M[1].S[1] = x1_b;
179  state->M[2].S[0] = ~x2_a;
180  state->M[2].S[1] = x2_b;
181  state->M[3].S[0] = x3_a;
182  state->M[3].S[1] = x3_b;
183  state->M[4].S[0] = x4_a;
184  state->M[4].S[1] = x4_b;
185 #endif
186 }
187 
188 #endif /* ASCON_MASKED_X2_BACKEND_C64 */
Utility functions for operating on masked ASCON states with between 2 and 4 shares.
#define ascon_mask64_rotate_share1_0(x)
Rotates 64-bit masked share 1 with respect to share 0.
#define rightRotate39_64(a)
Definition: ascon-util.h:606
#define rightRotate61_64(a)
Definition: ascon-util.h:628
#define rightRotate41_64(a)
Definition: ascon-util.h:608
#define rightRotate1_64(a)
Definition: ascon-util.h:568
#define rightRotate10_64(a)
Definition: ascon-util.h:577
#define be_store_word64(ptr, x)
Definition: ascon-util.h:118
#define rightRotate19_64(a)
Definition: ascon-util.h:586
#define rightRotate6_64(a)
Definition: ascon-util.h:573
#define rightRotate7_64(a)
Definition: ascon-util.h:574
#define rightRotate28_64(a)
Definition: ascon-util.h:595
#define rightRotate17_64(a)
Definition: ascon-util.h:584
#define be_load_word64(ptr)
Definition: ascon-util.h:107
#define rightRotate13_64(a)
Definition: ascon-util.h:580
void ascon_x2_permute(ascon_masked_state_t *state, uint8_t first_round, uint64_t *preserve)
Permutes the ASCON-x2 state with a specified number of rounds.
Definition: ascon-x2-c64.c:50
#define and_not_xor(x, y, z)
Computes x ^= (~y & z) with a 2-share masked representation.
Definition: ascon-x2-c64.c:36
#define ROUND_CONSTANT(round)
Definition: ascon-x2-c64.c:46
ascon_state_t state
[snippet_key]
Definition: snippets.c:2
State of the ASCON permutation which has been masked with up to 4 shares.
uint64_t S[5]
Definition: permutation.h:64
uint8_t B[40]
Definition: permutation.h:66