#if defined(ASCON_MASKED_X4_BACKEND_C32) && ASCON_MASKED_MAX_SHARES >= 4
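/* Illustrative sketch (assumption, not part of the original file):
 * this backend stores each 64-bit state word in bit-sliced form, with
 * the even-numbered bits packed into one 32-bit word and the
 * odd-numbered bits into another.  A hypothetical conversion helper: */
static inline void ascon_sliced_split_sketch
    (uint64_t x, uint32_t *e, uint32_t *o)
{
    uint32_t even = 0, odd = 0;
    int i;
    for (i = 0; i < 32; ++i) {
        even |= (uint32_t)((x >> (2 * i)) & 1) << i;     /* bit 2*i   */
        odd  |= (uint32_t)((x >> (2 * i + 1)) & 1) << i; /* bit 2*i+1 */
    }
    *e = even;
    *o = odd;
}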
/* Computes x ^= (~y & z) with a 4-share masked representation.
 * The "w" parameter selects the even ("e") or odd ("o") sliced half.
 * Shares b, c, and d are stored bit-rotated with respect to share a,
 * so every cross-share term is rotated or unrotated to line up with
 * the rotation of the destination share. */
#define and_not_xor(x, y, z, w) \
    do { \
        x##_a##w ^= (~(y##_a##w) & z##_a##w); \
        x##_a##w ^= (ascon_mask32_unrotate_share1_0(y##_b##w) & z##_a##w); \
        x##_a##w ^= (ascon_mask32_unrotate_share2_0(y##_c##w) & z##_a##w); \
        x##_a##w ^= (ascon_mask32_unrotate_share3_0(y##_d##w) & z##_a##w); \
        \
        x##_b##w ^= (ascon_mask32_rotate_share1_0(~(y##_a##w)) & z##_b##w); \
        x##_b##w ^= (y##_b##w & z##_b##w); \
        x##_b##w ^= (ascon_mask32_unrotate_share2_1(y##_c##w) & z##_b##w); \
        x##_b##w ^= (ascon_mask32_unrotate_share3_1(y##_d##w) & z##_b##w); \
        \
        x##_c##w ^= (ascon_mask32_rotate_share2_0(~(y##_a##w)) & z##_c##w); \
        x##_c##w ^= (ascon_mask32_rotate_share2_1(y##_b##w) & z##_c##w); \
        x##_c##w ^= (y##_c##w & z##_c##w); \
        x##_c##w ^= (ascon_mask32_unrotate_share3_2(y##_d##w) & z##_c##w); \
        \
        x##_d##w ^= (ascon_mask32_rotate_share3_0(~(y##_a##w)) & z##_d##w); \
        x##_d##w ^= (ascon_mask32_rotate_share3_1(y##_b##w) & z##_d##w); \
        x##_d##w ^= (ascon_mask32_rotate_share3_2(y##_c##w) & z##_d##w); \
        x##_d##w ^= (y##_d##w & z##_d##w); \
    } while (0)
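/* Illustrative sketch (assumption, not part of the original file):
 * with rotated shares, the plain value of a masked word y is recovered
 * as y_a ^ unrotate1(y_b) ^ unrotate2(y_c) ^ unrotate3(y_d).  Because
 * AND distributes over XOR, the sixteen AND terms above are exactly
 * the cross products of the shares of ~y (the complement is folded
 * into share a) with the shares of z, each re-aligned to the
 * destination share.  An unmasked reference for testing one sliced
 * half of the macro would be: */
static inline uint32_t and_not_xor_ref(uint32_t x, uint32_t y, uint32_t z)
{
    return x ^ (~y & z); /* unmasked equivalent of and_not_xor() */
}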
/* Linear diffusion layer, applied to one share "w" (a, b, c, or d)
 * of the masked state in 32-bit sliced form */
#define linear(w) \
    do { \
        uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; \
        t0 = x0_##w##e ^ rightRotate4(x0_##w##o); \
        t1 = x0_##w##o ^ rightRotate5(x0_##w##e); \
        t2 = x1_##w##e ^ rightRotate11(x1_##w##e); \
        t3 = x1_##w##o ^ rightRotate11(x1_##w##o); \
        t4 = x2_##w##e ^ rightRotate2(x2_##w##o); \
        t5 = x2_##w##o ^ rightRotate3(x2_##w##e); \
        t6 = x3_##w##e ^ rightRotate3(x3_##w##o); \
        t7 = x3_##w##o ^ rightRotate4(x3_##w##e); \
        t8 = x4_##w##e ^ rightRotate17(x4_##w##e); \
        t9 = x4_##w##o ^ rightRotate17(x4_##w##o); \
        x0_##w##e ^= rightRotate9(t1); \
        x0_##w##o ^= rightRotate10(t0); \
        x1_##w##e ^= rightRotate19(t3); \
        x1_##w##o ^= rightRotate20(t2); \
        x2_##w##e ^= t5; \
        x2_##w##o ^= rightRotate1(t4); \
        x3_##w##e ^= rightRotate5(t6); \
        x3_##w##o ^= rightRotate5(t7); \
        x4_##w##e ^= rightRotate3(t9); \
        x4_##w##o ^= rightRotate4(t8); \
    } while (0)
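/* Illustrative sketch (assumption, not part of the original file): in
 * the sliced form a 64-bit right rotation by an even amount 2*r
 * rotates each 32-bit half by r, while an odd amount 2*r + 1 swaps the
 * halves, rotating them by r and r + 1.  The pairs of rightRotate
 * steps in linear() above realise the usual ASCON rotations this way;
 * e.g. for x0, t = x0 ^ ror64(x0, 9) followed by x0 ^= ror64(t, 19)
 * yields x0 ^= ror64(x0, 19) ^ ror64(x0, 28).  Hypothetical helpers
 * (the file itself uses the rightRotateN macros): */
static inline uint32_t ror32_sketch(uint32_t x, unsigned n)
{
    n &= 31;
    return n ? ((x >> n) | (x << (32 - n))) : x;
}
static inline void ror64_sliced_sketch(uint32_t *e, uint32_t *o, unsigned n)
{
    uint32_t te = *e, to = *o;
    if (n & 1) {        /* odd amount: the halves swap roles */
        *e = ror32_sketch(to, n >> 1);
        *o = ror32_sketch(te, (n >> 1) + 1);
    } else {            /* even amount: the halves rotate independently */
        *e = ror32_sketch(te, n >> 1);
        *o = ror32_sketch(to, n >> 1);
    }
}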
/* Generates a pair of pre-inverted round constants for the even and
 * odd sliced halves.  Pre-inverting the constants lets the rounds keep
 * x2 in complemented form and avoid an explicit NOT in the S-box. */
#define ROUND_CONSTANT_PAIR(rc1, rc2) \
    (~((uint32_t)(rc1))), (~((uint32_t)(rc2)))
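/* Illustrative sketch (assumption, not part of the original file): the
 * values passed to ROUND_CONSTANT_PAIR are the even and odd bits of
 * the standard 8-bit ASCON round constant.  For round 1 the constant
 * is 0xe1 = 11100001b, whose even bits give 9 and odd bits give 12,
 * i.e. ROUND_CONSTANT_PAIR(9, 12). */
static inline void rc_pair_sketch(uint8_t rc, uint32_t *even, uint32_t *odd)
{
    uint32_t e = 0, o = 0;
    int i;
    for (i = 0; i < 4; ++i) {
        e |= (uint32_t)((rc >> (2 * i)) & 1) << i;     /* bits 0,2,4,6 */
        o |= (uint32_t)((rc >> (2 * i + 1)) & 1) << i; /* bits 1,3,5,7 */
    }
    *even = e;
    *odd = o;
}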
/* Permutes the masked ASCON state with a specified number of rounds.
 * The "preserve" buffer carries masking randomness between calls. */
void ascon_x4_permute
    (ascon_masked_state_t *state, uint8_t first_round, uint64_t *preserve)
{
    /* Even/odd sliced halves of the standard round constants
     * 0xf0, 0xe1, ..., 0x4b, pre-inverted by ROUND_CONSTANT_PAIR */
    static const uint32_t RC[12 * 2] = {
        ROUND_CONSTANT_PAIR(12, 12),
        ROUND_CONSTANT_PAIR( 9, 12),
        ROUND_CONSTANT_PAIR(12,  9),
        ROUND_CONSTANT_PAIR( 9,  9),
        ROUND_CONSTANT_PAIR( 6, 12),
        ROUND_CONSTANT_PAIR( 3, 12),
        ROUND_CONSTANT_PAIR( 6,  9),
        ROUND_CONSTANT_PAIR( 3,  9),
        ROUND_CONSTANT_PAIR(12,  6),
        ROUND_CONSTANT_PAIR( 9,  6),
        ROUND_CONSTANT_PAIR(12,  3),
        ROUND_CONSTANT_PAIR( 9,  3)
    };
    const uint32_t *rc = RC + first_round * 2;
    /* Shares a, b, c, and d of the state words x0..x4, each split into
     * even ("e") and odd ("o") 32-bit sliced halves */
    uint32_t x0_ae, x1_ae, x2_ae, x3_ae, x4_ae;
    uint32_t x0_ao, x1_ao, x2_ao, x3_ao, x4_ao;
    uint32_t x0_be, x1_be, x2_be, x3_be, x4_be;
    uint32_t x0_bo, x1_bo, x2_bo, x3_bo, x4_bo;
    uint32_t x0_ce, x1_ce, x2_ce, x3_ce, x4_ce;
    uint32_t x0_co, x1_co, x2_co, x3_co, x4_co;
    uint32_t x0_de, x1_de, x2_de, x3_de, x4_de;
    uint32_t x0_do, x1_do, x2_do, x3_do, x4_do;
    /* Masking randomness, one sliced word per share */
    uint32_t t0_ao, t0_bo, t0_co, t0_do, t1_ao, t1_bo, t1_co, t1_do;
    uint32_t t0_ae, t0_be, t0_ce, t0_de, t1_ae, t1_be, t1_ce, t1_de;
    /* Reload the randomness that was preserved from the previous call */
    t0_ae = ((uint32_t *)preserve)[0];
    t0_ao = ((uint32_t *)preserve)[1];
    t0_be = ((uint32_t *)preserve)[2];
    t0_bo = ((uint32_t *)preserve)[3];
    t0_ce = ((uint32_t *)preserve)[4];
    t0_co = ((uint32_t *)preserve)[5];
    /* ... the shares of x0..x4 are loaded from state->M[0..4] into the
       local variables here; the first share of x2 is complemented on
       load to match the pre-inverted round constants ... */

    /* Perform all of the requested rounds */
    while (first_round++ < 12) {
        /* ... round body: XOR the pre-inverted constant pair rc[0] and
           rc[1] into the first share of x2, apply the masked S-box to
           the even and odd halves with and_not_xor(), update the t0/t1
           masking words, run linear() on each of the four shares, and
           advance rc by 2 ... */
    }
    /* Preserve the final randomness for the next permutation call */
    ((uint32_t *)preserve)[0] = t0_ae;
    ((uint32_t *)preserve)[1] = t0_ao;
    ((uint32_t *)preserve)[2] = t0_be;
    ((uint32_t *)preserve)[3] = t0_bo;
    ((uint32_t *)preserve)[4] = t0_ce;
    ((uint32_t *)preserve)[5] = t0_co;
    /* ... the shares of x0 and x1 are stored back to state->M[0] and
       state->M[1] here ... */

    /* Complement the first share of x2 back to true form on store */
    state->M[2].W[0] = ~x2_ae;
    state->M[2].W[1] = ~x2_ao;

    /* ... the remaining shares of x2 and the shares of x3 and x4 are
       stored back in the same way ... */
}

#endif /* ASCON_MASKED_X4_BACKEND_C32 && ASCON_MASKED_MAX_SHARES >= 4 */
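/* Illustrative usage sketch (assumption, not part of the original
 * file).  The loop condition "while (first_round++ < 12)" means that
 * first_round selects where the round schedule starts: 0 runs all 12
 * rounds, 4 runs the last 8, and so on.  The preserve buffer holds the
 * six 32-bit randomness words read and written above; by assumption
 * here, the caller seeds it with fresh randomness before the first
 * call and then carries it from one call to the next. */
#if defined(ASCON_MASKED_X4_BACKEND_C32) && ASCON_MASKED_MAX_SHARES >= 4
static void ascon_x4_permute_12_rounds_example
    (ascon_masked_state_t *state, uint64_t preserve[3])
{
    ascon_x4_permute(state, 0, preserve); /* rounds 0..11 */
}
#endif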