#if defined(ASCON_MASKED_X3_BACKEND_C32) && ASCON_MASKED_MAX_SHARES >= 3

/* ... */
/* Computes x ^= (~y & z) with a 3-share masked representation;
 * w selects the even ("e") or odd ("o") slice of each sliced word */
#define and_not_xor(x, y, z, w) \
    do { \
        x##_a##w ^= (~(y##_a##w) & z##_a##w); \
        x##_a##w ^= ((y##_a##w) & ascon_mask32_unrotate_share1_0(z##_b##w)); \
        x##_a##w ^= (y##_a##w & ascon_mask32_unrotate_share2_0(z##_c##w)); \
        x##_b##w ^= (y##_b##w & ascon_mask32_rotate_share1_0(z##_a##w)); \
        x##_b##w ^= ((~y##_b##w) & z##_b##w); \
        x##_b##w ^= (y##_b##w & ascon_mask32_unrotate_share2_1(z##_c##w)); \
        x##_c##w ^= (y##_c##w & ascon_mask32_rotate_share2_0(~z##_a##w)); \
        x##_c##w ^= (y##_c##w & ascon_mask32_rotate_share2_1(z##_b##w)); \
        x##_c##w ^= (y##_c##w | z##_c##w); \
    } while (0)
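To see why these nine terms reproduce x ^= (~y & z), it helps to expand the macro on concrete shares. The following standalone sketch re-expresses the macro body as a function and checks the unmasking invariant over random trials. The 5- and 9-bit share rotation offsets are assumptions chosen for illustration, not the offsets used by the library.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ROTL(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
#define ROTR(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

/* Assumed offsets: share 1 is stored rotated left by 5 bits and
 * share 2 rotated left by 9 bits, relative to share 0. */
#define rot1_0(x)   ROTL((x), 5)
#define unrot1_0(x) ROTR((x), 5)
#define rot2_0(x)   ROTL((x), 9)
#define unrot2_0(x) ROTR((x), 9)
#define rot2_1(x)   ROTL((x), 4) /* 9 - 5 */
#define unrot2_1(x) ROTR((x), 4)

/* The body of and_not_xor() with the token pasting expanded by hand;
 * index 0/1/2 = share a/b/c */
static void and_not_xor3(uint32_t x[3], const uint32_t y[3], const uint32_t z[3])
{
    x[0] ^= (~y[0] & z[0]);
    x[0] ^= (y[0] & unrot1_0(z[1]));
    x[0] ^= (y[0] & unrot2_0(z[2]));
    x[1] ^= (y[1] & rot1_0(z[0]));
    x[1] ^= (~y[1] & z[1]);
    x[1] ^= (y[1] & unrot2_1(z[2]));
    x[2] ^= (y[2] & rot2_0(~z[0]));
    x[2] ^= (y[2] & rot2_1(z[1]));
    x[2] ^= (y[2] | z[2]);
}

/* Splits v into three rotated shares using the masks m1 and m2 */
static void mask3(uint32_t s[3], uint32_t v, uint32_t m1, uint32_t m2)
{
    s[0] = v ^ m1 ^ m2;
    s[1] = rot1_0(m1);
    s[2] = rot2_0(m2);
}

/* Recombines three rotated shares back into the plain value */
static uint32_t unmask3(const uint32_t s[3])
{
    return s[0] ^ unrot1_0(s[1]) ^ unrot2_0(s[2]);
}

int main(void)
{
    uint32_t seed = 0x12345678;
    for (int trial = 0; trial < 1000; ++trial) {
        uint32_t v[9], x[3], y[3], z[3];
        for (int i = 0; i < 9; ++i) { /* xorshift32 test values */
            seed ^= seed << 13; seed ^= seed >> 17; seed ^= seed << 5;
            v[i] = seed;
        }
        mask3(x, v[0], v[3], v[4]);
        mask3(y, v[1], v[5], v[6]);
        mask3(z, v[2], v[7], v[8]);
        and_not_xor3(x, y, z);
        assert(unmask3(x) == (v[0] ^ (~v[1] & v[2])));
    }
    printf("and_not_xor invariant holds\n");
    return 0;
}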
/* Applies the linear diffusion layer to one share of the state;
 * w selects the share: "a", "b", or "c" */
#define linear(w) \
    do { \
        uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; \
        t0 = x0_##w##e ^ rightRotate4(x0_##w##o); \
        t1 = x0_##w##o ^ rightRotate5(x0_##w##e); \
        t2 = x1_##w##e ^ rightRotate11(x1_##w##e); \
        t3 = x1_##w##o ^ rightRotate11(x1_##w##o); \
        t4 = x2_##w##e ^ rightRotate2(x2_##w##o); \
        t5 = x2_##w##o ^ rightRotate3(x2_##w##e); \
        t6 = x3_##w##e ^ rightRotate3(x3_##w##o); \
        t7 = x3_##w##o ^ rightRotate4(x3_##w##e); \
        t8 = x4_##w##e ^ rightRotate17(x4_##w##e); \
        t9 = x4_##w##o ^ rightRotate17(x4_##w##o); \
        x0_##w##e ^= rightRotate9(t1); \
        x0_##w##o ^= rightRotate10(t0); \
        x1_##w##e ^= rightRotate19(t3); \
        x1_##w##o ^= rightRotate20(t2); \
        x2_##w##e ^= t5; \
        x2_##w##o ^= rightRotate1(t4); \
        x3_##w##e ^= rightRotate5(t6); \
        x3_##w##o ^= rightRotate5(t7); \
        x4_##w##e ^= rightRotate3(t9); \
        x4_##w##o ^= rightRotate4(t8); \
    } while (0)
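Each invocation of linear() applies ASCON's 64-bit linear diffusion layer to one share, held as two bit-interleaved 32-bit slices: a 64-bit rotation by 2k becomes a k-bit rotation of both slices, while a rotation by 2k+1 rotates the odd slice by k into the even slice and the even slice by k+1 into the odd slice. The standalone sketch below checks the x0 step, x0 ^= (x0 >>> 19) ^ (x0 >>> 28), against the t0/t1 sequence used above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ROR64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))
#define ROR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

/* Splits a 64-bit word into its even-indexed and odd-indexed bits */
static void interleave(uint64_t x, uint32_t *e, uint32_t *o)
{
    uint32_t ev = 0, od = 0;
    for (int i = 0; i < 32; ++i) {
        ev |= (uint32_t)((x >> (2 * i)) & 1) << i;
        od |= (uint32_t)((x >> (2 * i + 1)) & 1) << i;
    }
    *e = ev;
    *o = od;
}

int main(void)
{
    uint64_t x0 = 0x0123456789abcdefULL;
    uint32_t e, o, ref_e, ref_o;

    /* Reference: the unsliced Sigma_0 step from the ASCON spec */
    uint64_t ref = x0 ^ ROR64(x0, 19) ^ ROR64(x0, 28);
    interleave(ref, &ref_e, &ref_o);

    /* Sliced: the t0/t1 sequence the macro above uses for x0 */
    interleave(x0, &e, &o);
    uint32_t t0 = e ^ ROR32(o, 4);
    uint32_t t1 = o ^ ROR32(e, 5);
    e ^= ROR32(t1, 9);  /* = ROR32(o, 9) ^ ROR32(e, 14) */
    o ^= ROR32(t0, 10); /* = ROR32(e, 10) ^ ROR32(o, 14) */

    assert(e == ref_e && o == ref_o);
    printf("sliced Sigma_0 matches the 64-bit computation\n");
    return 0;
}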
/* Round constants are stored inverted because x2 is kept in
 * inverted form between rounds */
#define ROUND_CONSTANT_PAIR(rc1, rc2) \
    (~((uint32_t)(rc1))), (~((uint32_t)(rc2)))
static const uint32_t RC[12 * 2] = {
    /* Bit-interleaved forms of the spec constants 0xf0 .. 0x4b */
    ROUND_CONSTANT_PAIR(12, 12), ROUND_CONSTANT_PAIR( 9, 12),
    ROUND_CONSTANT_PAIR(12,  9), ROUND_CONSTANT_PAIR( 9,  9),
    ROUND_CONSTANT_PAIR( 6, 12), ROUND_CONSTANT_PAIR( 3, 12),
    ROUND_CONSTANT_PAIR( 6,  9), ROUND_CONSTANT_PAIR( 3,  9),
    ROUND_CONSTANT_PAIR(12,  6), ROUND_CONSTANT_PAIR( 9,  6),
    ROUND_CONSTANT_PAIR(12,  3), ROUND_CONSTANT_PAIR( 9,  3)
};
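Each pair holds one of the specification's 8-bit round constants (0xf0, 0xe1, ..., 0x4b) split into its even and odd bits, with ROUND_CONSTANT_PAIR inverting both halves to match the inverted x2. A quick standalone check of the table values (assuming, as the table above does, that the first member of each pair is the even slice):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* The 12 round constants from the ASCON specification */
    static const uint8_t spec[12] = {
        0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5,
        0x96, 0x87, 0x78, 0x69, 0x5a, 0x4b
    };
    for (int r = 0; r < 12; ++r) {
        unsigned even = 0, odd = 0;
        for (int i = 0; i < 4; ++i) {
            even |= ((spec[r] >> (2 * i)) & 1) << i;
            odd  |= ((spec[r] >> (2 * i + 1)) & 1) << i;
        }
        /* Prints the (rc1, rc2) arguments used in the RC table above */
        printf("ROUND_CONSTANT_PAIR(%2u, %2u)\n", even, odd);
    }
    return 0;
}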
void ascon_x3_permute(ascon_masked_state_t *state, uint8_t first_round, uint64_t *preserve)
{
    const uint32_t *rc = RC + first_round * 2;
    uint32_t x0_ae, x1_ae, x2_ae, x3_ae, x4_ae;
    uint32_t x0_ao, x1_ao, x2_ao, x3_ao, x4_ao;
    uint32_t x0_be, x1_be, x2_be, x3_be, x4_be;
    uint32_t x0_bo, x1_bo, x2_bo, x3_bo, x4_bo;
    uint32_t x0_ce, x1_ce, x2_ce, x3_ce, x4_ce;
    uint32_t x0_co, x1_co, x2_co, x3_co, x4_co;
    uint32_t t0_ao, t0_bo, t0_co, t1_ao, t1_bo, t1_co;
    uint32_t t0_ae, t0_be, t0_ce, t1_ae, t1_be, t1_ce;

    /* ... */

    /* Pick up the random words preserved by the previous call */
    t0_ae = ((uint32_t *)preserve)[0];
    t0_ao = ((uint32_t *)preserve)[1];
    t0_be = ((uint32_t *)preserve)[2];
    t0_bo = ((uint32_t *)preserve)[3];
    /* ... */

    /* Perform all requested rounds, from first_round up to round 11 */
    while (first_round++ < 12) {
        /* ... round constant addition, substitution layer (and_not_xor),
           and linear diffusion (linear) for each share ... */
    }
    /* Preserve the random words for the next call to this function */
    ((uint32_t *)preserve)[0] = t0_ae;
    ((uint32_t *)preserve)[1] = t0_ao;
    ((uint32_t *)preserve)[2] = t0_be;
    ((uint32_t *)preserve)[3] = t0_bo;
    /* ... */

    /* x2 was kept inverted during the rounds; invert it on output */
    state->M[2].W[0] = ~x2_ae;
    state->M[2].W[1] = ~x2_ao;

    /* ... */
}
#define ROUND_CONSTANT_PAIR(rc1, rc2)
    Declares a pair of inverted 32-bit round constants, one for each of the even and odd slices.
Utility functions for operating on masked ASCON states with between 2 and 4 shares.
void ascon_x3_permute(ascon_masked_state_t *state, uint8_t first_round, uint64_t *preserve)
    Permutes the ASCON-x3 state, starting at the specified round; rounds first_round through 11 are executed.
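A minimal usage sketch (the header name here is a placeholder, not the library's confirmed include path):

#include <stdint.h>
#include "ascon-masked-state.h" /* placeholder header name */

/* Runs the full 12-round masked permutation.  The preserve buffer
 * carries four 32-bit random words from one call to the next, matching
 * the loads and stores shown in the implementation above. */
static void permute_full(ascon_masked_state_t *state, uint64_t preserve[2])
{
    /* first_round = 0 executes rounds 0..11, because the implementation
     * loops while (first_round++ < 12) */
    ascon_x3_permute(state, 0, preserve);
}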
#define ascon_mask32_rotate_share2_1(x)
    Rotates 32-bit masked share 2 with respect to share 1.
#define ascon_mask32_rotate_share2_0(x)
    Rotates 32-bit masked share 2 with respect to share 0.
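The rotation macros are named by the pair of shares they align: ascon_mask32_rotate_shareJ_I moves data from the alignment of share I to that of share J. Composing share-1-from-0 with share-2-from-1 must therefore equal share-2-from-0, which is exactly how and_not_xor mixes the shares of z. A sketch with the same assumed 5- and 9-bit offsets as before:

#include <assert.h>
#include <stdint.h>

#define ROTL(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

/* Assumed offsets; the real macros' rotation amounts may differ */
#define ascon_mask32_rotate_share1_0(x) ROTL((x), 5)
#define ascon_mask32_rotate_share2_0(x) ROTL((x), 9)
#define ascon_mask32_rotate_share2_1(x) ROTL((x), 4) /* 9 - 5 */

int main(void)
{
    uint32_t x = 0xdeadbeef;
    assert(ascon_mask32_rotate_share2_1(ascon_mask32_rotate_share1_0(x))
           == ascon_mask32_rotate_share2_0(x));
    return 0;
}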
#define and_not_xor(x, y, z, w)
    Computes x ^= (~y & z) with a 3-share masked representation.
ascon_masked_state_t
    State of the ASCON permutation which has been masked with up to 4 shares.
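For orientation, a layout consistent with the state->M[2].W[0] accesses in the code above might look like the following; this is an illustrative sketch, and the library's actual definitions may differ:

#include <stdint.h>

#define ASCON_MASKED_MAX_SHARES 4 /* assumption for illustration */

/* One 64-bit word of the state, split into up to four shares; in the
 * c32 backend each share is further split into even/odd 32-bit slices,
 * so W[0]/W[1] are the even/odd halves of share "a", W[2]/W[3] of
 * share "b", and so on. */
typedef union {
    uint64_t S[ASCON_MASKED_MAX_SHARES];
    uint32_t W[ASCON_MASKED_MAX_SHARES * 2];
} ascon_masked_word_t;

/* The five masked words x0..x4 of the ASCON permutation state, so
 * state->M[2].W[0] above is the even slice of share "a" of x2 */
typedef struct {
    ascon_masked_word_t M[5];
} ascon_masked_state_t;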