/* Lightweight Cryptography Primitives — internal-masking.h
 * (Doxygen navigation chrome from the generated documentation removed.) */
/*
 * Copyright (C) 2020 Southern Storm Software, Pty Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

23 #ifndef LW_INTERNAL_MASKING_H
24 #define LW_INTERNAL_MASKING_H
25 
26 #include "aead-random.h"
27 
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39 
46 #if !defined(AEAD_MASKING_SHARES)
47 #define AEAD_MASKING_SHARES 4
48 #endif
49 
64 #if !defined(AEAD_MASKING_KEY_ONLY)
65 #define AEAD_MASKING_KEY_ONLY 0
66 #endif
67 
/**
 * \brief Masked 16-bit word with two shares; value = a ^ b.
 */
typedef struct
{
    uint16_t a;     /**< First share of the masked word */
    uint16_t b;     /**< Second share of the masked word */

} mask_x2_uint16_t;

/**
 * \brief Masked 16-bit word with three shares; value = a ^ b ^ c.
 */
typedef struct
{
    uint16_t a;     /**< First share of the masked word */
    uint16_t b;     /**< Second share of the masked word */
    uint16_t c;     /**< Third share of the masked word */

} mask_x3_uint16_t;

/**
 * \brief Masked 16-bit word with four shares.
 */
typedef struct
{
    uint16_t a;     /**< First share of the masked word */
    uint16_t b;     /**< Second share of the masked word */
    uint16_t c;     /**< Third share of the masked word */
    uint16_t d;     /**< Fourth share of the masked word */

} mask_x4_uint16_t;

/**
 * \brief Masked 16-bit word with five shares.
 */
typedef struct
{
    uint16_t a;     /**< First share of the masked word */
    uint16_t b;     /**< Second share of the masked word */
    uint16_t c;     /**< Third share of the masked word */
    uint16_t d;     /**< Fourth share of the masked word */
    uint16_t e;     /**< Fifth share of the masked word */

} mask_x5_uint16_t;

/**
 * \brief Masked 16-bit word with six shares.
 */
typedef struct
{
    uint16_t a;     /**< First share of the masked word */
    uint16_t b;     /**< Second share of the masked word */
    uint16_t c;     /**< Third share of the masked word */
    uint16_t d;     /**< Fourth share of the masked word */
    uint16_t e;     /**< Fifth share of the masked word */
    uint16_t f;     /**< Sixth share of the masked word */

} mask_x6_uint16_t;
/**
 * \brief Masked 32-bit word with two shares; value = a ^ b.
 */
typedef struct
{
    uint32_t a;     /**< First share of the masked word */
    uint32_t b;     /**< Second share of the masked word */

} mask_x2_uint32_t;

/**
 * \brief Masked 32-bit word with three shares; value = a ^ b ^ c.
 */
typedef struct
{
    uint32_t a;     /**< First share of the masked word */
    uint32_t b;     /**< Second share of the masked word */
    uint32_t c;     /**< Third share of the masked word */

} mask_x3_uint32_t;

/**
 * \brief Masked 32-bit word with four shares.
 */
typedef struct
{
    uint32_t a;     /**< First share of the masked word */
    uint32_t b;     /**< Second share of the masked word */
    uint32_t c;     /**< Third share of the masked word */
    uint32_t d;     /**< Fourth share of the masked word */

} mask_x4_uint32_t;

/**
 * \brief Masked 32-bit word with five shares.
 */
typedef struct
{
    uint32_t a;     /**< First share of the masked word */
    uint32_t b;     /**< Second share of the masked word */
    uint32_t c;     /**< Third share of the masked word */
    uint32_t d;     /**< Fourth share of the masked word */
    uint32_t e;     /**< Fifth share of the masked word */

} mask_x5_uint32_t;

/**
 * \brief Masked 32-bit word with six shares.
 */
typedef struct
{
    uint32_t a;     /**< First share of the masked word */
    uint32_t b;     /**< Second share of the masked word */
    uint32_t c;     /**< Third share of the masked word */
    uint32_t d;     /**< Fourth share of the masked word */
    uint32_t e;     /**< Fifth share of the masked word */
    uint32_t f;     /**< Sixth share of the masked word */

} mask_x6_uint32_t;
/**
 * \brief Masked 64-bit word with two shares; value = a ^ b.
 */
typedef struct
{
    uint64_t a;     /**< First share of the masked word */
    uint64_t b;     /**< Second share of the masked word */

} mask_x2_uint64_t;

/**
 * \brief Masked 64-bit word with three shares; value = a ^ b ^ c.
 */
typedef struct
{
    uint64_t a;     /**< First share of the masked word */
    uint64_t b;     /**< Second share of the masked word */
    uint64_t c;     /**< Third share of the masked word */

} mask_x3_uint64_t;

/**
 * \brief Masked 64-bit word with four shares.
 */
typedef struct
{
    uint64_t a;     /**< First share of the masked word */
    uint64_t b;     /**< Second share of the masked word */
    uint64_t c;     /**< Third share of the masked word */
    uint64_t d;     /**< Fourth share of the masked word */

} mask_x4_uint64_t;

/**
 * \brief Masked 64-bit word with five shares.
 */
typedef struct
{
    uint64_t a;     /**< First share of the masked word */
    uint64_t b;     /**< Second share of the masked word */
    uint64_t c;     /**< Third share of the masked word */
    uint64_t d;     /**< Fourth share of the masked word */
    uint64_t e;     /**< Fifth share of the masked word */

} mask_x5_uint64_t;

/**
 * \brief Masked 64-bit word with six shares.
 */
typedef struct
{
    uint64_t a;     /**< First share of the masked word */
    uint64_t b;     /**< Second share of the masked word */
    uint64_t c;     /**< Third share of the masked word */
    uint64_t d;     /**< Fourth share of the masked word */
    uint64_t e;     /**< Fifth share of the masked word */
    uint64_t f;     /**< Sixth share of the masked word */

} mask_x6_uint64_t;
/**
 * \brief Masks an input value into a 2-share word: b = random, a = input ^ b.
 * Requires aead_random_generate_32()/aead_random_generate_64() to be seeded.
 */
#define mask_x2_input(value, input) \
    do { \
        if (sizeof((value).b) <= 4) \
            (value).b = aead_random_generate_32(); \
        else \
            (value).b = aead_random_generate_64(); \
        (value).a = (input) ^ (value).b; \
    } while (0)

/**
 * \brief Unmasks a 2-share word back into its plain value.
 */
#define mask_x2_output(value) ((value).a ^ (value).b)

/**
 * \brief Sets all shares of a 2-share word to zero (masked value 0).
 */
#define mask_x2_zero(value) \
    do { \
        (value).a = 0; \
        (value).b = 0; \
    } while (0)

/**
 * \brief XOR's a constant into a masked word; only share "a" needs changing.
 */
#define mask_x2_xor_const(value, cvalue) \
    do { \
        (value).a ^= (cvalue); \
    } while (0)

/**
 * \brief XOR's two masked words share-by-share: value1 ^= value2.
 */
#define mask_x2_xor(value1, value2) \
    do { \
        (value1).a ^= (value2).a; \
        (value1).b ^= (value2).b; \
    } while (0)

/**
 * \brief XOR's three masked words: value1 ^= value2 ^ value3.
 */
#define mask_x2_xor3(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a ^ (value3).a); \
        (value1).b ^= ((value2).b ^ (value3).b); \
    } while (0)

/**
 * \brief NOT's a masked word; flipping share "a" flips the masked value.
 */
#define mask_x2_not(value) \
    do { \
        (value).a = ~((value).a); \
    } while (0)

/**
 * \brief Inner step for AND'ing masked words: XOR's the cross-share
 * products (y0 & x1) and (y1 & x0) into the output shares, refreshed
 * with a fresh random word so no intermediate leaks.
 * Expects a variable "temp" of the share type to be in scope.
 */
#define mask_mix_and(x2, x1, x0, y2, y1, y0) \
    do { \
        if (sizeof(temp) <= 4) \
            temp = aead_random_generate_32(); \
        else \
            temp = aead_random_generate_64(); \
        (x2) ^= temp; \
        temp ^= ((y0) & (x1)); \
        (y2) = ((y2) ^ temp) ^ ((y1) & (x0)); \
    } while (0)

/**
 * \brief AND's two masked words and XOR's the result into a third:
 * value1 ^= value2 & value3.  Needs "temp" in scope.
 */
#define mask_x2_and(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a & (value3).a); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        (value1).b ^= ((value2).b & (value3).b); \
    } while (0)

/**
 * \brief Computes value1 ^= (~value2) & value3 on masked words.
 * Only the "a" share of value2 is complemented.
 */
#define mask_x2_and_not(value1, value2, value3) \
    do { \
        (value1).a ^= ((~(value2).a) & (value3).a); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        (value1).b ^= ((value2).b & (value3).b); \
    } while (0)

/**
 * \brief Computes value1 ^= value2 | value3 via De Morgan on the shares.
 */
#define mask_x2_or(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a) | ((value3).a); \
        mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        (value1).b ^= ((value2).b & (value3).b); \
    } while (0)

/**
 * \brief Left-shifts every share of a masked word: value1 = value2 << bits.
 */
#define mask_x2_shl(value1, value2, bits) \
    do { \
        (value1).a = (value2).a << (bits); \
        (value1).b = (value2).b << (bits); \
    } while (0)

/**
 * \brief Right-shifts every share of a masked word: value1 = value2 >> bits.
 */
#define mask_x2_shr(value1, value2, bits) \
    do { \
        (value1).a = (value2).a >> (bits); \
        (value1).b = (value2).b >> (bits); \
    } while (0)

/**
 * \brief Rotates every share left; "bits" must be non-zero and less than
 * the word width to avoid an undefined shift.
 */
#define mask_x2_rol(value1, value2, bits) \
    do { \
        (value1).a = ((value2).a << (bits)) | \
                     ((value2).a >> (sizeof((value1).a) * 8 - (bits))); \
        (value1).b = ((value2).b << (bits)) | \
                     ((value2).b >> (sizeof((value1).b) * 8 - (bits))); \
    } while (0)

/**
 * \brief Rotates every share right; same "bits" restriction as mask_x2_rol.
 */
#define mask_x2_ror(value1, value2, bits) \
    do { \
        (value1).a = ((value2).a >> (bits)) | \
                     ((value2).a << (sizeof((value1).a) * 8 - (bits))); \
        (value1).b = ((value2).b >> (bits)) | \
                     ((value2).b << (sizeof((value1).b) * 8 - (bits))); \
    } while (0)

/**
 * \brief Swaps two masked words share-by-share using XOR swaps.
 */
#define mask_x2_swap(value1, value2) \
    do { \
        (value1).a ^= (value2).a; \
        (value2).a ^= (value1).a; \
        (value1).a ^= (value2).a; \
        (value1).b ^= (value2).b; \
        (value2).b ^= (value1).b; \
        (value1).b ^= (value2).b; \
    } while (0)

/**
 * \brief Bit-permutation "swap and move" step on a single pair of shares.
 * Expects a variable "temp" of the share type to be in scope.
 */
#define mask_swap_move_internal(a, b, mask, shift) \
    do { \
        temp = ((b) ^ ((a) >> (shift))) & (mask); \
        (b) ^= temp; \
        (a) ^= temp << (shift); \
    } while (0)

/**
 * \brief Applies swap-and-move to every share pair of two masked words.
 */
#define mask_x2_swap_move(value1, value2, mask, shift) \
    do { \
        mask_swap_move_internal((value1).a, (value2).a, (mask), (shift)); \
        mask_swap_move_internal((value1).b, (value2).b, (mask), (shift)); \
    } while (0)
/**
 * \brief Masks an input value into a 3-share word: b, c random,
 * a = input ^ b ^ c.
 */
#define mask_x3_input(value, input) \
    do { \
        if (sizeof((value).b) <= 4) { \
            (value).b = aead_random_generate_32(); \
            (value).c = aead_random_generate_32(); \
        } else { \
            (value).b = aead_random_generate_64(); \
            (value).c = aead_random_generate_64(); \
        } \
        (value).a = (input) ^ (value).b ^ (value).c; \
    } while (0)

/**
 * \brief Unmasks a 3-share word back into its plain value.
 */
#define mask_x3_output(value) ((value).a ^ (value).b ^ (value).c)

/**
 * \brief Sets all shares of a 3-share word to zero.
 */
#define mask_x3_zero(value) \
    do { \
        (value).a = 0; \
        (value).b = 0; \
        (value).c = 0; \
    } while (0)

/**
 * \brief XOR's a constant into a masked word; only share "a" changes.
 */
#define mask_x3_xor_const(value, cvalue) \
    do { \
        (value).a ^= (cvalue); \
    } while (0)

/**
 * \brief XOR's two masked words share-by-share: value1 ^= value2.
 */
#define mask_x3_xor(value1, value2) \
    do { \
        (value1).a ^= (value2).a; \
        (value1).b ^= (value2).b; \
        (value1).c ^= (value2).c; \
    } while (0)

/**
 * \brief XOR's three masked words: value1 ^= value2 ^ value3.
 */
#define mask_x3_xor3(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a ^ (value3).a); \
        (value1).b ^= ((value2).b ^ (value3).b); \
        (value1).c ^= ((value2).c ^ (value3).c); \
    } while (0)

/**
 * \brief NOT's a masked word by complementing share "a" only.
 */
#define mask_x3_not(value) \
    do { \
        (value).a = ~((value).a); \
    } while (0)

/**
 * \brief Computes value1 ^= value2 & value3 on 3-share words; every pair
 * of shares is mixed with mask_mix_and.  Needs "temp" in scope.
 */
#define mask_x3_and(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a & (value3).a); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).c, (value2).c, (value3).c); \
        (value1).b ^= ((value2).b & (value3).b); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).c, (value2).c, (value3).c); \
        (value1).c ^= ((value2).c & (value3).c); \
    } while (0)

/**
 * \brief Computes value1 ^= (~value2) & value3 on 3-share words.
 */
#define mask_x3_and_not(value1, value2, value3) \
    do { \
        (value1).a ^= ((~(value2).a) & (value3).a); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).c, (value2).c, (value3).c); \
        (value1).b ^= ((value2).b & (value3).b); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).c, (value2).c, (value3).c); \
        (value1).c ^= ((value2).c & (value3).c); \
    } while (0)

/**
 * \brief Computes value1 ^= value2 | value3 via De Morgan on the shares.
 */
#define mask_x3_or(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a | (value3).a); \
        mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
                     (value1).c, (value2).c, (value3).c); \
        (value1).b ^= ((value2).b & (value3).b); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).c, (value2).c, (value3).c); \
        (value1).c ^= ((value2).c & (value3).c); \
    } while (0)

/**
 * \brief Left-shifts every share: value1 = value2 << bits.
 */
#define mask_x3_shl(value1, value2, bits) \
    do { \
        (value1).a = (value2).a << (bits); \
        (value1).b = (value2).b << (bits); \
        (value1).c = (value2).c << (bits); \
    } while (0)

/**
 * \brief Right-shifts every share: value1 = value2 >> bits.
 */
#define mask_x3_shr(value1, value2, bits) \
    do { \
        (value1).a = (value2).a >> (bits); \
        (value1).b = (value2).b >> (bits); \
        (value1).c = (value2).c >> (bits); \
    } while (0)

/**
 * \brief Rotates every share left; "bits" must be non-zero and less than
 * the word width.
 */
#define mask_x3_rol(value1, value2, bits) \
    do { \
        (value1).a = ((value2).a << (bits)) | \
                     ((value2).a >> (sizeof((value1).a) * 8 - (bits))); \
        (value1).b = ((value2).b << (bits)) | \
                     ((value2).b >> (sizeof((value1).b) * 8 - (bits))); \
        (value1).c = ((value2).c << (bits)) | \
                     ((value2).c >> (sizeof((value1).c) * 8 - (bits))); \
    } while (0)

/**
 * \brief Rotates every share right; same restriction as mask_x3_rol.
 */
#define mask_x3_ror(value1, value2, bits) \
    do { \
        (value1).a = ((value2).a >> (bits)) | \
                     ((value2).a << (sizeof((value1).a) * 8 - (bits))); \
        (value1).b = ((value2).b >> (bits)) | \
                     ((value2).b << (sizeof((value1).b) * 8 - (bits))); \
        (value1).c = ((value2).c >> (bits)) | \
                     ((value2).c << (sizeof((value1).c) * 8 - (bits))); \
    } while (0)

/**
 * \brief Swaps two masked words share-by-share using XOR swaps.
 */
#define mask_x3_swap(value1, value2) \
    do { \
        (value1).a ^= (value2).a; \
        (value2).a ^= (value1).a; \
        (value1).a ^= (value2).a; \
        (value1).b ^= (value2).b; \
        (value2).b ^= (value1).b; \
        (value1).b ^= (value2).b; \
        (value1).c ^= (value2).c; \
        (value2).c ^= (value1).c; \
        (value1).c ^= (value2).c; \
    } while (0)

/**
 * \brief Applies swap-and-move to every share pair; needs "temp" in scope.
 */
#define mask_x3_swap_move(value1, value2, mask, shift) \
    do { \
        mask_swap_move_internal((value1).a, (value2).a, (mask), (shift)); \
        mask_swap_move_internal((value1).b, (value2).b, (mask), (shift)); \
        mask_swap_move_internal((value1).c, (value2).c, (mask), (shift)); \
    } while (0)
/**
 * \brief Masks an input value into a 4-share word: b, c, d random,
 * a = input ^ b ^ c ^ d.
 */
#define mask_x4_input(value, input) \
    do { \
        if (sizeof((value).b) <= 4) { \
            (value).b = aead_random_generate_32(); \
            (value).c = aead_random_generate_32(); \
            (value).d = aead_random_generate_32(); \
        } else { \
            (value).b = aead_random_generate_64(); \
            (value).c = aead_random_generate_64(); \
            (value).d = aead_random_generate_64(); \
        } \
        (value).a = (input) ^ (value).b ^ (value).c ^ (value).d; \
    } while (0)

/**
 * \brief Unmasks a 4-share word back into its plain value.
 */
#define mask_x4_output(value) ((value).a ^ (value).b ^ (value).c ^ (value).d)

/**
 * \brief Sets all shares of a 4-share word to zero.
 */
#define mask_x4_zero(value) \
    do { \
        (value).a = 0; \
        (value).b = 0; \
        (value).c = 0; \
        (value).d = 0; \
    } while (0)

/**
 * \brief XOR's a constant into a masked word; only share "a" changes.
 */
#define mask_x4_xor_const(value, cvalue) \
    do { \
        (value).a ^= (cvalue); \
    } while (0)

/**
 * \brief XOR's two masked words share-by-share: value1 ^= value2.
 */
#define mask_x4_xor(value1, value2) \
    do { \
        (value1).a ^= (value2).a; \
        (value1).b ^= (value2).b; \
        (value1).c ^= (value2).c; \
        (value1).d ^= (value2).d; \
    } while (0)

/**
 * \brief XOR's three masked words: value1 ^= value2 ^ value3.
 */
#define mask_x4_xor3(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a ^ (value3).a); \
        (value1).b ^= ((value2).b ^ (value3).b); \
        (value1).c ^= ((value2).c ^ (value3).c); \
        (value1).d ^= ((value2).d ^ (value3).d); \
    } while (0)

/**
 * \brief NOT's a masked word by complementing share "a" only.
 */
#define mask_x4_not(value) \
    do { \
        (value).a = ~((value).a); \
    } while (0)

/**
 * \brief Computes value1 ^= value2 & value3 on 4-share words; every pair
 * of shares is mixed with mask_mix_and.  Needs "temp" in scope.
 */
#define mask_x4_and(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a & (value3).a); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).d, (value2).d, (value3).d); \
        (value1).b ^= ((value2).b & (value3).b); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).d, (value2).d, (value3).d); \
        (value1).c ^= ((value2).c & (value3).c); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).d, (value2).d, (value3).d); \
        (value1).d ^= ((value2).d & (value3).d); \
    } while (0)

/**
 * \brief Computes value1 ^= (~value2) & value3 on 4-share words.
 */
#define mask_x4_and_not(value1, value2, value3) \
    do { \
        (value1).a ^= ((~(value2).a) & (value3).a); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).d, (value2).d, (value3).d); \
        (value1).b ^= ((value2).b & (value3).b); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).d, (value2).d, (value3).d); \
        (value1).c ^= ((value2).c & (value3).c); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).d, (value2).d, (value3).d); \
        (value1).d ^= ((value2).d & (value3).d); \
    } while (0)

/**
 * \brief Computes value1 ^= value2 | value3 via De Morgan on the shares.
 */
#define mask_x4_or(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a | (value3).a); \
        mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
                     (value1).d, (value2).d, (value3).d); \
        (value1).b ^= ((value2).b & (value3).b); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).d, (value2).d, (value3).d); \
        (value1).c ^= ((value2).c & (value3).c); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).d, (value2).d, (value3).d); \
        (value1).d ^= ((value2).d & (value3).d); \
    } while (0)

/**
 * \brief Left-shifts every share: value1 = value2 << bits.
 */
#define mask_x4_shl(value1, value2, bits) \
    do { \
        (value1).a = (value2).a << (bits); \
        (value1).b = (value2).b << (bits); \
        (value1).c = (value2).c << (bits); \
        (value1).d = (value2).d << (bits); \
    } while (0)

/**
 * \brief Right-shifts every share: value1 = value2 >> bits.
 */
#define mask_x4_shr(value1, value2, bits) \
    do { \
        (value1).a = (value2).a >> (bits); \
        (value1).b = (value2).b >> (bits); \
        (value1).c = (value2).c >> (bits); \
        (value1).d = (value2).d >> (bits); \
    } while (0)

/**
 * \brief Rotates every share left; "bits" must be non-zero and less than
 * the word width.
 */
#define mask_x4_rol(value1, value2, bits) \
    do { \
        (value1).a = ((value2).a << (bits)) | \
                     ((value2).a >> (sizeof((value1).a) * 8 - (bits))); \
        (value1).b = ((value2).b << (bits)) | \
                     ((value2).b >> (sizeof((value1).b) * 8 - (bits))); \
        (value1).c = ((value2).c << (bits)) | \
                     ((value2).c >> (sizeof((value1).c) * 8 - (bits))); \
        (value1).d = ((value2).d << (bits)) | \
                     ((value2).d >> (sizeof((value1).d) * 8 - (bits))); \
    } while (0)

/**
 * \brief Rotates every share right; same restriction as mask_x4_rol.
 */
#define mask_x4_ror(value1, value2, bits) \
    do { \
        (value1).a = ((value2).a >> (bits)) | \
                     ((value2).a << (sizeof((value1).a) * 8 - (bits))); \
        (value1).b = ((value2).b >> (bits)) | \
                     ((value2).b << (sizeof((value1).b) * 8 - (bits))); \
        (value1).c = ((value2).c >> (bits)) | \
                     ((value2).c << (sizeof((value1).c) * 8 - (bits))); \
        (value1).d = ((value2).d >> (bits)) | \
                     ((value2).d << (sizeof((value1).d) * 8 - (bits))); \
    } while (0)

/**
 * \brief Swaps two masked words share-by-share using XOR swaps.
 */
#define mask_x4_swap(value1, value2) \
    do { \
        (value1).a ^= (value2).a; \
        (value2).a ^= (value1).a; \
        (value1).a ^= (value2).a; \
        (value1).b ^= (value2).b; \
        (value2).b ^= (value1).b; \
        (value1).b ^= (value2).b; \
        (value1).c ^= (value2).c; \
        (value2).c ^= (value1).c; \
        (value1).c ^= (value2).c; \
        (value1).d ^= (value2).d; \
        (value2).d ^= (value1).d; \
        (value1).d ^= (value2).d; \
    } while (0)

/**
 * \brief Applies swap-and-move to every share pair; needs "temp" in scope.
 */
#define mask_x4_swap_move(value1, value2, mask, shift) \
    do { \
        mask_swap_move_internal((value1).a, (value2).a, (mask), (shift)); \
        mask_swap_move_internal((value1).b, (value2).b, (mask), (shift)); \
        mask_swap_move_internal((value1).c, (value2).c, (mask), (shift)); \
        mask_swap_move_internal((value1).d, (value2).d, (mask), (shift)); \
    } while (0)
/**
 * \brief Masks an input value into a 5-share word: b..e random,
 * a = input ^ b ^ c ^ d ^ e.
 */
#define mask_x5_input(value, input) \
    do { \
        if (sizeof((value).b) <= 4) { \
            (value).b = aead_random_generate_32(); \
            (value).c = aead_random_generate_32(); \
            (value).d = aead_random_generate_32(); \
            (value).e = aead_random_generate_32(); \
        } else { \
            (value).b = aead_random_generate_64(); \
            (value).c = aead_random_generate_64(); \
            (value).d = aead_random_generate_64(); \
            (value).e = aead_random_generate_64(); \
        } \
        (value).a = (input) ^ (value).b ^ (value).c ^ (value).d ^ (value).e; \
    } while (0)

/**
 * \brief Unmasks a 5-share word back into its plain value.
 */
#define mask_x5_output(value) \
    ((value).a ^ (value).b ^ (value).c ^ (value).d ^ (value).e)

/**
 * \brief Sets all shares of a 5-share word to zero.
 */
#define mask_x5_zero(value) \
    do { \
        (value).a = 0; \
        (value).b = 0; \
        (value).c = 0; \
        (value).d = 0; \
        (value).e = 0; \
    } while (0)

/**
 * \brief XOR's a constant into a masked word; only share "a" changes.
 */
#define mask_x5_xor_const(value, cvalue) \
    do { \
        (value).a ^= (cvalue); \
    } while (0)

/**
 * \brief XOR's two masked words share-by-share: value1 ^= value2.
 */
#define mask_x5_xor(value1, value2) \
    do { \
        (value1).a ^= (value2).a; \
        (value1).b ^= (value2).b; \
        (value1).c ^= (value2).c; \
        (value1).d ^= (value2).d; \
        (value1).e ^= (value2).e; \
    } while (0)

/**
 * \brief XOR's three masked words: value1 ^= value2 ^ value3.
 */
#define mask_x5_xor3(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a ^ (value3).a); \
        (value1).b ^= ((value2).b ^ (value3).b); \
        (value1).c ^= ((value2).c ^ (value3).c); \
        (value1).d ^= ((value2).d ^ (value3).d); \
        (value1).e ^= ((value2).e ^ (value3).e); \
    } while (0)

/**
 * \brief NOT's a masked word by complementing share "a" only.
 */
#define mask_x5_not(value) \
    do { \
        (value).a = ~((value).a); \
    } while (0)

/**
 * \brief Computes value1 ^= value2 & value3 on 5-share words; every pair
 * of shares is mixed with mask_mix_and.  Needs "temp" in scope.
 */
#define mask_x5_and(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a & (value3).a); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).b ^= ((value2).b & (value3).b); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).c ^= ((value2).c & (value3).c); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).d ^= ((value2).d & (value3).d); \
        mask_mix_and((value1).d, (value2).d, (value3).d, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).e ^= ((value2).e & (value3).e); \
    } while (0)

/**
 * \brief Computes value1 ^= (~value2) & value3 on 5-share words.
 */
#define mask_x5_and_not(value1, value2, value3) \
    do { \
        (value1).a ^= ((~(value2).a) & (value3).a); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).b ^= ((value2).b & (value3).b); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).c ^= ((value2).c & (value3).c); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).d ^= ((value2).d & (value3).d); \
        mask_mix_and((value1).d, (value2).d, (value3).d, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).e ^= ((value2).e & (value3).e); \
    } while (0)

/**
 * \brief Computes value1 ^= value2 | value3 via De Morgan on the shares.
 */
#define mask_x5_or(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a | (value3).a); \
        mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).b ^= ((value2).b & (value3).b); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).c ^= ((value2).c & (value3).c); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).d ^= ((value2).d & (value3).d); \
        mask_mix_and((value1).d, (value2).d, (value3).d, \
                     (value1).e, (value2).e, (value3).e); \
        (value1).e ^= ((value2).e & (value3).e); \
    } while (0)

/**
 * \brief Left-shifts every share: value1 = value2 << bits.
 */
#define mask_x5_shl(value1, value2, bits) \
    do { \
        (value1).a = (value2).a << (bits); \
        (value1).b = (value2).b << (bits); \
        (value1).c = (value2).c << (bits); \
        (value1).d = (value2).d << (bits); \
        (value1).e = (value2).e << (bits); \
    } while (0)

/**
 * \brief Right-shifts every share: value1 = value2 >> bits.
 */
#define mask_x5_shr(value1, value2, bits) \
    do { \
        (value1).a = (value2).a >> (bits); \
        (value1).b = (value2).b >> (bits); \
        (value1).c = (value2).c >> (bits); \
        (value1).d = (value2).d >> (bits); \
        (value1).e = (value2).e >> (bits); \
    } while (0)

/**
 * \brief Rotates every share left; "bits" must be non-zero and less than
 * the word width.
 *
 * Fix: the ".e" rotation previously used sizeof((value1).d); it now uses
 * sizeof((value1).e).  Behavior is unchanged because all shares have the
 * same type, but the member reference is now correct.
 */
#define mask_x5_rol(value1, value2, bits) \
    do { \
        (value1).a = ((value2).a << (bits)) | \
                     ((value2).a >> (sizeof((value1).a) * 8 - (bits))); \
        (value1).b = ((value2).b << (bits)) | \
                     ((value2).b >> (sizeof((value1).b) * 8 - (bits))); \
        (value1).c = ((value2).c << (bits)) | \
                     ((value2).c >> (sizeof((value1).c) * 8 - (bits))); \
        (value1).d = ((value2).d << (bits)) | \
                     ((value2).d >> (sizeof((value1).d) * 8 - (bits))); \
        (value1).e = ((value2).e << (bits)) | \
                     ((value2).e >> (sizeof((value1).e) * 8 - (bits))); \
    } while (0)

/**
 * \brief Rotates every share right; same restriction and ".e" sizeof fix
 * as mask_x5_rol.
 */
#define mask_x5_ror(value1, value2, bits) \
    do { \
        (value1).a = ((value2).a >> (bits)) | \
                     ((value2).a << (sizeof((value1).a) * 8 - (bits))); \
        (value1).b = ((value2).b >> (bits)) | \
                     ((value2).b << (sizeof((value1).b) * 8 - (bits))); \
        (value1).c = ((value2).c >> (bits)) | \
                     ((value2).c << (sizeof((value1).c) * 8 - (bits))); \
        (value1).d = ((value2).d >> (bits)) | \
                     ((value2).d << (sizeof((value1).d) * 8 - (bits))); \
        (value1).e = ((value2).e >> (bits)) | \
                     ((value2).e << (sizeof((value1).e) * 8 - (bits))); \
    } while (0)

/**
 * \brief Swaps two masked words share-by-share using XOR swaps.
 */
#define mask_x5_swap(value1, value2) \
    do { \
        (value1).a ^= (value2).a; \
        (value2).a ^= (value1).a; \
        (value1).a ^= (value2).a; \
        (value1).b ^= (value2).b; \
        (value2).b ^= (value1).b; \
        (value1).b ^= (value2).b; \
        (value1).c ^= (value2).c; \
        (value2).c ^= (value1).c; \
        (value1).c ^= (value2).c; \
        (value1).d ^= (value2).d; \
        (value2).d ^= (value1).d; \
        (value1).d ^= (value2).d; \
        (value1).e ^= (value2).e; \
        (value2).e ^= (value1).e; \
        (value1).e ^= (value2).e; \
    } while (0)

/**
 * \brief Applies swap-and-move to every share pair; needs "temp" in scope.
 */
#define mask_x5_swap_move(value1, value2, mask, shift) \
    do { \
        mask_swap_move_internal((value1).a, (value2).a, (mask), (shift)); \
        mask_swap_move_internal((value1).b, (value2).b, (mask), (shift)); \
        mask_swap_move_internal((value1).c, (value2).c, (mask), (shift)); \
        mask_swap_move_internal((value1).d, (value2).d, (mask), (shift)); \
        mask_swap_move_internal((value1).e, (value2).e, (mask), (shift)); \
    } while (0)
/**
 * \brief Masks an input value into a 6-share word: b..f random,
 * a = input ^ b ^ c ^ d ^ e ^ f.
 */
#define mask_x6_input(value, input) \
    do { \
        if (sizeof((value).b) <= 4) { \
            (value).b = aead_random_generate_32(); \
            (value).c = aead_random_generate_32(); \
            (value).d = aead_random_generate_32(); \
            (value).e = aead_random_generate_32(); \
            (value).f = aead_random_generate_32(); \
        } else { \
            (value).b = aead_random_generate_64(); \
            (value).c = aead_random_generate_64(); \
            (value).d = aead_random_generate_64(); \
            (value).e = aead_random_generate_64(); \
            (value).f = aead_random_generate_64(); \
        } \
        (value).a = (input) ^ (value).b ^ (value).c ^ \
                    (value).d ^ (value).e ^ (value).f; \
    } while (0)

/**
 * \brief Unmasks a 6-share word back into its plain value.
 */
#define mask_x6_output(value) \
    ((value).a ^ (value).b ^ (value).c ^ (value).d ^ (value).e ^ (value).f)

/**
 * \brief Sets all shares of a 6-share word to zero.
 */
#define mask_x6_zero(value) \
    do { \
        (value).a = 0; \
        (value).b = 0; \
        (value).c = 0; \
        (value).d = 0; \
        (value).e = 0; \
        (value).f = 0; \
    } while (0)

/**
 * \brief XOR's a constant into a masked word; only share "a" changes.
 */
#define mask_x6_xor_const(value, cvalue) \
    do { \
        (value).a ^= (cvalue); \
    } while (0)

/**
 * \brief XOR's two masked words share-by-share: value1 ^= value2.
 */
#define mask_x6_xor(value1, value2) \
    do { \
        (value1).a ^= (value2).a; \
        (value1).b ^= (value2).b; \
        (value1).c ^= (value2).c; \
        (value1).d ^= (value2).d; \
        (value1).e ^= (value2).e; \
        (value1).f ^= (value2).f; \
    } while (0)

/**
 * \brief XOR's three masked words: value1 ^= value2 ^ value3.
 */
#define mask_x6_xor3(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a ^ (value3).a); \
        (value1).b ^= ((value2).b ^ (value3).b); \
        (value1).c ^= ((value2).c ^ (value3).c); \
        (value1).d ^= ((value2).d ^ (value3).d); \
        (value1).e ^= ((value2).e ^ (value3).e); \
        (value1).f ^= ((value2).f ^ (value3).f); \
    } while (0)

/**
 * \brief NOT's a masked word by complementing share "a" only.
 */
#define mask_x6_not(value) \
    do { \
        (value).a = ~((value).a); \
    } while (0)

/**
 * \brief Computes value1 ^= value2 & value3 on 6-share words; every pair
 * of shares is mixed with mask_mix_and.  Needs "temp" in scope.
 */
#define mask_x6_and(value1, value2, value3) \
    do { \
        (value1).a ^= ((value2).a & (value3).a); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).e, (value2).e, (value3).e); \
        mask_mix_and((value1).a, (value2).a, (value3).a, \
                     (value1).f, (value2).f, (value3).f); \
        (value1).b ^= ((value2).b & (value3).b); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).e, (value2).e, (value3).e); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).f, (value2).f, (value3).f); \
        (value1).c ^= ((value2).c & (value3).c); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).e, (value2).e, (value3).e); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).f, (value2).f, (value3).f); \
        (value1).d ^= ((value2).d & (value3).d); \
        mask_mix_and((value1).d, (value2).d, (value3).d, \
                     (value1).e, (value2).e, (value3).e); \
        mask_mix_and((value1).d, (value2).d, (value3).d, \
                     (value1).f, (value2).f, (value3).f); \
        (value1).e ^= ((value2).e & (value3).e); \
        mask_mix_and((value1).e, (value2).e, (value3).e, \
                     (value1).f, (value2).f, (value3).f); \
        (value1).f ^= ((value2).f & (value3).f); \
    } while (0)

/**
 * \brief Computes value1 ^= (~value2) & value3 on 6-share words.
 */
#define mask_x6_and_not(value1, value2, value3) \
    do { \
        (value1).a ^= ((~(value2).a) & (value3).a); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).b, (value2).b, (value3).b); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).e, (value2).e, (value3).e); \
        mask_mix_and((value1).a, ~(value2).a, (value3).a, \
                     (value1).f, (value2).f, (value3).f); \
        (value1).b ^= ((value2).b & (value3).b); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).c, (value2).c, (value3).c); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).e, (value2).e, (value3).e); \
        mask_mix_and((value1).b, (value2).b, (value3).b, \
                     (value1).f, (value2).f, (value3).f); \
        (value1).c ^= ((value2).c & (value3).c); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).d, (value2).d, (value3).d); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).e, (value2).e, (value3).e); \
        mask_mix_and((value1).c, (value2).c, (value3).c, \
                     (value1).f, (value2).f, (value3).f); \
        (value1).d ^= ((value2).d & (value3).d); \
        mask_mix_and((value1).d, (value2).d, (value3).d, \
                     (value1).e, (value2).e, (value3).e); \
        mask_mix_and((value1).d, (value2).d, (value3).d, \
                     (value1).f, (value2).f, (value3).f); \
        (value1).e ^= ((value2).e & (value3).e); \
        mask_mix_and((value1).e, (value2).e, (value3).e, \
                     (value1).f, (value2).f, (value3).f); \
        (value1).f ^= ((value2).f & (value3).f); \
    } while (0)
/**
 * \brief Masked OR on six-share words: value1 ^= (value2 | value3).
 *
 * \param value1 Destination masked word; the result is folded in with XOR.
 * \param value2 First masked input word.
 * \param value3 Second masked input word.
 *
 * The first-share term uses OR directly; the cross-share mixes that involve
 * the first shares pass the complemented inputs (~(value2).a, ~(value3).a)
 * to mask_mix_and(), consistent with x | y == ~(~x & ~y) when only the
 * first share carries the complement under XOR sharing.  The remaining
 * shares are combined exactly as in a masked AND.
 *
 * NOTE(review): as with the other mask_x6_* gadgets, the statement order
 * is presumably security-relevant -- do not reorder without review.
 */
1686 #define mask_x6_or(value1, value2, value3) \
1687  do { \
1688  (value1).a ^= ((value2).a | (value3).a); \
1689  mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1690  (value1).b, (value2).b, (value3).b); \
1691  mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1692  (value1).c, (value2).c, (value3).c); \
1693  mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1694  (value1).d, (value2).d, (value3).d); \
1695  mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1696  (value1).e, (value2).e, (value3).e); \
1697  mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1698  (value1).f, (value2).f, (value3).f); \
1699  (value1).b ^= ((value2).b & (value3).b); \
1700  mask_mix_and((value1).b, (value2).b, (value3).b, \
1701  (value1).c, (value2).c, (value3).c); \
1702  mask_mix_and((value1).b, (value2).b, (value3).b, \
1703  (value1).d, (value2).d, (value3).d); \
1704  mask_mix_and((value1).b, (value2).b, (value3).b, \
1705  (value1).e, (value2).e, (value3).e); \
1706  mask_mix_and((value1).b, (value2).b, (value3).b, \
1707  (value1).f, (value2).f, (value3).f); \
1708  (value1).c ^= ((value2).c & (value3).c); \
1709  mask_mix_and((value1).c, (value2).c, (value3).c, \
1710  (value1).d, (value2).d, (value3).d); \
1711  mask_mix_and((value1).c, (value2).c, (value3).c, \
1712  (value1).e, (value2).e, (value3).e); \
1713  mask_mix_and((value1).c, (value2).c, (value3).c, \
1714  (value1).f, (value2).f, (value3).f); \
1715  (value1).d ^= ((value2).d & (value3).d); \
1716  mask_mix_and((value1).d, (value2).d, (value3).d, \
1717  (value1).e, (value2).e, (value3).e); \
1718  mask_mix_and((value1).d, (value2).d, (value3).d, \
1719  (value1).f, (value2).f, (value3).f); \
1720  (value1).e ^= ((value2).e & (value3).e); \
1721  mask_mix_and((value1).e, (value2).e, (value3).e, \
1722  (value1).f, (value2).f, (value3).f); \
1723  (value1).f ^= ((value2).f & (value3).f); \
1724  } while (0)
1725 
/**
 * \brief Left-shifts a six-share masked word: value1 = value2 << bits.
 *
 * \param value1 Destination masked word.
 * \param value2 Source masked word.
 * \param bits Number of bit positions to shift by.
 *
 * Shifting is linear over XOR ((x ^ y) << n == (x << n) ^ (y << n)), so
 * applying the shift to each share independently shifts the masked value
 * without needing any fresh randomness.
 */
1733 #define mask_x6_shl(value1, value2, bits) \
1734  do { \
1735  (value1).a = (value2).a << (bits); \
1736  (value1).b = (value2).b << (bits); \
1737  (value1).c = (value2).c << (bits); \
1738  (value1).d = (value2).d << (bits); \
1739  (value1).e = (value2).e << (bits); \
1740  (value1).f = (value2).f << (bits); \
1741  } while (0)
1742 
/**
 * \brief Right-shifts a six-share masked word: value1 = value2 >> bits.
 *
 * \param value1 Destination masked word.
 * \param value2 Source masked word.
 * \param bits Number of bit positions to shift by.
 *
 * Like mask_x6_shl(), the shift is applied per share because logical
 * shifting is linear over the XOR sharing; no fresh randomness is needed.
 */
1750 #define mask_x6_shr(value1, value2, bits) \
1751  do { \
1752  (value1).a = (value2).a >> (bits); \
1753  (value1).b = (value2).b >> (bits); \
1754  (value1).c = (value2).c >> (bits); \
1755  (value1).d = (value2).d >> (bits); \
1756  (value1).e = (value2).e >> (bits); \
1757  (value1).f = (value2).f >> (bits); \
1758  } while (0)
1759 
/**
 * \brief Rotates a six-share masked word left: value1 = value2 rol bits.
 *
 * \param value1 Destination masked word.
 * \param value2 Source masked word.
 * \param bits Number of bit positions to rotate by.
 *
 * Rotation, like shifting, is linear over the XOR sharing, so each share
 * is rotated independently.  The word width is derived from the share's
 * own size, so the macro works for 16-, 32- and 64-bit share structures.
 *
 * NOTE(review): bits == 0 (or bits == word width) makes the complementary
 * shift count equal to the full word width, which is undefined behavior
 * in C; callers presumably pass 1 <= bits <= width-1 -- confirm against
 * the macro's Doxygen contract in the full header.
 */
1767 #define mask_x6_rol(value1, value2, bits) \
1768  do { \
1769  (value1).a = ((value2).a << (bits)) | \
1770  ((value2).a >> (sizeof((value1).a) * 8 - (bits))); \
1771  (value1).b = ((value2).b << (bits)) | \
1772  ((value2).b >> (sizeof((value1).b) * 8 - (bits))); \
1773  (value1).c = ((value2).c << (bits)) | \
1774  ((value2).c >> (sizeof((value1).c) * 8 - (bits))); \
1775  (value1).d = ((value2).d << (bits)) | \
1776  ((value2).d >> (sizeof((value1).d) * 8 - (bits))); \
1777  (value1).e = ((value2).e << (bits)) | \
1778  ((value2).e >> (sizeof((value1).e) * 8 - (bits))); \
1779  (value1).f = ((value2).f << (bits)) | \
1780  ((value2).f >> (sizeof((value1).f) * 8 - (bits))); \
1781  } while (0)
1782 
/**
 * \brief Rotates a six-share masked word right: value1 = value2 ror bits.
 *
 * \param value1 Destination masked word.
 * \param value2 Source masked word.
 * \param bits Number of bit positions to rotate by.
 *
 * Mirror image of mask_x6_rol(); each share is rotated independently
 * because rotation is linear over the XOR sharing.
 *
 * NOTE(review): as with mask_x6_rol(), bits == 0 produces a shift by the
 * full word width (undefined behavior); callers presumably guarantee
 * 1 <= bits <= width-1 -- confirm.
 */
1790 #define mask_x6_ror(value1, value2, bits) \
1791  do { \
1792  (value1).a = ((value2).a >> (bits)) | \
1793  ((value2).a << (sizeof((value1).a) * 8 - (bits))); \
1794  (value1).b = ((value2).b >> (bits)) | \
1795  ((value2).b << (sizeof((value1).b) * 8 - (bits))); \
1796  (value1).c = ((value2).c >> (bits)) | \
1797  ((value2).c << (sizeof((value1).c) * 8 - (bits))); \
1798  (value1).d = ((value2).d >> (bits)) | \
1799  ((value2).d << (sizeof((value1).d) * 8 - (bits))); \
1800  (value1).e = ((value2).e >> (bits)) | \
1801  ((value2).e << (sizeof((value1).e) * 8 - (bits))); \
1802  (value1).f = ((value2).f >> (bits)) | \
1803  ((value2).f << (sizeof((value1).f) * 8 - (bits))); \
1804  } while (0)
1805 
/**
 * \brief Swaps the contents of two six-share masked words in place.
 *
 * \param value1 First masked word.
 * \param value2 Second masked word.
 *
 * Uses the three-XOR swap idiom on each share so no temporary of the
 * (size-generic) share type is required inside the macro.
 *
 * NOTE(review): the XOR swap zeroes both operands when value1 and value2
 * designate the same object -- callers must pass distinct words.
 */
1812 #define mask_x6_swap(value1, value2) \
1813  do { \
1814  (value1).a ^= (value2).a; \
1815  (value2).a ^= (value1).a; \
1816  (value1).a ^= (value2).a; \
1817  (value1).b ^= (value2).b; \
1818  (value2).b ^= (value1).b; \
1819  (value1).b ^= (value2).b; \
1820  (value1).c ^= (value2).c; \
1821  (value2).c ^= (value1).c; \
1822  (value1).c ^= (value2).c; \
1823  (value1).d ^= (value2).d; \
1824  (value2).d ^= (value1).d; \
1825  (value1).d ^= (value2).d; \
1826  (value1).e ^= (value2).e; \
1827  (value2).e ^= (value1).e; \
1828  (value1).e ^= (value2).e; \
1829  (value1).f ^= (value2).f; \
1830  (value2).f ^= (value1).f; \
1831  (value1).f ^= (value2).f; \
1832  } while (0)
1833 
/**
 * \brief Applies a swap-and-move bit permutation to two six-share words.
 *
 * \param value1 First masked word.
 * \param value2 Second masked word.
 * \param mask Bit mask selecting the bits to exchange.
 * \param shift Shift distance between the exchanged bit positions.
 *
 * Delegates to mask_swap_move_internal() share by share with the same
 * mask and shift -- presumably the standard SWAPMOVE bit permutation used
 * by bit-sliced ciphers; see mask_swap_move_internal() for the exact
 * semantics (defined earlier in this header, outside this excerpt).
 * Bit permutations are linear, so per-share application is sound.
 */
1842 #define mask_x6_swap_move(value1, value2, mask, shift) \
1843  do { \
1844  mask_swap_move_internal((value1).a, (value2).a, (mask), (shift)); \
1845  mask_swap_move_internal((value1).b, (value2).b, (mask), (shift)); \
1846  mask_swap_move_internal((value1).c, (value2).c, (mask), (shift)); \
1847  mask_swap_move_internal((value1).d, (value2).d, (mask), (shift)); \
1848  mask_swap_move_internal((value1).e, (value2).e, (mask), (shift)); \
1849  mask_swap_move_internal((value1).f, (value2).f, (mask), (shift)); \
1850  } while (0)
1851 
1852 /* Define aliases for operating on shares in a generic fashion */
/* The generic mask_input / mask_output / mask_zero / mask_xor* / mask_not /
 * mask_and* / mask_or / shift / rotate / swap macros dispatch onto the
 * mask_xN_* family selected by AEAD_MASKING_SHARES (2 through 6).  Any
 * other share count is rejected with a compile-time #error so a bad build
 * configuration cannot silently produce unmasked code. */
1853 #if AEAD_MASKING_SHARES == 2
1857 #define mask_input(value, input) mask_x2_input((value), (input))
1858 #define mask_output(value) mask_x2_output((value))
1859 #define mask_zero(value) mask_x2_zero((value))
1860 #define mask_xor_const(value, cvalue) mask_x2_xor_const((value), (cvalue))
1861 #define mask_xor(value1, value2) mask_x2_xor((value1), (value2))
1862 #define mask_xor3(value1, value2, value3) mask_x2_xor3((value1), (value2), (value3))
1863 #define mask_not(value) mask_x2_not((value))
1864 #define mask_and(value1, value2, value3) mask_x2_and((value1), (value2), (value3))
1865 #define mask_and_not(value1, value2, value3) mask_x2_and_not((value1), (value2), (value3))
1866 #define mask_or(value1, value2, value3) mask_x2_or((value1), (value2), (value3))
1867 #define mask_shl(value1, value2, bits) mask_x2_shl((value1), (value2), (bits))
1868 #define mask_shr(value1, value2, bits) mask_x2_shr((value1), (value2), (bits))
1869 #define mask_rol(value1, value2, bits) mask_x2_rol((value1), (value2), (bits))
1870 #define mask_ror(value1, value2, bits) mask_x2_ror((value1), (value2), (bits))
1871 #define mask_swap(value1, value2) mask_x2_swap((value1), (value2))
1872 #define mask_swap_move(value1, value2, mask, shift) mask_x2_swap_move((value1), (value2), (mask), (shift))
1873 #elif AEAD_MASKING_SHARES == 3
1877 #define mask_input(value, input) mask_x3_input((value), (input))
1878 #define mask_output(value) mask_x3_output((value))
1879 #define mask_zero(value) mask_x3_zero((value))
1880 #define mask_xor_const(value, cvalue) mask_x3_xor_const((value), (cvalue))
1881 #define mask_xor(value1, value2) mask_x3_xor((value1), (value2))
1882 #define mask_xor3(value1, value2, value3) mask_x3_xor3((value1), (value2), (value3))
1883 #define mask_not(value) mask_x3_not((value))
1884 #define mask_and(value1, value2, value3) mask_x3_and((value1), (value2), (value3))
1885 #define mask_and_not(value1, value2, value3) mask_x3_and_not((value1), (value2), (value3))
1886 #define mask_or(value1, value2, value3) mask_x3_or((value1), (value2), (value3))
1887 #define mask_shl(value1, value2, bits) mask_x3_shl((value1), (value2), (bits))
1888 #define mask_shr(value1, value2, bits) mask_x3_shr((value1), (value2), (bits))
1889 #define mask_rol(value1, value2, bits) mask_x3_rol((value1), (value2), (bits))
1890 #define mask_ror(value1, value2, bits) mask_x3_ror((value1), (value2), (bits))
1891 #define mask_swap(value1, value2) mask_x3_swap((value1), (value2))
1892 #define mask_swap_move(value1, value2, mask, shift) mask_x3_swap_move((value1), (value2), (mask), (shift))
1893 #elif AEAD_MASKING_SHARES == 4
1894 
1899 
1904 
1909 
1916 #define mask_input(value, input) mask_x4_input((value), (input))
1917 
1924 #define mask_output(value) mask_x4_output((value))
1925 
1931 #define mask_zero(value) mask_x4_zero((value))
1932 
1942 #define mask_xor_const(value, cvalue) mask_x4_xor_const((value), (cvalue))
1943 
1952 #define mask_xor(value1, value2) mask_x4_xor((value1), (value2))
1953 
1963 #define mask_xor3(value1, value2, value3) mask_x4_xor3((value1), (value2), (value3))
1964 
1972 #define mask_not(value) mask_x4_not((value))
1973 
1988 #define mask_and(value1, value2, value3) mask_x4_and((value1), (value2), (value3))
1989 
2005 #define mask_and_not(value1, value2, value3) mask_x4_and_not((value1), (value2), (value3))
2006 
2021 #define mask_or(value1, value2, value3) mask_x4_or((value1), (value2), (value3))
2022 
2030 #define mask_shl(value1, value2, bits) mask_x4_shl((value1), (value2), (bits))
2031 
2039 #define mask_shr(value1, value2, bits) mask_x4_shr((value1), (value2), (bits))
2040 
2048 #define mask_rol(value1, value2, bits) mask_x4_rol((value1), (value2), (bits))
2049 
2057 #define mask_ror(value1, value2, bits) mask_x4_ror((value1), (value2), (bits))
2058 
2065 #define mask_swap(value1, value2) mask_x4_swap((value1), (value2))
2066 
2075 #define mask_swap_move(value1, value2, mask, shift) mask_x4_swap_move((value1), (value2), (mask), (shift))
2076 
2077 #elif AEAD_MASKING_SHARES == 5
2081 #define mask_input(value, input) mask_x5_input((value), (input))
2082 #define mask_output(value) mask_x5_output((value))
2083 #define mask_zero(value) mask_x5_zero((value))
2084 #define mask_xor_const(value, cvalue) mask_x5_xor_const((value), (cvalue))
2085 #define mask_xor(value1, value2) mask_x5_xor((value1), (value2))
2086 #define mask_xor3(value1, value2, value3) mask_x5_xor3((value1), (value2), (value3))
2087 #define mask_not(value) mask_x5_not((value))
2088 #define mask_and(value1, value2, value3) mask_x5_and((value1), (value2), (value3))
2089 #define mask_and_not(value1, value2, value3) mask_x5_and_not((value1), (value2), (value3))
2090 #define mask_or(value1, value2, value3) mask_x5_or((value1), (value2), (value3))
2091 #define mask_shl(value1, value2, bits) mask_x5_shl((value1), (value2), (bits))
2092 #define mask_shr(value1, value2, bits) mask_x5_shr((value1), (value2), (bits))
2093 #define mask_rol(value1, value2, bits) mask_x5_rol((value1), (value2), (bits))
2094 #define mask_ror(value1, value2, bits) mask_x5_ror((value1), (value2), (bits))
2095 #define mask_swap(value1, value2) mask_x5_swap((value1), (value2))
2096 #define mask_swap_move(value1, value2, mask, shift) mask_x5_swap_move((value1), (value2), (mask), (shift))
2097 #elif AEAD_MASKING_SHARES == 6
2101 #define mask_input(value, input) mask_x6_input((value), (input))
2102 #define mask_output(value) mask_x6_output((value))
2103 #define mask_zero(value) mask_x6_zero((value))
2104 #define mask_xor_const(value, cvalue) mask_x6_xor_const((value), (cvalue))
2105 #define mask_xor(value1, value2) mask_x6_xor((value1), (value2))
2106 #define mask_xor3(value1, value2, value3) mask_x6_xor3((value1), (value2), (value3))
2107 #define mask_not(value) mask_x6_not((value))
2108 #define mask_and(value1, value2, value3) mask_x6_and((value1), (value2), (value3))
2109 #define mask_and_not(value1, value2, value3) mask_x6_and_not((value1), (value2), (value3))
2110 #define mask_or(value1, value2, value3) mask_x6_or((value1), (value2), (value3))
2111 #define mask_shl(value1, value2, bits) mask_x6_shl((value1), (value2), (bits))
2112 #define mask_shr(value1, value2, bits) mask_x6_shr((value1), (value2), (bits))
2113 #define mask_rol(value1, value2, bits) mask_x6_rol((value1), (value2), (bits))
2114 #define mask_ror(value1, value2, bits) mask_x6_ror((value1), (value2), (bits))
2115 #define mask_swap(value1, value2) mask_x6_swap((value1), (value2))
2116 #define mask_swap_move(value1, value2, mask, shift) mask_x6_swap_move((value1), (value2), (mask), (shift))
2117 #else
2118 #error "AEAD_MASKING_SHARES value is not supported"
2119 #endif
2120 
2121 #ifdef __cplusplus
2122 }
2123 #endif
2124 
2125 #endif
uint32_t d
Definition: internal-masking.h:169
uint32_t a
Definition: internal-masking.h:166
Masked 16-bit word with six shares.
Definition: internal-masking.h:117
uint64_t d
Definition: internal-masking.h:229
uint16_t a
Definition: internal-masking.h:106
uint32_t a
Definition: internal-masking.h:179
uint16_t b
Definition: internal-masking.h:120
uint32_t c
Definition: internal-masking.h:168
uint16_t d
Definition: internal-masking.h:109
uint16_t c
Definition: internal-masking.h:85
uint32_t b
Definition: internal-masking.h:167
uint32_t c
Definition: internal-masking.h:156
Masked 32-bit word with five shares.
Definition: internal-masking.h:164
uint16_t a
Definition: internal-masking.h:119
uint64_t d
Definition: internal-masking.h:242
uint64_t a
Definition: internal-masking.h:214
uint16_t a
Definition: internal-masking.h:83
Masked 64-bit word with five shares.
Definition: internal-masking.h:224
uint16_t b
Definition: internal-masking.h:74
uint64_t b
Definition: internal-masking.h:240
uint16_t c
Definition: internal-masking.h:108
uint32_t a
Definition: internal-masking.h:133
uint32_t b
Definition: internal-masking.h:180
uint64_t a
Definition: internal-masking.h:239
uint32_t a
Definition: internal-masking.h:154
uint32_t d
Definition: internal-masking.h:157
uint16_t a
Definition: internal-masking.h:94
uint16_t c
Definition: internal-masking.h:96
uint16_t e
Definition: internal-masking.h:110
Masked 16-bit word with five shares.
Definition: internal-masking.h:104
uint16_t a
Definition: internal-masking.h:73
Masked 32-bit word with three shares.
Definition: internal-masking.h:141
uint32_t b
Definition: internal-masking.h:155
uint64_t d
Definition: internal-masking.h:217
uint64_t e
Definition: internal-masking.h:230
uint32_t f
Definition: internal-masking.h:184
uint32_t c
Definition: internal-masking.h:181
uint64_t c
Definition: internal-masking.h:205
uint32_t b
Definition: internal-masking.h:144
uint16_t f
Definition: internal-masking.h:124
mask_x4_uint16_t mask_uint16_t
Generic masked 16-bit word.
Definition: internal-masking.h:1898
uint64_t f
Definition: internal-masking.h:244
uint64_t c
Definition: internal-masking.h:241
Masked 64-bit word with three shares.
Definition: internal-masking.h:201
uint16_t b
Definition: internal-masking.h:84
Masked 32-bit word with two shares.
Definition: internal-masking.h:131
uint16_t d
Definition: internal-masking.h:122
uint16_t b
Definition: internal-masking.h:95
uint64_t a
Definition: internal-masking.h:226
uint64_t b
Definition: internal-masking.h:227
uint64_t b
Definition: internal-masking.h:215
uint32_t e
Definition: internal-masking.h:170
uint64_t c
Definition: internal-masking.h:228
Masked 32-bit word with four shares.
Definition: internal-masking.h:152
Masked 64-bit word with two shares.
Definition: internal-masking.h:191
uint32_t c
Definition: internal-masking.h:145
uint16_t e
Definition: internal-masking.h:123
Masked 16-bit word with three shares.
Definition: internal-masking.h:81
uint32_t d
Definition: internal-masking.h:182
Masked 32-bit word with six shares.
Definition: internal-masking.h:177
uint64_t b
Definition: internal-masking.h:194
mask_x4_uint64_t mask_uint64_t
Generic masked 64-bit word.
Definition: internal-masking.h:1908
Masked 64-bit word with four shares.
Definition: internal-masking.h:212
uint32_t a
Definition: internal-masking.h:143
uint64_t a
Definition: internal-masking.h:193
Masked 16-bit word with two shares.
Definition: internal-masking.h:71
mask_x4_uint32_t mask_uint32_t
Generic masked 32-bit word.
Definition: internal-masking.h:1903
uint32_t b
Definition: internal-masking.h:134
uint32_t e
Definition: internal-masking.h:183
Masked 64-bit word with six shares.
Definition: internal-masking.h:237
uint16_t d
Definition: internal-masking.h:97
uint16_t c
Definition: internal-masking.h:121
Utilities that help with the generation of random masking material.
uint64_t e
Definition: internal-masking.h:243
Masked 16-bit word with four shares.
Definition: internal-masking.h:92
uint64_t c
Definition: internal-masking.h:216
uint16_t b
Definition: internal-masking.h:107
uint64_t a
Definition: internal-masking.h:203
uint64_t b
Definition: internal-masking.h:204