23 #ifndef LW_INTERNAL_MASKING_H
24 #define LW_INTERNAL_MASKING_H
46 #if !defined(AEAD_MASKING_SHARES)
47 #define AEAD_MASKING_SHARES 4
64 #if !defined(AEAD_MASKING_KEY_ONLY)
65 #define AEAD_MASKING_KEY_ONLY 0
/* Split a plain word into two random shares: .b receives fresh
 * randomness (32- or 64-bit generator selected by the share width)
 * and .a = input ^ .b, so that .a ^ .b recovers the input.
 * NOTE(review): this listing elides the original do/else/while
 * continuation lines (255, 258, 261) — confirm against the full file. */
254 #define mask_x2_input(value, input) \
256 if (sizeof((value).b) <= 4) \
257 (value).b = aead_random_generate_32(); \
259 (value).b = aead_random_generate_64(); \
260 (value).a = (input) ^ (value).b; \
/* Unmask a 2-share value by XOR-ing the shares; yields the plain word,
 * so use only at the very end of a masked computation. */
269 #define mask_x2_output(value) ((value).a ^ (value).b)
276 #define mask_x2_zero(value) \
291 #define mask_x2_xor_const(value, cvalue) \
293 (value).a ^= (cvalue); \
304 #define mask_x2_xor(value1, value2) \
306 (value1).a ^= (value2).a; \
307 (value1).b ^= (value2).b; \
319 #define mask_x2_xor3(value1, value2, value3) \
321 (value1).a ^= ((value2).a ^ (value3).a); \
322 (value1).b ^= ((value2).b ^ (value3).b); \
332 #define mask_x2_not(value) \
334 (value).a = ~((value).a); \
/* Cross-share mixing gadget (cf. ISW private-circuits style) used by
 * every mask_xN_and/and_not/or macro: draws a fresh random word into
 * 'temp' and folds the x/y cross terms through it so no single
 * intermediate value equals an unmasked AND of the operands.
 * NOTE(review): 'temp' is not a parameter — it must be declared by the
 * enclosing scope and match the share width; confirm at call sites.
 * NOTE(review): lines 341/344/346 of the original are elided here. */
340 #define mask_mix_and(x2, x1, x0, y2, y1, y0) \
342 if (sizeof(temp) <= 4) \
343 temp = aead_random_generate_32(); \
345 temp = aead_random_generate_64(); \
347 temp ^= ((y0) & (x1)); \
348 (y2) = ((y2) ^ temp) ^ ((y1) & (x0)); \
367 #define mask_x2_and(value1, value2, value3) \
369 (value1).a ^= ((value2).a & (value3).a); \
370 mask_mix_and((value1).a, (value2).a, (value3).a, \
371 (value1).b, (value2).b, (value3).b); \
372 (value1).b ^= ((value2).b & (value3).b); \
390 #define mask_x2_and_not(value1, value2, value3) \
392 (value1).a ^= ((~(value2).a) & (value3).a); \
393 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
394 (value1).b, (value2).b, (value3).b); \
395 (value1).b ^= ((value2).b & (value3).b); \
412 #define mask_x2_or(value1, value2, value3) \
414 (value1).a ^= ((value2).a) | ((value3).a); \
415 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
416 (value1).b, (value2).b, (value3).b); \
417 (value1).b ^= ((value2).b & (value3).b); \
427 #define mask_x2_shl(value1, value2, bits) \
429 (value1).a = (value2).a << (bits); \
430 (value1).b = (value2).b << (bits); \
440 #define mask_x2_shr(value1, value2, bits) \
442 (value1).a = (value2).a >> (bits); \
443 (value1).b = (value2).b >> (bits); \
453 #define mask_x2_rol(value1, value2, bits) \
455 (value1).a = ((value2).a << (bits)) | \
456 ((value2).a >> (sizeof((value1).a) * 8 - (bits))); \
457 (value1).b = ((value2).b << (bits)) | \
458 ((value2).b >> (sizeof((value1).b) * 8 - (bits))); \
468 #define mask_x2_ror(value1, value2, bits) \
470 (value1).a = ((value2).a >> (bits)) | \
471 ((value2).a << (sizeof((value1).a) * 8 - (bits))); \
472 (value1).b = ((value2).b >> (bits)) | \
473 ((value2).b << (sizeof((value1).b) * 8 - (bits))); \
482 #define mask_x2_swap(value1, value2) \
484 (value1).a ^= (value2).a; \
485 (value2).a ^= (value1).a; \
486 (value1).a ^= (value2).a; \
487 (value1).b ^= (value2).b; \
488 (value2).b ^= (value1).b; \
489 (value1).b ^= (value2).b; \
/* One bit-permutation "swap-and-move" step: exchanges the bit groups
 * selected by 'mask' between b and (a >> shift), using the
 * caller-declared 'temp' variable as scratch.
 * NOTE(review): the middle statement (original line 497, presumably
 * (b) ^= temp;) is elided in this listing — confirm before relying on
 * this view. */
494 #define mask_swap_move_internal(a, b, mask, shift) \
496 temp = ((b) ^ ((a) >> (shift))) & (mask); \
498 (a) ^= temp << (shift); \
511 #define mask_x2_swap_move(value1, value2, mask, shift) \
513 mask_swap_move_internal((value1).a, (value2).a, (mask), (shift)); \
514 mask_swap_move_internal((value1).b, (value2).b, (mask), (shift)); \
523 #define mask_x3_input(value, input) \
525 if (sizeof((value).b) <= 4) { \
526 (value).b = aead_random_generate_32(); \
527 (value).c = aead_random_generate_32(); \
529 (value).b = aead_random_generate_64(); \
530 (value).c = aead_random_generate_64(); \
532 (value).a = (input) ^ (value).b ^ (value).c; \
/* Unmask a 3-share value: XOR of all three shares yields the plain word. */
541 #define mask_x3_output(value) ((value).a ^ (value).b ^ (value).c)
548 #define mask_x3_zero(value) \
564 #define mask_x3_xor_const(value, cvalue) \
566 (value).a ^= (cvalue); \
577 #define mask_x3_xor(value1, value2) \
579 (value1).a ^= (value2).a; \
580 (value1).b ^= (value2).b; \
581 (value1).c ^= (value2).c; \
593 #define mask_x3_xor3(value1, value2, value3) \
595 (value1).a ^= ((value2).a ^ (value3).a); \
596 (value1).b ^= ((value2).b ^ (value3).b); \
597 (value1).c ^= ((value2).c ^ (value3).c); \
607 #define mask_x3_not(value) \
609 (value).a = ~((value).a); \
626 #define mask_x3_and(value1, value2, value3) \
628 (value1).a ^= ((value2).a & (value3).a); \
629 mask_mix_and((value1).a, (value2).a, (value3).a, \
630 (value1).b, (value2).b, (value3).b); \
631 mask_mix_and((value1).a, (value2).a, (value3).a, \
632 (value1).c, (value2).c, (value3).c); \
633 (value1).b ^= ((value2).b & (value3).b); \
634 mask_mix_and((value1).b, (value2).b, (value3).b, \
635 (value1).c, (value2).c, (value3).c); \
636 (value1).c ^= ((value2).c & (value3).c); \
654 #define mask_x3_and_not(value1, value2, value3) \
656 (value1).a ^= ((~(value2).a) & (value3).a); \
657 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
658 (value1).b, (value2).b, (value3).b); \
659 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
660 (value1).c, (value2).c, (value3).c); \
661 (value1).b ^= ((value2).b & (value3).b); \
662 mask_mix_and((value1).b, (value2).b, (value3).b, \
663 (value1).c, (value2).c, (value3).c); \
664 (value1).c ^= ((value2).c & (value3).c); \
681 #define mask_x3_or(value1, value2, value3) \
683 (value1).a ^= ((value2).a | (value3).a); \
684 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
685 (value1).b, (value2).b, (value3).b); \
686 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
687 (value1).c, (value2).c, (value3).c); \
688 (value1).b ^= ((value2).b & (value3).b); \
689 mask_mix_and((value1).b, (value2).b, (value3).b, \
690 (value1).c, (value2).c, (value3).c); \
691 (value1).c ^= ((value2).c & (value3).c); \
701 #define mask_x3_shl(value1, value2, bits) \
703 (value1).a = (value2).a << (bits); \
704 (value1).b = (value2).b << (bits); \
705 (value1).c = (value2).c << (bits); \
715 #define mask_x3_shr(value1, value2, bits) \
717 (value1).a = (value2).a >> (bits); \
718 (value1).b = (value2).b >> (bits); \
719 (value1).c = (value2).c >> (bits); \
729 #define mask_x3_rol(value1, value2, bits) \
731 (value1).a = ((value2).a << (bits)) | \
732 ((value2).a >> (sizeof((value1).a) * 8 - (bits))); \
733 (value1).b = ((value2).b << (bits)) | \
734 ((value2).b >> (sizeof((value1).b) * 8 - (bits))); \
735 (value1).c = ((value2).c << (bits)) | \
736 ((value2).c >> (sizeof((value1).c) * 8 - (bits))); \
746 #define mask_x3_ror(value1, value2, bits) \
748 (value1).a = ((value2).a >> (bits)) | \
749 ((value2).a << (sizeof((value1).a) * 8 - (bits))); \
750 (value1).b = ((value2).b >> (bits)) | \
751 ((value2).b << (sizeof((value1).b) * 8 - (bits))); \
752 (value1).c = ((value2).c >> (bits)) | \
753 ((value2).c << (sizeof((value1).c) * 8 - (bits))); \
762 #define mask_x3_swap(value1, value2) \
764 (value1).a ^= (value2).a; \
765 (value2).a ^= (value1).a; \
766 (value1).a ^= (value2).a; \
767 (value1).b ^= (value2).b; \
768 (value2).b ^= (value1).b; \
769 (value1).b ^= (value2).b; \
770 (value1).c ^= (value2).c; \
771 (value2).c ^= (value1).c; \
772 (value1).c ^= (value2).c; \
783 #define mask_x3_swap_move(value1, value2, mask, shift) \
785 mask_swap_move_internal((value1).a, (value2).a, (mask), (shift)); \
786 mask_swap_move_internal((value1).b, (value2).b, (mask), (shift)); \
787 mask_swap_move_internal((value1).c, (value2).c, (mask), (shift)); \
796 #define mask_x4_input(value, input) \
798 if (sizeof((value).b) <= 4) { \
799 (value).b = aead_random_generate_32(); \
800 (value).c = aead_random_generate_32(); \
801 (value).d = aead_random_generate_32(); \
803 (value).b = aead_random_generate_64(); \
804 (value).c = aead_random_generate_64(); \
805 (value).d = aead_random_generate_64(); \
807 (value).a = (input) ^ (value).b ^ (value).c ^ (value).d; \
/* Unmask a 4-share value: XOR of all four shares yields the plain word. */
816 #define mask_x4_output(value) ((value).a ^ (value).b ^ (value).c ^ (value).d)
823 #define mask_x4_zero(value) \
840 #define mask_x4_xor_const(value, cvalue) \
842 (value).a ^= (cvalue); \
853 #define mask_x4_xor(value1, value2) \
855 (value1).a ^= (value2).a; \
856 (value1).b ^= (value2).b; \
857 (value1).c ^= (value2).c; \
858 (value1).d ^= (value2).d; \
870 #define mask_x4_xor3(value1, value2, value3) \
872 (value1).a ^= ((value2).a ^ (value3).a); \
873 (value1).b ^= ((value2).b ^ (value3).b); \
874 (value1).c ^= ((value2).c ^ (value3).c); \
875 (value1).d ^= ((value2).d ^ (value3).d); \
885 #define mask_x4_not(value) \
887 (value).a = ~((value).a); \
904 #define mask_x4_and(value1, value2, value3) \
906 (value1).a ^= ((value2).a & (value3).a); \
907 mask_mix_and((value1).a, (value2).a, (value3).a, \
908 (value1).b, (value2).b, (value3).b); \
909 mask_mix_and((value1).a, (value2).a, (value3).a, \
910 (value1).c, (value2).c, (value3).c); \
911 mask_mix_and((value1).a, (value2).a, (value3).a, \
912 (value1).d, (value2).d, (value3).d); \
913 (value1).b ^= ((value2).b & (value3).b); \
914 mask_mix_and((value1).b, (value2).b, (value3).b, \
915 (value1).c, (value2).c, (value3).c); \
916 mask_mix_and((value1).b, (value2).b, (value3).b, \
917 (value1).d, (value2).d, (value3).d); \
918 (value1).c ^= ((value2).c & (value3).c); \
919 mask_mix_and((value1).c, (value2).c, (value3).c, \
920 (value1).d, (value2).d, (value3).d); \
921 (value1).d ^= ((value2).d & (value3).d); \
939 #define mask_x4_and_not(value1, value2, value3) \
941 (value1).a ^= ((~(value2).a) & (value3).a); \
942 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
943 (value1).b, (value2).b, (value3).b); \
944 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
945 (value1).c, (value2).c, (value3).c); \
946 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
947 (value1).d, (value2).d, (value3).d); \
948 (value1).b ^= ((value2).b & (value3).b); \
949 mask_mix_and((value1).b, (value2).b, (value3).b, \
950 (value1).c, (value2).c, (value3).c); \
951 mask_mix_and((value1).b, (value2).b, (value3).b, \
952 (value1).d, (value2).d, (value3).d); \
953 (value1).c ^= ((value2).c & (value3).c); \
954 mask_mix_and((value1).c, (value2).c, (value3).c, \
955 (value1).d, (value2).d, (value3).d); \
956 (value1).d ^= ((value2).d & (value3).d); \
973 #define mask_x4_or(value1, value2, value3) \
975 (value1).a ^= ((value2).a | (value3).a); \
976 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
977 (value1).b, (value2).b, (value3).b); \
978 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
979 (value1).c, (value2).c, (value3).c); \
980 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
981 (value1).d, (value2).d, (value3).d); \
982 (value1).b ^= ((value2).b & (value3).b); \
983 mask_mix_and((value1).b, (value2).b, (value3).b, \
984 (value1).c, (value2).c, (value3).c); \
985 mask_mix_and((value1).b, (value2).b, (value3).b, \
986 (value1).d, (value2).d, (value3).d); \
987 (value1).c ^= ((value2).c & (value3).c); \
988 mask_mix_and((value1).c, (value2).c, (value3).c, \
989 (value1).d, (value2).d, (value3).d); \
990 (value1).d ^= ((value2).d & (value3).d); \
1000 #define mask_x4_shl(value1, value2, bits) \
1002 (value1).a = (value2).a << (bits); \
1003 (value1).b = (value2).b << (bits); \
1004 (value1).c = (value2).c << (bits); \
1005 (value1).d = (value2).d << (bits); \
1015 #define mask_x4_shr(value1, value2, bits) \
1017 (value1).a = (value2).a >> (bits); \
1018 (value1).b = (value2).b >> (bits); \
1019 (value1).c = (value2).c >> (bits); \
1020 (value1).d = (value2).d >> (bits); \
1030 #define mask_x4_rol(value1, value2, bits) \
1032 (value1).a = ((value2).a << (bits)) | \
1033 ((value2).a >> (sizeof((value1).a) * 8 - (bits))); \
1034 (value1).b = ((value2).b << (bits)) | \
1035 ((value2).b >> (sizeof((value1).b) * 8 - (bits))); \
1036 (value1).c = ((value2).c << (bits)) | \
1037 ((value2).c >> (sizeof((value1).c) * 8 - (bits))); \
1038 (value1).d = ((value2).d << (bits)) | \
1039 ((value2).d >> (sizeof((value1).d) * 8 - (bits))); \
1049 #define mask_x4_ror(value1, value2, bits) \
1051 (value1).a = ((value2).a >> (bits)) | \
1052 ((value2).a << (sizeof((value1).a) * 8 - (bits))); \
1053 (value1).b = ((value2).b >> (bits)) | \
1054 ((value2).b << (sizeof((value1).b) * 8 - (bits))); \
1055 (value1).c = ((value2).c >> (bits)) | \
1056 ((value2).c << (sizeof((value1).c) * 8 - (bits))); \
1057 (value1).d = ((value2).d >> (bits)) | \
1058 ((value2).d << (sizeof((value1).d) * 8 - (bits))); \
1067 #define mask_x4_swap(value1, value2) \
1069 (value1).a ^= (value2).a; \
1070 (value2).a ^= (value1).a; \
1071 (value1).a ^= (value2).a; \
1072 (value1).b ^= (value2).b; \
1073 (value2).b ^= (value1).b; \
1074 (value1).b ^= (value2).b; \
1075 (value1).c ^= (value2).c; \
1076 (value2).c ^= (value1).c; \
1077 (value1).c ^= (value2).c; \
1078 (value1).d ^= (value2).d; \
1079 (value2).d ^= (value1).d; \
1080 (value1).d ^= (value2).d; \
1091 #define mask_x4_swap_move(value1, value2, mask, shift) \
1093 mask_swap_move_internal((value1).a, (value2).a, (mask), (shift)); \
1094 mask_swap_move_internal((value1).b, (value2).b, (mask), (shift)); \
1095 mask_swap_move_internal((value1).c, (value2).c, (mask), (shift)); \
1096 mask_swap_move_internal((value1).d, (value2).d, (mask), (shift)); \
1105 #define mask_x5_input(value, input) \
1107 if (sizeof((value).b) <= 4) { \
1108 (value).b = aead_random_generate_32(); \
1109 (value).c = aead_random_generate_32(); \
1110 (value).d = aead_random_generate_32(); \
1111 (value).e = aead_random_generate_32(); \
1113 (value).b = aead_random_generate_64(); \
1114 (value).c = aead_random_generate_64(); \
1115 (value).d = aead_random_generate_64(); \
1116 (value).e = aead_random_generate_64(); \
1118 (value).a = (input) ^ (value).b ^ (value).c ^ (value).d ^ (value).e; \
/* Unmask a 5-share value: XOR of all five shares yields the plain word. */
1127 #define mask_x5_output(value) \
1128 ((value).a ^ (value).b ^ (value).c ^ (value).d ^ (value).e)
1135 #define mask_x5_zero(value) \
1153 #define mask_x5_xor_const(value, cvalue) \
1155 (value).a ^= (cvalue); \
1166 #define mask_x5_xor(value1, value2) \
1168 (value1).a ^= (value2).a; \
1169 (value1).b ^= (value2).b; \
1170 (value1).c ^= (value2).c; \
1171 (value1).d ^= (value2).d; \
1172 (value1).e ^= (value2).e; \
1184 #define mask_x5_xor3(value1, value2, value3) \
1186 (value1).a ^= ((value2).a ^ (value3).a); \
1187 (value1).b ^= ((value2).b ^ (value3).b); \
1188 (value1).c ^= ((value2).c ^ (value3).c); \
1189 (value1).d ^= ((value2).d ^ (value3).d); \
1190 (value1).e ^= ((value2).e ^ (value3).e); \
1200 #define mask_x5_not(value) \
1202 (value).a = ~((value).a); \
1219 #define mask_x5_and(value1, value2, value3) \
1221 (value1).a ^= ((value2).a & (value3).a); \
1222 mask_mix_and((value1).a, (value2).a, (value3).a, \
1223 (value1).b, (value2).b, (value3).b); \
1224 mask_mix_and((value1).a, (value2).a, (value3).a, \
1225 (value1).c, (value2).c, (value3).c); \
1226 mask_mix_and((value1).a, (value2).a, (value3).a, \
1227 (value1).d, (value2).d, (value3).d); \
1228 mask_mix_and((value1).a, (value2).a, (value3).a, \
1229 (value1).e, (value2).e, (value3).e); \
1230 (value1).b ^= ((value2).b & (value3).b); \
1231 mask_mix_and((value1).b, (value2).b, (value3).b, \
1232 (value1).c, (value2).c, (value3).c); \
1233 mask_mix_and((value1).b, (value2).b, (value3).b, \
1234 (value1).d, (value2).d, (value3).d); \
1235 mask_mix_and((value1).b, (value2).b, (value3).b, \
1236 (value1).e, (value2).e, (value3).e); \
1237 (value1).c ^= ((value2).c & (value3).c); \
1238 mask_mix_and((value1).c, (value2).c, (value3).c, \
1239 (value1).d, (value2).d, (value3).d); \
1240 mask_mix_and((value1).c, (value2).c, (value3).c, \
1241 (value1).e, (value2).e, (value3).e); \
1242 (value1).d ^= ((value2).d & (value3).d); \
1243 mask_mix_and((value1).d, (value2).d, (value3).d, \
1244 (value1).e, (value2).e, (value3).e); \
1245 (value1).e ^= ((value2).e & (value3).e); \
1263 #define mask_x5_and_not(value1, value2, value3) \
1265 (value1).a ^= ((~(value2).a) & (value3).a); \
1266 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
1267 (value1).b, (value2).b, (value3).b); \
1268 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
1269 (value1).c, (value2).c, (value3).c); \
1270 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
1271 (value1).d, (value2).d, (value3).d); \
1272 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
1273 (value1).e, (value2).e, (value3).e); \
1274 (value1).b ^= ((value2).b & (value3).b); \
1275 mask_mix_and((value1).b, (value2).b, (value3).b, \
1276 (value1).c, (value2).c, (value3).c); \
1277 mask_mix_and((value1).b, (value2).b, (value3).b, \
1278 (value1).d, (value2).d, (value3).d); \
1279 mask_mix_and((value1).b, (value2).b, (value3).b, \
1280 (value1).e, (value2).e, (value3).e); \
1281 (value1).c ^= ((value2).c & (value3).c); \
1282 mask_mix_and((value1).c, (value2).c, (value3).c, \
1283 (value1).d, (value2).d, (value3).d); \
1284 mask_mix_and((value1).c, (value2).c, (value3).c, \
1285 (value1).e, (value2).e, (value3).e); \
1286 (value1).d ^= ((value2).d & (value3).d); \
1287 mask_mix_and((value1).d, (value2).d, (value3).d, \
1288 (value1).e, (value2).e, (value3).e); \
1289 (value1).e ^= ((value2).e & (value3).e); \
1306 #define mask_x5_or(value1, value2, value3) \
1308 (value1).a ^= ((value2).a | (value3).a); \
1309 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1310 (value1).b, (value2).b, (value3).b); \
1311 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1312 (value1).c, (value2).c, (value3).c); \
1313 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1314 (value1).d, (value2).d, (value3).d); \
1315 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1316 (value1).e, (value2).e, (value3).e); \
1317 (value1).b ^= ((value2).b & (value3).b); \
1318 mask_mix_and((value1).b, (value2).b, (value3).b, \
1319 (value1).c, (value2).c, (value3).c); \
1320 mask_mix_and((value1).b, (value2).b, (value3).b, \
1321 (value1).d, (value2).d, (value3).d); \
1322 mask_mix_and((value1).b, (value2).b, (value3).b, \
1323 (value1).e, (value2).e, (value3).e); \
1324 (value1).c ^= ((value2).c & (value3).c); \
1325 mask_mix_and((value1).c, (value2).c, (value3).c, \
1326 (value1).d, (value2).d, (value3).d); \
1327 mask_mix_and((value1).c, (value2).c, (value3).c, \
1328 (value1).e, (value2).e, (value3).e); \
1329 (value1).d ^= ((value2).d & (value3).d); \
1330 mask_mix_and((value1).d, (value2).d, (value3).d, \
1331 (value1).e, (value2).e, (value3).e); \
1332 (value1).e ^= ((value2).e & (value3).e); \
1342 #define mask_x5_shl(value1, value2, bits) \
1344 (value1).a = (value2).a << (bits); \
1345 (value1).b = (value2).b << (bits); \
1346 (value1).c = (value2).c << (bits); \
1347 (value1).d = (value2).d << (bits); \
1348 (value1).e = (value2).e << (bits); \
1358 #define mask_x5_shr(value1, value2, bits) \
1360 (value1).a = (value2).a >> (bits); \
1361 (value1).b = (value2).b >> (bits); \
1362 (value1).c = (value2).c >> (bits); \
1363 (value1).d = (value2).d >> (bits); \
1364 (value1).e = (value2).e >> (bits); \
/* Rotate each of the five shares of 'value2' left by 'bits' and store
 * the result in 'value1'.  Rotating share-wise preserves the masking
 * because rotation is linear over XOR.
 * Fix: the .e share previously computed its word width with
 * sizeof((value1).d) — a copy-paste slip from the .d line.  It is
 * harmless only because all shares have the same type; corrected to
 * sizeof((value1).e) for consistency with the other shares.
 * NOTE(review): 'bits' must be nonzero and less than the share width,
 * or the complementary shift count is UB — same constraint as the
 * other rotate macros in this file. */
1374 #define mask_x5_rol(value1, value2, bits) \
1376 (value1).a = ((value2).a << (bits)) | \
1377 ((value2).a >> (sizeof((value1).a) * 8 - (bits))); \
1378 (value1).b = ((value2).b << (bits)) | \
1379 ((value2).b >> (sizeof((value1).b) * 8 - (bits))); \
1380 (value1).c = ((value2).c << (bits)) | \
1381 ((value2).c >> (sizeof((value1).c) * 8 - (bits))); \
1382 (value1).d = ((value2).d << (bits)) | \
1383 ((value2).d >> (sizeof((value1).d) * 8 - (bits))); \
1384 (value1).e = ((value2).e << (bits)) | \
1385 ((value2).e >> (sizeof((value1).e) * 8 - (bits))); \
/* Rotate each of the five shares of 'value2' right by 'bits' and store
 * the result in 'value1'.  Rotation is linear over XOR, so applying it
 * share-wise preserves the masked representation.
 * Fix: the .e share previously computed its word width with
 * sizeof((value1).d) — a copy-paste slip from the .d line.  It is
 * harmless only because all shares have the same type; corrected to
 * sizeof((value1).e) for consistency with the other shares.
 * NOTE(review): 'bits' must be nonzero and less than the share width,
 * or the complementary shift count is UB — same constraint as the
 * other rotate macros in this file. */
1395 #define mask_x5_ror(value1, value2, bits) \
1397 (value1).a = ((value2).a >> (bits)) | \
1398 ((value2).a << (sizeof((value1).a) * 8 - (bits))); \
1399 (value1).b = ((value2).b >> (bits)) | \
1400 ((value2).b << (sizeof((value1).b) * 8 - (bits))); \
1401 (value1).c = ((value2).c >> (bits)) | \
1402 ((value2).c << (sizeof((value1).c) * 8 - (bits))); \
1403 (value1).d = ((value2).d >> (bits)) | \
1404 ((value2).d << (sizeof((value1).d) * 8 - (bits))); \
1405 (value1).e = ((value2).e >> (bits)) | \
1406 ((value2).e << (sizeof((value1).e) * 8 - (bits))); \
1415 #define mask_x5_swap(value1, value2) \
1417 (value1).a ^= (value2).a; \
1418 (value2).a ^= (value1).a; \
1419 (value1).a ^= (value2).a; \
1420 (value1).b ^= (value2).b; \
1421 (value2).b ^= (value1).b; \
1422 (value1).b ^= (value2).b; \
1423 (value1).c ^= (value2).c; \
1424 (value2).c ^= (value1).c; \
1425 (value1).c ^= (value2).c; \
1426 (value1).d ^= (value2).d; \
1427 (value2).d ^= (value1).d; \
1428 (value1).d ^= (value2).d; \
1429 (value1).e ^= (value2).e; \
1430 (value2).e ^= (value1).e; \
1431 (value1).e ^= (value2).e; \
1442 #define mask_x5_swap_move(value1, value2, mask, shift) \
1444 mask_swap_move_internal((value1).a, (value2).a, (mask), (shift)); \
1445 mask_swap_move_internal((value1).b, (value2).b, (mask), (shift)); \
1446 mask_swap_move_internal((value1).c, (value2).c, (mask), (shift)); \
1447 mask_swap_move_internal((value1).d, (value2).d, (mask), (shift)); \
1448 mask_swap_move_internal((value1).e, (value2).e, (mask), (shift)); \
1457 #define mask_x6_input(value, input) \
1459 if (sizeof((value).b) <= 4) { \
1460 (value).b = aead_random_generate_32(); \
1461 (value).c = aead_random_generate_32(); \
1462 (value).d = aead_random_generate_32(); \
1463 (value).e = aead_random_generate_32(); \
1464 (value).f = aead_random_generate_32(); \
1466 (value).b = aead_random_generate_64(); \
1467 (value).c = aead_random_generate_64(); \
1468 (value).d = aead_random_generate_64(); \
1469 (value).e = aead_random_generate_64(); \
1470 (value).f = aead_random_generate_64(); \
1472 (value).a = (input) ^ (value).b ^ (value).c ^ \
1473 (value).d ^ (value).e ^ (value).f; \
/* Unmask a 6-share value: XOR of all six shares yields the plain word. */
1482 #define mask_x6_output(value) \
1483 ((value).a ^ (value).b ^ (value).c ^ (value).d ^ (value).e ^ (value).f)
1490 #define mask_x6_zero(value) \
1509 #define mask_x6_xor_const(value, cvalue) \
1511 (value).a ^= (cvalue); \
1522 #define mask_x6_xor(value1, value2) \
1524 (value1).a ^= (value2).a; \
1525 (value1).b ^= (value2).b; \
1526 (value1).c ^= (value2).c; \
1527 (value1).d ^= (value2).d; \
1528 (value1).e ^= (value2).e; \
1529 (value1).f ^= (value2).f; \
1541 #define mask_x6_xor3(value1, value2, value3) \
1543 (value1).a ^= ((value2).a ^ (value3).a); \
1544 (value1).b ^= ((value2).b ^ (value3).b); \
1545 (value1).c ^= ((value2).c ^ (value3).c); \
1546 (value1).d ^= ((value2).d ^ (value3).d); \
1547 (value1).e ^= ((value2).e ^ (value3).e); \
1548 (value1).f ^= ((value2).f ^ (value3).f); \
1558 #define mask_x6_not(value) \
1560 (value).a = ~((value).a); \
1577 #define mask_x6_and(value1, value2, value3) \
1579 (value1).a ^= ((value2).a & (value3).a); \
1580 mask_mix_and((value1).a, (value2).a, (value3).a, \
1581 (value1).b, (value2).b, (value3).b); \
1582 mask_mix_and((value1).a, (value2).a, (value3).a, \
1583 (value1).c, (value2).c, (value3).c); \
1584 mask_mix_and((value1).a, (value2).a, (value3).a, \
1585 (value1).d, (value2).d, (value3).d); \
1586 mask_mix_and((value1).a, (value2).a, (value3).a, \
1587 (value1).e, (value2).e, (value3).e); \
1588 mask_mix_and((value1).a, (value2).a, (value3).a, \
1589 (value1).f, (value2).f, (value3).f); \
1590 (value1).b ^= ((value2).b & (value3).b); \
1591 mask_mix_and((value1).b, (value2).b, (value3).b, \
1592 (value1).c, (value2).c, (value3).c); \
1593 mask_mix_and((value1).b, (value2).b, (value3).b, \
1594 (value1).d, (value2).d, (value3).d); \
1595 mask_mix_and((value1).b, (value2).b, (value3).b, \
1596 (value1).e, (value2).e, (value3).e); \
1597 mask_mix_and((value1).b, (value2).b, (value3).b, \
1598 (value1).f, (value2).f, (value3).f); \
1599 (value1).c ^= ((value2).c & (value3).c); \
1600 mask_mix_and((value1).c, (value2).c, (value3).c, \
1601 (value1).d, (value2).d, (value3).d); \
1602 mask_mix_and((value1).c, (value2).c, (value3).c, \
1603 (value1).e, (value2).e, (value3).e); \
1604 mask_mix_and((value1).c, (value2).c, (value3).c, \
1605 (value1).f, (value2).f, (value3).f); \
1606 (value1).d ^= ((value2).d & (value3).d); \
1607 mask_mix_and((value1).d, (value2).d, (value3).d, \
1608 (value1).e, (value2).e, (value3).e); \
1609 mask_mix_and((value1).d, (value2).d, (value3).d, \
1610 (value1).f, (value2).f, (value3).f); \
1611 (value1).e ^= ((value2).e & (value3).e); \
1612 mask_mix_and((value1).e, (value2).e, (value3).e, \
1613 (value1).f, (value2).f, (value3).f); \
1614 (value1).f ^= ((value2).f & (value3).f); \
1632 #define mask_x6_and_not(value1, value2, value3) \
1634 (value1).a ^= ((~(value2).a) & (value3).a); \
1635 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
1636 (value1).b, (value2).b, (value3).b); \
1637 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
1638 (value1).c, (value2).c, (value3).c); \
1639 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
1640 (value1).d, (value2).d, (value3).d); \
1641 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
1642 (value1).e, (value2).e, (value3).e); \
1643 mask_mix_and((value1).a, ~(value2).a, (value3).a, \
1644 (value1).f, (value2).f, (value3).f); \
1645 (value1).b ^= ((value2).b & (value3).b); \
1646 mask_mix_and((value1).b, (value2).b, (value3).b, \
1647 (value1).c, (value2).c, (value3).c); \
1648 mask_mix_and((value1).b, (value2).b, (value3).b, \
1649 (value1).d, (value2).d, (value3).d); \
1650 mask_mix_and((value1).b, (value2).b, (value3).b, \
1651 (value1).e, (value2).e, (value3).e); \
1652 mask_mix_and((value1).b, (value2).b, (value3).b, \
1653 (value1).f, (value2).f, (value3).f); \
1654 (value1).c ^= ((value2).c & (value3).c); \
1655 mask_mix_and((value1).c, (value2).c, (value3).c, \
1656 (value1).d, (value2).d, (value3).d); \
1657 mask_mix_and((value1).c, (value2).c, (value3).c, \
1658 (value1).e, (value2).e, (value3).e); \
1659 mask_mix_and((value1).c, (value2).c, (value3).c, \
1660 (value1).f, (value2).f, (value3).f); \
1661 (value1).d ^= ((value2).d & (value3).d); \
1662 mask_mix_and((value1).d, (value2).d, (value3).d, \
1663 (value1).e, (value2).e, (value3).e); \
1664 mask_mix_and((value1).d, (value2).d, (value3).d, \
1665 (value1).f, (value2).f, (value3).f); \
1666 (value1).e ^= ((value2).e & (value3).e); \
1667 mask_mix_and((value1).e, (value2).e, (value3).e, \
1668 (value1).f, (value2).f, (value3).f); \
1669 (value1).f ^= ((value2).f & (value3).f); \
1686 #define mask_x6_or(value1, value2, value3) \
1688 (value1).a ^= ((value2).a | (value3).a); \
1689 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1690 (value1).b, (value2).b, (value3).b); \
1691 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1692 (value1).c, (value2).c, (value3).c); \
1693 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1694 (value1).d, (value2).d, (value3).d); \
1695 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1696 (value1).e, (value2).e, (value3).e); \
1697 mask_mix_and((value1).a, ~(value2).a, ~(value3).a, \
1698 (value1).f, (value2).f, (value3).f); \
1699 (value1).b ^= ((value2).b & (value3).b); \
1700 mask_mix_and((value1).b, (value2).b, (value3).b, \
1701 (value1).c, (value2).c, (value3).c); \
1702 mask_mix_and((value1).b, (value2).b, (value3).b, \
1703 (value1).d, (value2).d, (value3).d); \
1704 mask_mix_and((value1).b, (value2).b, (value3).b, \
1705 (value1).e, (value2).e, (value3).e); \
1706 mask_mix_and((value1).b, (value2).b, (value3).b, \
1707 (value1).f, (value2).f, (value3).f); \
1708 (value1).c ^= ((value2).c & (value3).c); \
1709 mask_mix_and((value1).c, (value2).c, (value3).c, \
1710 (value1).d, (value2).d, (value3).d); \
1711 mask_mix_and((value1).c, (value2).c, (value3).c, \
1712 (value1).e, (value2).e, (value3).e); \
1713 mask_mix_and((value1).c, (value2).c, (value3).c, \
1714 (value1).f, (value2).f, (value3).f); \
1715 (value1).d ^= ((value2).d & (value3).d); \
1716 mask_mix_and((value1).d, (value2).d, (value3).d, \
1717 (value1).e, (value2).e, (value3).e); \
1718 mask_mix_and((value1).d, (value2).d, (value3).d, \
1719 (value1).f, (value2).f, (value3).f); \
1720 (value1).e ^= ((value2).e & (value3).e); \
1721 mask_mix_and((value1).e, (value2).e, (value3).e, \
1722 (value1).f, (value2).f, (value3).f); \
1723 (value1).f ^= ((value2).f & (value3).f); \
1733 #define mask_x6_shl(value1, value2, bits) \
1735 (value1).a = (value2).a << (bits); \
1736 (value1).b = (value2).b << (bits); \
1737 (value1).c = (value2).c << (bits); \
1738 (value1).d = (value2).d << (bits); \
1739 (value1).e = (value2).e << (bits); \
1740 (value1).f = (value2).f << (bits); \
1750 #define mask_x6_shr(value1, value2, bits) \
1752 (value1).a = (value2).a >> (bits); \
1753 (value1).b = (value2).b >> (bits); \
1754 (value1).c = (value2).c >> (bits); \
1755 (value1).d = (value2).d >> (bits); \
1756 (value1).e = (value2).e >> (bits); \
1757 (value1).f = (value2).f >> (bits); \
1767 #define mask_x6_rol(value1, value2, bits) \
1769 (value1).a = ((value2).a << (bits)) | \
1770 ((value2).a >> (sizeof((value1).a) * 8 - (bits))); \
1771 (value1).b = ((value2).b << (bits)) | \
1772 ((value2).b >> (sizeof((value1).b) * 8 - (bits))); \
1773 (value1).c = ((value2).c << (bits)) | \
1774 ((value2).c >> (sizeof((value1).c) * 8 - (bits))); \
1775 (value1).d = ((value2).d << (bits)) | \
1776 ((value2).d >> (sizeof((value1).d) * 8 - (bits))); \
1777 (value1).e = ((value2).e << (bits)) | \
1778 ((value2).e >> (sizeof((value1).e) * 8 - (bits))); \
1779 (value1).f = ((value2).f << (bits)) | \
1780 ((value2).f >> (sizeof((value1).f) * 8 - (bits))); \
1790 #define mask_x6_ror(value1, value2, bits) \
1792 (value1).a = ((value2).a >> (bits)) | \
1793 ((value2).a << (sizeof((value1).a) * 8 - (bits))); \
1794 (value1).b = ((value2).b >> (bits)) | \
1795 ((value2).b << (sizeof((value1).b) * 8 - (bits))); \
1796 (value1).c = ((value2).c >> (bits)) | \
1797 ((value2).c << (sizeof((value1).c) * 8 - (bits))); \
1798 (value1).d = ((value2).d >> (bits)) | \
1799 ((value2).d << (sizeof((value1).d) * 8 - (bits))); \
1800 (value1).e = ((value2).e >> (bits)) | \
1801 ((value2).e << (sizeof((value1).e) * 8 - (bits))); \
1802 (value1).f = ((value2).f >> (bits)) | \
1803 ((value2).f << (sizeof((value1).f) * 8 - (bits))); \
1812 #define mask_x6_swap(value1, value2) \
1814 (value1).a ^= (value2).a; \
1815 (value2).a ^= (value1).a; \
1816 (value1).a ^= (value2).a; \
1817 (value1).b ^= (value2).b; \
1818 (value2).b ^= (value1).b; \
1819 (value1).b ^= (value2).b; \
1820 (value1).c ^= (value2).c; \
1821 (value2).c ^= (value1).c; \
1822 (value1).c ^= (value2).c; \
1823 (value1).d ^= (value2).d; \
1824 (value2).d ^= (value1).d; \
1825 (value1).d ^= (value2).d; \
1826 (value1).e ^= (value2).e; \
1827 (value2).e ^= (value1).e; \
1828 (value1).e ^= (value2).e; \
1829 (value1).f ^= (value2).f; \
1830 (value2).f ^= (value1).f; \
1831 (value1).f ^= (value2).f; \
1842 #define mask_x6_swap_move(value1, value2, mask, shift) \
1844 mask_swap_move_internal((value1).a, (value2).a, (mask), (shift)); \
1845 mask_swap_move_internal((value1).b, (value2).b, (mask), (shift)); \
1846 mask_swap_move_internal((value1).c, (value2).c, (mask), (shift)); \
1847 mask_swap_move_internal((value1).d, (value2).d, (mask), (shift)); \
1848 mask_swap_move_internal((value1).e, (value2).e, (mask), (shift)); \
1849 mask_swap_move_internal((value1).f, (value2).f, (mask), (shift)); \
1853 #if AEAD_MASKING_SHARES == 2
/* Generic mask_* API: when the library is configured for 2 shares,
 * every generic operation forwards to its mask_x2_* implementation. */
1857 #define mask_input(value, input) mask_x2_input((value), (input))
1858 #define mask_output(value) mask_x2_output((value))
1859 #define mask_zero(value) mask_x2_zero((value))
1860 #define mask_xor_const(value, cvalue) mask_x2_xor_const((value), (cvalue))
1861 #define mask_xor(value1, value2) mask_x2_xor((value1), (value2))
1862 #define mask_xor3(value1, value2, value3) mask_x2_xor3((value1), (value2), (value3))
1863 #define mask_not(value) mask_x2_not((value))
1864 #define mask_and(value1, value2, value3) mask_x2_and((value1), (value2), (value3))
1865 #define mask_and_not(value1, value2, value3) mask_x2_and_not((value1), (value2), (value3))
1866 #define mask_or(value1, value2, value3) mask_x2_or((value1), (value2), (value3))
1867 #define mask_shl(value1, value2, bits) mask_x2_shl((value1), (value2), (bits))
1868 #define mask_shr(value1, value2, bits) mask_x2_shr((value1), (value2), (bits))
1869 #define mask_rol(value1, value2, bits) mask_x2_rol((value1), (value2), (bits))
1870 #define mask_ror(value1, value2, bits) mask_x2_ror((value1), (value2), (bits))
1871 #define mask_swap(value1, value2) mask_x2_swap((value1), (value2))
1872 #define mask_swap_move(value1, value2, mask, shift) mask_x2_swap_move((value1), (value2), (mask), (shift))
1873 #elif AEAD_MASKING_SHARES == 3
1877 #define mask_input(value, input) mask_x3_input((value), (input))
1878 #define mask_output(value) mask_x3_output((value))
1879 #define mask_zero(value) mask_x3_zero((value))
1880 #define mask_xor_const(value, cvalue) mask_x3_xor_const((value), (cvalue))
1881 #define mask_xor(value1, value2) mask_x3_xor((value1), (value2))
1882 #define mask_xor3(value1, value2, value3) mask_x3_xor3((value1), (value2), (value3))
1883 #define mask_not(value) mask_x3_not((value))
1884 #define mask_and(value1, value2, value3) mask_x3_and((value1), (value2), (value3))
1885 #define mask_and_not(value1, value2, value3) mask_x3_and_not((value1), (value2), (value3))
1886 #define mask_or(value1, value2, value3) mask_x3_or((value1), (value2), (value3))
1887 #define mask_shl(value1, value2, bits) mask_x3_shl((value1), (value2), (bits))
1888 #define mask_shr(value1, value2, bits) mask_x3_shr((value1), (value2), (bits))
1889 #define mask_rol(value1, value2, bits) mask_x3_rol((value1), (value2), (bits))
1890 #define mask_ror(value1, value2, bits) mask_x3_ror((value1), (value2), (bits))
1891 #define mask_swap(value1, value2) mask_x3_swap((value1), (value2))
1892 #define mask_swap_move(value1, value2, mask, shift) mask_x3_swap_move((value1), (value2), (mask), (shift))
1893 #elif AEAD_MASKING_SHARES == 4
1916 #define mask_input(value, input) mask_x4_input((value), (input))
1924 #define mask_output(value) mask_x4_output((value))
1931 #define mask_zero(value) mask_x4_zero((value))
1942 #define mask_xor_const(value, cvalue) mask_x4_xor_const((value), (cvalue))
1952 #define mask_xor(value1, value2) mask_x4_xor((value1), (value2))
1963 #define mask_xor3(value1, value2, value3) mask_x4_xor3((value1), (value2), (value3))
1972 #define mask_not(value) mask_x4_not((value))
1988 #define mask_and(value1, value2, value3) mask_x4_and((value1), (value2), (value3))
2005 #define mask_and_not(value1, value2, value3) mask_x4_and_not((value1), (value2), (value3))
2021 #define mask_or(value1, value2, value3) mask_x4_or((value1), (value2), (value3))
2030 #define mask_shl(value1, value2, bits) mask_x4_shl((value1), (value2), (bits))
2039 #define mask_shr(value1, value2, bits) mask_x4_shr((value1), (value2), (bits))
2048 #define mask_rol(value1, value2, bits) mask_x4_rol((value1), (value2), (bits))
2057 #define mask_ror(value1, value2, bits) mask_x4_ror((value1), (value2), (bits))
2065 #define mask_swap(value1, value2) mask_x4_swap((value1), (value2))
2075 #define mask_swap_move(value1, value2, mask, shift) mask_x4_swap_move((value1), (value2), (mask), (shift))
2077 #elif AEAD_MASKING_SHARES == 5
2081 #define mask_input(value, input) mask_x5_input((value), (input))
2082 #define mask_output(value) mask_x5_output((value))
2083 #define mask_zero(value) mask_x5_zero((value))
2084 #define mask_xor_const(value, cvalue) mask_x5_xor_const((value), (cvalue))
2085 #define mask_xor(value1, value2) mask_x5_xor((value1), (value2))
2086 #define mask_xor3(value1, value2, value3) mask_x5_xor3((value1), (value2), (value3))
2087 #define mask_not(value) mask_x5_not((value))
2088 #define mask_and(value1, value2, value3) mask_x5_and((value1), (value2), (value3))
2089 #define mask_and_not(value1, value2, value3) mask_x5_and_not((value1), (value2), (value3))
2090 #define mask_or(value1, value2, value3) mask_x5_or((value1), (value2), (value3))
2091 #define mask_shl(value1, value2, bits) mask_x5_shl((value1), (value2), (bits))
2092 #define mask_shr(value1, value2, bits) mask_x5_shr((value1), (value2), (bits))
2093 #define mask_rol(value1, value2, bits) mask_x5_rol((value1), (value2), (bits))
2094 #define mask_ror(value1, value2, bits) mask_x5_ror((value1), (value2), (bits))
2095 #define mask_swap(value1, value2) mask_x5_swap((value1), (value2))
2096 #define mask_swap_move(value1, value2, mask, shift) mask_x5_swap_move((value1), (value2), (mask), (shift))
2097 #elif AEAD_MASKING_SHARES == 6
2101 #define mask_input(value, input) mask_x6_input((value), (input))
2102 #define mask_output(value) mask_x6_output((value))
2103 #define mask_zero(value) mask_x6_zero((value))
2104 #define mask_xor_const(value, cvalue) mask_x6_xor_const((value), (cvalue))
2105 #define mask_xor(value1, value2) mask_x6_xor((value1), (value2))
2106 #define mask_xor3(value1, value2, value3) mask_x6_xor3((value1), (value2), (value3))
2107 #define mask_not(value) mask_x6_not((value))
2108 #define mask_and(value1, value2, value3) mask_x6_and((value1), (value2), (value3))
2109 #define mask_and_not(value1, value2, value3) mask_x6_and_not((value1), (value2), (value3))
2110 #define mask_or(value1, value2, value3) mask_x6_or((value1), (value2), (value3))
2111 #define mask_shl(value1, value2, bits) mask_x6_shl((value1), (value2), (bits))
2112 #define mask_shr(value1, value2, bits) mask_x6_shr((value1), (value2), (bits))
2113 #define mask_rol(value1, value2, bits) mask_x6_rol((value1), (value2), (bits))
2114 #define mask_ror(value1, value2, bits) mask_x6_ror((value1), (value2), (bits))
2115 #define mask_swap(value1, value2) mask_x6_swap((value1), (value2))
2116 #define mask_swap_move(value1, value2, mask, shift) mask_x6_swap_move((value1), (value2), (mask), (shift))
2118 #error "AEAD_MASKING_SHARES value is not supported"
uint32_t d
Definition: internal-masking.h:169
uint32_t a
Definition: internal-masking.h:166
Masked 16-bit word with six shares.
Definition: internal-masking.h:117
uint64_t d
Definition: internal-masking.h:229
uint16_t a
Definition: internal-masking.h:106
uint32_t a
Definition: internal-masking.h:179
uint16_t b
Definition: internal-masking.h:120
uint32_t c
Definition: internal-masking.h:168
uint16_t d
Definition: internal-masking.h:109
uint16_t c
Definition: internal-masking.h:85
uint32_t b
Definition: internal-masking.h:167
uint32_t c
Definition: internal-masking.h:156
Masked 32-bit word with five shares.
Definition: internal-masking.h:164
uint16_t a
Definition: internal-masking.h:119
uint64_t d
Definition: internal-masking.h:242
uint64_t a
Definition: internal-masking.h:214
uint16_t a
Definition: internal-masking.h:83
Masked 64-bit word with five shares.
Definition: internal-masking.h:224
uint16_t b
Definition: internal-masking.h:74
uint64_t b
Definition: internal-masking.h:240
uint16_t c
Definition: internal-masking.h:108
uint32_t a
Definition: internal-masking.h:133
uint32_t b
Definition: internal-masking.h:180
uint64_t a
Definition: internal-masking.h:239
uint32_t a
Definition: internal-masking.h:154
uint32_t d
Definition: internal-masking.h:157
uint16_t a
Definition: internal-masking.h:94
uint16_t c
Definition: internal-masking.h:96
uint16_t e
Definition: internal-masking.h:110
Masked 16-bit word with five shares.
Definition: internal-masking.h:104
uint16_t a
Definition: internal-masking.h:73
Masked 32-bit word with three shares.
Definition: internal-masking.h:141
uint32_t b
Definition: internal-masking.h:155
uint64_t d
Definition: internal-masking.h:217
uint64_t e
Definition: internal-masking.h:230
uint32_t f
Definition: internal-masking.h:184
uint32_t c
Definition: internal-masking.h:181
uint64_t c
Definition: internal-masking.h:205
uint32_t b
Definition: internal-masking.h:144
uint16_t f
Definition: internal-masking.h:124
mask_x4_uint16_t mask_uint16_t
Generic masked 16-bit word.
Definition: internal-masking.h:1898
uint64_t f
Definition: internal-masking.h:244
uint64_t c
Definition: internal-masking.h:241
Masked 64-bit word with three shares.
Definition: internal-masking.h:201
uint16_t b
Definition: internal-masking.h:84
Masked 32-bit word with two shares.
Definition: internal-masking.h:131
uint16_t d
Definition: internal-masking.h:122
uint16_t b
Definition: internal-masking.h:95
uint64_t a
Definition: internal-masking.h:226
uint64_t b
Definition: internal-masking.h:227
uint64_t b
Definition: internal-masking.h:215
uint32_t e
Definition: internal-masking.h:170
uint64_t c
Definition: internal-masking.h:228
Masked 32-bit word with four shares.
Definition: internal-masking.h:152
Masked 64-bit word with two shares.
Definition: internal-masking.h:191
uint32_t c
Definition: internal-masking.h:145
uint16_t e
Definition: internal-masking.h:123
Masked 16-bit word with three shares.
Definition: internal-masking.h:81
uint32_t d
Definition: internal-masking.h:182
Masked 32-bit word with six shares.
Definition: internal-masking.h:177
uint64_t b
Definition: internal-masking.h:194
mask_x4_uint64_t mask_uint64_t
Generic masked 64-bit word.
Definition: internal-masking.h:1908
Masked 64-bit word with four shares.
Definition: internal-masking.h:212
uint32_t a
Definition: internal-masking.h:143
uint64_t a
Definition: internal-masking.h:193
Masked 16-bit word with two shares.
Definition: internal-masking.h:71
mask_x4_uint32_t mask_uint32_t
Generic masked 32-bit word.
Definition: internal-masking.h:1903
uint32_t b
Definition: internal-masking.h:134
uint32_t e
Definition: internal-masking.h:183
Masked 64-bit word with six shares.
Definition: internal-masking.h:237
uint16_t d
Definition: internal-masking.h:97
uint16_t c
Definition: internal-masking.h:121
Utilities that help with the generation of random masking material.
uint64_t e
Definition: internal-masking.h:243
Masked 16-bit word with four shares.
Definition: internal-masking.h:92
uint64_t c
Definition: internal-masking.h:216
uint16_t b
Definition: internal-masking.h:107
uint64_t a
Definition: internal-masking.h:203
uint64_t b
Definition: internal-masking.h:204