25 #include "utility/EndianUtil.h"
26 #include "utility/RotateUtil.h"
27 #include "utility/ProgMemUtil.h"
96 #define USE_AVR_INLINE_ASM 1
99 #ifndef CRYPTO_LITTLE_ENDIAN
100 #error "Arduino platforms are assumed to be little-endian"
110 : s(schedule), r(rounds)
131 #if USE_AVR_INLINE_ASM
135 #define ALIGN256 __attribute__((aligned(256)))
143 static uint8_t
const sbox[256] PROGMEM ALIGN256 = {
144 0xcc, 0xc6, 0xc9, 0xc0, 0xc1, 0xca, 0xc2, 0xcb, 0xc3, 0xc8, 0xc5, 0xcd,
145 0xc4, 0xce, 0xc7, 0xcf, 0x6c, 0x66, 0x69, 0x60, 0x61, 0x6a, 0x62, 0x6b,
146 0x63, 0x68, 0x65, 0x6d, 0x64, 0x6e, 0x67, 0x6f, 0x9c, 0x96, 0x99, 0x90,
147 0x91, 0x9a, 0x92, 0x9b, 0x93, 0x98, 0x95, 0x9d, 0x94, 0x9e, 0x97, 0x9f,
148 0x0c, 0x06, 0x09, 0x00, 0x01, 0x0a, 0x02, 0x0b, 0x03, 0x08, 0x05, 0x0d,
149 0x04, 0x0e, 0x07, 0x0f, 0x1c, 0x16, 0x19, 0x10, 0x11, 0x1a, 0x12, 0x1b,
150 0x13, 0x18, 0x15, 0x1d, 0x14, 0x1e, 0x17, 0x1f, 0xac, 0xa6, 0xa9, 0xa0,
151 0xa1, 0xaa, 0xa2, 0xab, 0xa3, 0xa8, 0xa5, 0xad, 0xa4, 0xae, 0xa7, 0xaf,
152 0x2c, 0x26, 0x29, 0x20, 0x21, 0x2a, 0x22, 0x2b, 0x23, 0x28, 0x25, 0x2d,
153 0x24, 0x2e, 0x27, 0x2f, 0xbc, 0xb6, 0xb9, 0xb0, 0xb1, 0xba, 0xb2, 0xbb,
154 0xb3, 0xb8, 0xb5, 0xbd, 0xb4, 0xbe, 0xb7, 0xbf, 0x3c, 0x36, 0x39, 0x30,
155 0x31, 0x3a, 0x32, 0x3b, 0x33, 0x38, 0x35, 0x3d, 0x34, 0x3e, 0x37, 0x3f,
156 0x8c, 0x86, 0x89, 0x80, 0x81, 0x8a, 0x82, 0x8b, 0x83, 0x88, 0x85, 0x8d,
157 0x84, 0x8e, 0x87, 0x8f, 0x5c, 0x56, 0x59, 0x50, 0x51, 0x5a, 0x52, 0x5b,
158 0x53, 0x58, 0x55, 0x5d, 0x54, 0x5e, 0x57, 0x5f, 0xdc, 0xd6, 0xd9, 0xd0,
159 0xd1, 0xda, 0xd2, 0xdb, 0xd3, 0xd8, 0xd5, 0xdd, 0xd4, 0xde, 0xd7, 0xdf,
160 0x4c, 0x46, 0x49, 0x40, 0x41, 0x4a, 0x42, 0x4b, 0x43, 0x48, 0x45, 0x4d,
161 0x44, 0x4e, 0x47, 0x4f, 0xec, 0xe6, 0xe9, 0xe0, 0xe1, 0xea, 0xe2, 0xeb,
162 0xe3, 0xe8, 0xe5, 0xed, 0xe4, 0xee, 0xe7, 0xef, 0x7c, 0x76, 0x79, 0x70,
163 0x71, 0x7a, 0x72, 0x7b, 0x73, 0x78, 0x75, 0x7d, 0x74, 0x7e, 0x77, 0x7f,
164 0xfc, 0xf6, 0xf9, 0xf0, 0xf1, 0xfa, 0xf2, 0xfb, 0xf3, 0xf8, 0xf5, 0xfd,
165 0xf4, 0xfe, 0xf7, 0xff
167 static uint8_t
const sbox_inv[256] PROGMEM ALIGN256 = {
168 0x33, 0x34, 0x36, 0x38, 0x3c, 0x3a, 0x31, 0x3e, 0x39, 0x32, 0x35, 0x37,
169 0x30, 0x3b, 0x3d, 0x3f, 0x43, 0x44, 0x46, 0x48, 0x4c, 0x4a, 0x41, 0x4e,
170 0x49, 0x42, 0x45, 0x47, 0x40, 0x4b, 0x4d, 0x4f, 0x63, 0x64, 0x66, 0x68,
171 0x6c, 0x6a, 0x61, 0x6e, 0x69, 0x62, 0x65, 0x67, 0x60, 0x6b, 0x6d, 0x6f,
172 0x83, 0x84, 0x86, 0x88, 0x8c, 0x8a, 0x81, 0x8e, 0x89, 0x82, 0x85, 0x87,
173 0x80, 0x8b, 0x8d, 0x8f, 0xc3, 0xc4, 0xc6, 0xc8, 0xcc, 0xca, 0xc1, 0xce,
174 0xc9, 0xc2, 0xc5, 0xc7, 0xc0, 0xcb, 0xcd, 0xcf, 0xa3, 0xa4, 0xa6, 0xa8,
175 0xac, 0xaa, 0xa1, 0xae, 0xa9, 0xa2, 0xa5, 0xa7, 0xa0, 0xab, 0xad, 0xaf,
176 0x13, 0x14, 0x16, 0x18, 0x1c, 0x1a, 0x11, 0x1e, 0x19, 0x12, 0x15, 0x17,
177 0x10, 0x1b, 0x1d, 0x1f, 0xe3, 0xe4, 0xe6, 0xe8, 0xec, 0xea, 0xe1, 0xee,
178 0xe9, 0xe2, 0xe5, 0xe7, 0xe0, 0xeb, 0xed, 0xef, 0x93, 0x94, 0x96, 0x98,
179 0x9c, 0x9a, 0x91, 0x9e, 0x99, 0x92, 0x95, 0x97, 0x90, 0x9b, 0x9d, 0x9f,
180 0x23, 0x24, 0x26, 0x28, 0x2c, 0x2a, 0x21, 0x2e, 0x29, 0x22, 0x25, 0x27,
181 0x20, 0x2b, 0x2d, 0x2f, 0x53, 0x54, 0x56, 0x58, 0x5c, 0x5a, 0x51, 0x5e,
182 0x59, 0x52, 0x55, 0x57, 0x50, 0x5b, 0x5d, 0x5f, 0x73, 0x74, 0x76, 0x78,
183 0x7c, 0x7a, 0x71, 0x7e, 0x79, 0x72, 0x75, 0x77, 0x70, 0x7b, 0x7d, 0x7f,
184 0x03, 0x04, 0x06, 0x08, 0x0c, 0x0a, 0x01, 0x0e, 0x09, 0x02, 0x05, 0x07,
185 0x00, 0x0b, 0x0d, 0x0f, 0xb3, 0xb4, 0xb6, 0xb8, 0xbc, 0xba, 0xb1, 0xbe,
186 0xb9, 0xb2, 0xb5, 0xb7, 0xb0, 0xbb, 0xbd, 0xbf, 0xd3, 0xd4, 0xd6, 0xd8,
187 0xdc, 0xda, 0xd1, 0xde, 0xd9, 0xd2, 0xd5, 0xd7, 0xd0, 0xdb, 0xdd, 0xdf,
188 0xf3, 0xf4, 0xf6, 0xf8, 0xfc, 0xfa, 0xf1, 0xfe, 0xf9, 0xf2, 0xf5, 0xf7,
189 0xf0, 0xfb, 0xfd, 0xff
195 "mov r30," reg "\n" \
197 #elif defined(__AVR_HAVE_LPMX__)
199 "mov r30," reg "\n" \
201 #elif defined(__AVR_TINY__)
203 "mov r30," reg "\n" \
207 "mov r30," reg "\n" \
/*
 * Expands to AVR assembly text that performs the SKINNY MixColumns
 * step on four 8-bit registers, one state row per register name:
 *
 *     row1 ^= row2;  row2 ^= row0;  tmp = row3 ^ row2;
 *     row3 = row2;   row2 = row1;   row1 = row0;  row0 = tmp;
 *
 * avr-gcc's __tmp_reg__ scratch register holds the temporary.
 */
#define MIX_COLUMNS(row0, row1, row2, row3) \
    "eor " row1 "," row2 "\n" \
    "eor " row2 "," row0 "\n" \
    "mov __tmp_reg__," row3 "\n" \
    "eor __tmp_reg__," row2 "\n" \
    "mov " row3 "," row2 "\n" \
    "mov " row2 "," row1 "\n" \
    "mov " row1 "," row0 "\n" \
    "mov " row0 ",__tmp_reg__\n"
/*
 * Expands to AVR assembly text for the exact inverse of MIX_COLUMNS:
 * the row rotation is undone first, then the XORs are removed in
 * reverse order.  Composing MIX_COLUMNS_INV after MIX_COLUMNS on the
 * same four registers restores the original row values.
 */
#define MIX_COLUMNS_INV(row0, row1, row2, row3) \
    "mov __tmp_reg__," row3 "\n" \
    "mov " row3 "," row0 "\n" \
    "mov " row0 "," row1 "\n" \
    "mov " row1 "," row2 "\n" \
    "eor " row3 ",__tmp_reg__\n" \
    "eor __tmp_reg__," row0 "\n" \
    "mov " row2 ",__tmp_reg__\n" \
    "eor " row1 "," row2 "\n"
236 #define LOAD_BLOCK() \
247 #define STORE_BLOCK() \
257 #else // !USE_AVR_INLINE_ASM
/**
 * \brief Applies the SKINNY-64 S-box to all eight nibbles of a 32-bit
 * word (two rows of the state) in bit-sliced form.
 *
 * \param x Eight 4-bit cells packed into a 32-bit word.
 * \return The word with the S-box applied to every nibble.
 *
 * The word is complemented so each NOR step of the S-box circuit can
 * be computed as an AND; after four such steps each nibble is rotated
 * right by one bit.  Matches the expanded byte table above
 * (S(0) = 0xC, S(1) = 6, ...).
 */
static inline uint32_t skinny64_sbox(uint32_t x)
{
    /* Complement so NOR(a, b) becomes (~a) & (~b). */
    x = ~x;
    /* bit0 ^= NOR(bit3, bit2) of each nibble, then the next three taps. */
    x = (((x >> 3) & (x >> 2)) & 0x11111111U) ^ x;
    x = (((x << 1) & (x << 2)) & 0x88888888U) ^ x;
    x = (((x << 1) & (x << 2)) & 0x44444444U) ^ x;
    x = (((x >> 2) & (x << 1)) & 0x22222222U) ^ x;
    x = ~x;
    /* Rotate every nibble right by one bit to finish. */
    return ((x >> 1) & 0x77777777U) | ((x << 3) & 0x88888888U);
}
/**
 * \brief Applies the inverse SKINNY-64 S-box to all eight nibbles of a
 * 32-bit word in bit-sliced form.
 *
 * \param x Eight 4-bit cells packed into a 32-bit word.
 * \return The word with the inverse S-box applied to every nibble.
 *
 * Mirror of skinny64_sbox(): complemented NOR/XOR taps in the inverse
 * order, then each nibble is rotated left by one bit.  Satisfies
 * skinny64_inv_sbox(skinny64_sbox(x)) == x.
 */
static inline uint32_t skinny64_inv_sbox(uint32_t x)
{
    /* Complement so NOR(a, b) becomes (~a) & (~b). */
    x = ~x;
    x = (((x >> 3) & (x >> 2)) & 0x11111111U) ^ x;
    x = (((x << 1) & (x >> 2)) & 0x22222222U) ^ x;
    x = (((x << 1) & (x << 2)) & 0x44444444U) ^ x;
    x = (((x << 1) & (x << 2)) & 0x88888888U) ^ x;
    x = ~x;
    /* Rotate every nibble left by one bit to finish. */
    return ((x << 1) & 0xEEEEEEEEU) | ((x >> 3) & 0x11111111U);
}
/**
 * \brief Rotates a 16-bit state row right by \a shift bits.
 *
 * \param x     The row value to rotate.
 * \param shift Rotation amount; callers must pass 1..15 (a shift of 0
 *              would evaluate x << 16, which is implementation-hazardous).
 * \return The rotated value.
 */
static inline uint16_t rightRotate_16bit(uint16_t x, uint8_t shift)
{
    return (x >> shift) | (x << (16 - shift));
}
331 inline void skinny64_unpack(
Skinny64Cells_t *cells,
const uint8_t *input)
333 cells->
lrow[0] = ((uint32_t)(input[0])) |
334 (((uint32_t)(input[1])) << 8) |
335 (((uint32_t)(input[2])) << 16) |
336 (((uint32_t)(input[3])) << 24);
337 cells->
lrow[1] = ((uint32_t)(input[4])) |
338 (((uint32_t)(input[5])) << 8) |
339 (((uint32_t)(input[6])) << 16) |
340 (((uint32_t)(input[7])) << 24);
343 inline void skinny64_pack(uint8_t *output,
const Skinny64Cells_t *cells)
345 uint32_t x = cells->
lrow[0];
346 output[0] = (uint8_t)x;
347 output[1] = (uint8_t)(x >> 8);
348 output[2] = (uint8_t)(x >> 16);
349 output[3] = (uint8_t)(x >> 24);
351 output[4] = (uint8_t)x;
352 output[5] = (uint8_t)(x >> 8);
353 output[6] = (uint8_t)(x >> 16);
354 output[7] = (uint8_t)(x >> 24);
357 #endif // !USE_AVR_INLINE_ASM
361 #if USE_AVR_INLINE_ASM
363 uint32_t sbox_addr = (uint32_t)sbox;
365 uint16_t sbox_addr = (uint16_t)sbox;
367 __asm__ __volatile__ (
375 "in __tmp_reg__,%5\n"
377 "ldd __tmp_reg__,%C3\n"
378 "out %5,__tmp_reg__\n"
395 "ld __tmp_reg__,X+\n"
396 "eor r16,__tmp_reg__\n"
397 "ld __tmp_reg__,X+\n"
398 "eor r17,__tmp_reg__\n"
399 "ld __tmp_reg__,X+\n"
400 "eor r18,__tmp_reg__\n"
401 "ld __tmp_reg__,X+\n"
402 "eor r19,__tmp_reg__\n"
417 "mov __tmp_reg__,r20\n"
419 "mov r21,__tmp_reg__\n"
432 MIX_COLUMNS(
"r16",
"r18",
"r20",
"r22")
433 MIX_COLUMNS(
"r17",
"r19",
"r21",
"r23")
444 "out %5,__tmp_reg__\n"
452 : :
"x"(s),
"z"(input),
"Q"(output),
"Q"(sbox_addr),
455 ,
"I" (_SFR_IO_ADDR(RAMPZ))
457 :
"r16",
"r17",
"r18",
"r19",
"r20",
"r21",
"r22",
"r23",
458 "r24",
"r25",
"memory"
460 #else // !USE_AVR_INLINE_ASM
462 const uint32_t *schedule;
466 skinny64_unpack(&state, input);
470 for (uint8_t index = r; index > 0; --index, ++schedule) {
472 state.
lrow[0] = skinny64_sbox(state.
lrow[0]);
473 state.
lrow[1] = skinny64_sbox(state.
lrow[1]);
476 state.
lrow[0] ^= schedule[0];
477 state.
row[2] ^= 0x20;
480 state.
row[1] = rightRotate_16bit(state.
row[1], 4);
481 state.
row[2] = rightRotate_16bit(state.
row[2], 8);
482 state.
row[3] = rightRotate_16bit(state.
row[3], 12);
485 state.
row[1] ^= state.
row[2];
486 state.
row[2] ^= state.
row[0];
487 temp = state.
row[3] ^ state.
row[2];
488 state.
row[3] = state.
row[2];
489 state.
row[2] = state.
row[1];
490 state.
row[1] = state.
row[0];
495 skinny64_pack(output, &state);
496 #endif // !USE_AVR_INLINE_ASM
501 #if USE_AVR_INLINE_ASM
503 uint32_t sbox_addr = (uint32_t)sbox_inv;
505 uint16_t sbox_addr = (uint16_t)sbox_inv;
507 __asm__ __volatile__ (
515 "in __tmp_reg__,%5\n"
517 "ldd __tmp_reg__,%C3\n"
518 "out %5,__tmp_reg__\n"
525 MIX_COLUMNS_INV(
"r16",
"r18",
"r20",
"r22")
526 MIX_COLUMNS_INV(
"r17",
"r19",
"r21",
"r23")
539 "mov __tmp_reg__,r20\n"
541 "mov r21,__tmp_reg__\n"
554 "ld __tmp_reg__,-X\n"
555 "eor r19,__tmp_reg__\n"
556 "ld __tmp_reg__,-X\n"
557 "eor r18,__tmp_reg__\n"
558 "ld __tmp_reg__,-X\n"
559 "eor r17,__tmp_reg__\n"
560 "ld __tmp_reg__,-X\n"
561 "eor r16,__tmp_reg__\n"
584 "out %5,__tmp_reg__\n"
592 : :
"x"(s + r),
"z"(input),
"Q"(output),
"Q"(sbox_addr),
595 ,
"I" (_SFR_IO_ADDR(RAMPZ))
597 :
"r16",
"r17",
"r18",
"r19",
"r20",
"r21",
"r22",
"r23",
598 "r24",
"r25",
"memory"
600 #else // !USE_AVR_INLINE_ASM
602 const uint32_t *schedule;
606 skinny64_unpack(&state, input);
609 schedule = &(s[r - 1]);
610 for (uint8_t index = r; index > 0; --index, --schedule) {
613 state.
row[3] = state.
row[0];
614 state.
row[0] = state.
row[1];
615 state.
row[1] = state.
row[2];
616 state.
row[3] ^= temp;
617 state.
row[2] = temp ^ state.
row[0];
618 state.
row[1] ^= state.
row[2];
621 state.
row[1] = rightRotate_16bit(state.
row[1], 12);
622 state.
row[2] = rightRotate_16bit(state.
row[2], 8);
623 state.
row[3] = rightRotate_16bit(state.
row[3], 4);
626 state.
lrow[0] ^= schedule[0];
627 state.
row[2] ^= 0x20;
630 state.
lrow[0] = skinny64_inv_sbox(state.
lrow[0]);
631 state.
lrow[1] = skinny64_inv_sbox(state.
lrow[1]);
635 skinny64_pack(output, &state);
636 #endif // !USE_AVR_INLINE_ASM
641 clean(s, r *
sizeof(uint32_t));
644 #if USE_AVR_INLINE_ASM
648 #define PERMUTE_TKn() \
676 #else // !USE_AVR_INLINE_ASM
/*
 * Applies the SKINNY tweakey cell permutation to a 64-bit tweakey held
 * as two little-endian 32-bit rows: the old bottom half (lrow[1]) is
 * shuffled nibble-wise into the new top half (lrow[0]), and the old
 * top half becomes the new bottom half.  The do/while(0) wrapper gives
 * the local `x` a scope and makes the macro statement-safe.
 */
#define skinny64_permute_tk(tk) \
    do { \
        uint32_t x = tk.lrow[1]; \
        tk.lrow[1] = tk.lrow[0]; \
        tk.lrow[0] = ((x & 0x0000000FU) << 4) | \
                     ((x & 0x00F0F0F0U) << 8) | \
                     ((x & 0x0F000000U) >> 24) | \
                     ((x & 0x00000F00U) << 16) | \
                     ((x & 0xF0000000U) >> 12) | \
                     ((x & 0x000F0000U) >> 8); \
    } while (0)
693 #endif // !USE_AVR_INLINE_ASM
703 #if USE_AVR_INLINE_ASM
704 __asm__ __volatile__ (
751 : :
"x"(s),
"z"(key),
"r"(r),
"r"((uint8_t)(tweaked ? 0x20 : 0x00))
752 :
"r8",
"r9",
"r10",
"r11",
"r16",
"r17",
"r18",
"r19",
753 "r20",
"r21",
"r22",
"r23",
"r24",
"r25",
"memory"
755 #else // !USE_AVR_INLINE_ASM
757 uint32_t *schedule = s;
761 skinny64_unpack(&TK1, key);
764 for (uint8_t index = r; index > 0; --index, ++schedule) {
768 rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
770 schedule[0] = TK1.
lrow[0] ^ ((rc << 4) & 0xF0) ^
771 ((((uint32_t)rc) << 16) & 0x300000U);
777 schedule[0] ^= 0x2000;
780 skinny64_permute_tk(TK1);
785 #endif // !USE_AVR_INLINE_ASM
798 #if USE_AVR_INLINE_ASM
799 __asm__ __volatile__ (
808 "eor __tmp_reg__,r16\n"
809 "st X+,__tmp_reg__\n"
811 "eor __tmp_reg__,r17\n"
812 "st X+,__tmp_reg__\n"
814 "eor __tmp_reg__,r18\n"
815 "st X+,__tmp_reg__\n"
817 "eor __tmp_reg__,r19\n"
828 : :
"x"(s),
"z"(key),
"r"(r)
829 :
"r8",
"r9",
"r10",
"r11",
"r16",
"r17",
"r18",
"r19",
830 "r20",
"r21",
"r22",
"r23",
"r24",
"memory"
832 #else // !USE_AVR_INLINE_ASM
834 uint32_t *schedule = s;
837 skinny64_unpack(&TK1, key);
840 for (uint8_t index = r; index > 0; --index, ++schedule) {
841 schedule[0] ^= TK1.
lrow[0];
842 skinny64_permute_tk(TK1);
847 #endif // !USE_AVR_INLINE_ASM
850 #if USE_AVR_INLINE_ASM
854 "mov r24, " reg "\n" \
867 "mov r24, " reg "\n" \
876 #else // !USE_AVR_INLINE_ASM
/**
 * \brief Applies the TK2 LFSR to all eight nibbles of a word at once.
 *
 * \param x Eight 4-bit cells packed into a 32-bit word.
 * \return The word with each nibble (x3,x2,x1,x0) replaced by
 *         (x2,x1,x0,x3^x2).  The two operands have disjoint bit
 *         masks, so XOR here acts as OR.
 */
static inline uint32_t skinny64_LFSR2(uint32_t x)
{
    return ((x << 1) & 0xEEEEEEEEU) ^ (((x >> 3) ^ (x >> 2)) & 0x11111111U);
}
/**
 * \brief Applies the TK3 LFSR to all eight nibbles of a word at once.
 *
 * \param x Eight 4-bit cells packed into a 32-bit word.
 * \return The word with each nibble (x3,x2,x1,x0) replaced by
 *         (x0^x3,x3,x2,x1).  The two operands have disjoint bit
 *         masks, so XOR here acts as OR.
 */
static inline uint32_t skinny64_LFSR3(uint32_t x)
{
    return ((x >> 1) & 0x77777777U) ^ ((x ^ (x << 3)) & 0x88888888U);
}
888 #endif // !USE_AVR_INLINE_ASM
897 #if USE_AVR_INLINE_ASM
898 __asm__ __volatile__ (
907 "eor __tmp_reg__,r16\n"
908 "st X+,__tmp_reg__\n"
910 "eor __tmp_reg__,r17\n"
911 "st X+,__tmp_reg__\n"
913 "eor __tmp_reg__,r18\n"
914 "st X+,__tmp_reg__\n"
916 "eor __tmp_reg__,r19\n"
917 "st X+,__tmp_reg__\n"
934 : :
"x"(s),
"z"(key),
"r"(r)
935 :
"r8",
"r9",
"r10",
"r11",
"r16",
"r17",
"r18",
"r19",
936 "r20",
"r21",
"r22",
"r23",
"r24",
"memory"
938 #else // !USE_AVR_INLINE_ASM
940 uint32_t *schedule = s;
943 skinny64_unpack(&TK2, key);
946 for (uint8_t index = r; index > 0; --index, ++schedule) {
948 schedule[0] ^= TK2.
lrow[0];
951 skinny64_permute_tk(TK2);
954 TK2.
lrow[0] = skinny64_LFSR2(TK2.
lrow[0]);
959 #endif // !USE_AVR_INLINE_ASM
969 #if USE_AVR_INLINE_ASM
970 __asm__ __volatile__ (
979 "eor __tmp_reg__,r16\n"
980 "st X+,__tmp_reg__\n"
982 "eor __tmp_reg__,r17\n"
983 "st X+,__tmp_reg__\n"
985 "eor __tmp_reg__,r18\n"
986 "st X+,__tmp_reg__\n"
988 "eor __tmp_reg__,r19\n"
989 "st X+,__tmp_reg__\n"
1006 : :
"x"(s),
"z"(key),
"r"(r)
1007 :
"r8",
"r9",
"r10",
"r11",
"r16",
"r17",
"r18",
"r19",
1008 "r20",
"r21",
"r22",
"r23",
"r24",
"memory"
1010 #else // !USE_AVR_INLINE_ASM
1012 uint32_t *schedule = s;
1015 skinny64_unpack(&TK3, key);
1018 for (uint8_t index = r; index > 0; --index, ++schedule) {
1020 schedule[0] ^= TK3.
lrow[0];
1023 skinny64_permute_tk(TK3);
1026 TK3.
lrow[0] = skinny64_LFSR3(TK3.
lrow[0]);
1031 #endif // !USE_AVR_INLINE_ASM
1075 memcpy(t, tweak, len);
1078 memset(t, 0,
sizeof(t));
1096 memset(t, 0,
sizeof(t));
void encryptBlock(uint8_t *output, const uint8_t *input)
Encrypts a single block using this cipher.
void resetTweak()
Resets the tweak to all-zeroes.
size_t keySize() const
Size of a Skinny64_128_Tweaked key in bytes.
bool setKey(const uint8_t *key, size_t len)
Sets the key to use for future encryption and decryption operations.
Skinny64_128_Tweaked()
Constructs a tweakable Skinny-64 block cipher with a 64-bit key and a 64-bit tweak.
void clear()
Clears all security-sensitive state from this block cipher.
size_t keySize() const
Size of a Skinny64_128 key in bytes.
size_t keySize() const
Size of a Skinny64_192 key in bytes.
virtual ~Skinny64_64()
Destroys this Skinny-64 block cipher object after clearing sensitive information. ...
void xorTK1(const uint8_t *key)
XORs the key schedule with the schedule for TK1.
virtual ~Skinny64_192_Tweaked()
Destroys this tweakable Skinny-64 block cipher object after clearing sensitive information.
bool setKey(const uint8_t *key, size_t len)
Sets the key to use for future encryption and decryption operations.
virtual ~Skinny64_192()
Destroys this Skinny-64 block cipher object after clearing sensitive information. ...
size_t blockSize() const
Size of a Skinny-64 block in bytes.
size_t keySize() const
Size of a Skinny64_64 key in bytes.
Abstract base class for SKINNY block ciphers with 64-bit blocks.
Skinny64_Tweaked(uint32_t *schedule, uint8_t rounds)
Constructs a tweakable Skinny-64 block cipher object.
size_t keySize() const
Size of a Skinny64_192_Tweaked key in bytes.
void setTK3(const uint8_t *key)
XORs the key schedule with the schedule for TK3.
Skinny64_192_Tweaked()
Constructs a tweakable Skinny-64 block cipher with a 128-bit key and a 64-bit tweak.
Skinny64_128()
Constructs a Skinny-64 block cipher with a 128-bit key.
virtual ~Skinny64_128_Tweaked()
Destroys this tweakable Skinny-64 block cipher object after clearing sensitive information.
Skinny64(uint32_t *schedule, uint8_t rounds)
Constructs a Skinny-64 block cipher object.
void setTK1(const uint8_t *key, bool tweaked=false)
Clears the key schedule and sets it to the schedule for TK1.
bool setTweak(const uint8_t *tweak, size_t len)
Sets the 64-bit tweak value for this block cipher.
Skinny64_64()
Constructs a Skinny-64 block cipher with a 64-bit key.
virtual ~Skinny64()
Destroys this Skinny-64 block cipher object after clearing sensitive information. ...
void clear()
Clears all security-sensitive state from this block cipher.
virtual ~Skinny64_Tweaked()
Destroys this tweakable Skinny-64 block cipher object after clearing sensitive information.
bool setKey(const uint8_t *key, size_t len)
Sets the key to use for future encryption and decryption operations.
void decryptBlock(uint8_t *output, const uint8_t *input)
Decrypts a single block using this cipher.
Skinny64_192()
Constructs a Skinny-64 block cipher with a 192-bit key.
virtual ~Skinny64_128()
Destroys this Skinny-64 block cipher object after clearing sensitive information. ...
Union that describes a 64-bit 4x4 array of cells.
Abstract base class for SKINNY tweakable block ciphers with 64-bit blocks.
bool setKey(const uint8_t *key, size_t len)
Sets the key to use for future encryption and decryption operations.
void setTK2(const uint8_t *key)
XORs the key schedule with the schedule for TK2.
bool setKey(const uint8_t *key, size_t len)
Sets the key to use for future encryption and decryption operations.