Skinny-C
 All Data Structures Files Functions Variables Groups Pages
Skinny64.cpp
1 /*
2  * Copyright (C) 2017 Southern Storm Software, Pty Ltd.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included
12  * in all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 
23 #include "Skinny64.h"
24 #include "Crypto.h"
25 #include "utility/EndianUtil.h"
26 #include "utility/RotateUtil.h"
27 #include "utility/ProgMemUtil.h"
28 #include <string.h>
29 
95 #if defined(__AVR__)
96 #define USE_AVR_INLINE_ASM 1
97 #endif
98 
99 #ifndef CRYPTO_LITTLE_ENDIAN
100 #error "Arduino platforms are assumed to be little-endian"
101 #endif
102 
/**
 * \brief Constructs a Skinny-64 block cipher object.
 *
 * \param schedule Points to storage for the round key schedule,
 * provided by the concrete subclass (one uint32_t per round —
 * see clear(), which wipes r * sizeof(uint32_t) bytes).
 * \param rounds The number of rounds to perform during encryption/decryption.
 */
Skinny64::Skinny64(uint32_t *schedule, uint8_t rounds)
    : s(schedule), r(rounds)
{
}
113 
119 {
120 }
121 
/**
 * \brief Size of a SKINNY-64 block.
 * \return Always 8 — SKINNY-64 operates on 64-bit blocks.
 */
size_t Skinny64::blockSize() const
{
    return 8;
}
130 
131 #if USE_AVR_INLINE_ASM
132 
// Force the sboxes to be aligned on a 256-byte boundary.
// This makes sbox lookups more efficient.
#define ALIGN256 __attribute__((aligned(256)))

// S-box tables for Skinny-64. We only use this for AVR platforms,
// as there will be issues with constant cache behaviour on ARM.
// It would be nice to avoid this for AVR as well, but the S-box
// operations are simply too slow using bit operations on AVR.
// Technically the S-boxes for Skinny-64 are 4-bit but we expand
// them to 8-bit to make the lookups easier.
// Each entry sbox[b] applies the 4-bit S-box
// [c,6,9,0,1,a,2,b,3,8,5,d,4,e,7,f] to both nibbles of b
// independently, matching skinny64_sbox() in the C fallback.
static uint8_t const sbox[256] PROGMEM ALIGN256 = {
    0xcc, 0xc6, 0xc9, 0xc0, 0xc1, 0xca, 0xc2, 0xcb, 0xc3, 0xc8, 0xc5, 0xcd,
    0xc4, 0xce, 0xc7, 0xcf, 0x6c, 0x66, 0x69, 0x60, 0x61, 0x6a, 0x62, 0x6b,
    0x63, 0x68, 0x65, 0x6d, 0x64, 0x6e, 0x67, 0x6f, 0x9c, 0x96, 0x99, 0x90,
    0x91, 0x9a, 0x92, 0x9b, 0x93, 0x98, 0x95, 0x9d, 0x94, 0x9e, 0x97, 0x9f,
    0x0c, 0x06, 0x09, 0x00, 0x01, 0x0a, 0x02, 0x0b, 0x03, 0x08, 0x05, 0x0d,
    0x04, 0x0e, 0x07, 0x0f, 0x1c, 0x16, 0x19, 0x10, 0x11, 0x1a, 0x12, 0x1b,
    0x13, 0x18, 0x15, 0x1d, 0x14, 0x1e, 0x17, 0x1f, 0xac, 0xa6, 0xa9, 0xa0,
    0xa1, 0xaa, 0xa2, 0xab, 0xa3, 0xa8, 0xa5, 0xad, 0xa4, 0xae, 0xa7, 0xaf,
    0x2c, 0x26, 0x29, 0x20, 0x21, 0x2a, 0x22, 0x2b, 0x23, 0x28, 0x25, 0x2d,
    0x24, 0x2e, 0x27, 0x2f, 0xbc, 0xb6, 0xb9, 0xb0, 0xb1, 0xba, 0xb2, 0xbb,
    0xb3, 0xb8, 0xb5, 0xbd, 0xb4, 0xbe, 0xb7, 0xbf, 0x3c, 0x36, 0x39, 0x30,
    0x31, 0x3a, 0x32, 0x3b, 0x33, 0x38, 0x35, 0x3d, 0x34, 0x3e, 0x37, 0x3f,
    0x8c, 0x86, 0x89, 0x80, 0x81, 0x8a, 0x82, 0x8b, 0x83, 0x88, 0x85, 0x8d,
    0x84, 0x8e, 0x87, 0x8f, 0x5c, 0x56, 0x59, 0x50, 0x51, 0x5a, 0x52, 0x5b,
    0x53, 0x58, 0x55, 0x5d, 0x54, 0x5e, 0x57, 0x5f, 0xdc, 0xd6, 0xd9, 0xd0,
    0xd1, 0xda, 0xd2, 0xdb, 0xd3, 0xd8, 0xd5, 0xdd, 0xd4, 0xde, 0xd7, 0xdf,
    0x4c, 0x46, 0x49, 0x40, 0x41, 0x4a, 0x42, 0x4b, 0x43, 0x48, 0x45, 0x4d,
    0x44, 0x4e, 0x47, 0x4f, 0xec, 0xe6, 0xe9, 0xe0, 0xe1, 0xea, 0xe2, 0xeb,
    0xe3, 0xe8, 0xe5, 0xed, 0xe4, 0xee, 0xe7, 0xef, 0x7c, 0x76, 0x79, 0x70,
    0x71, 0x7a, 0x72, 0x7b, 0x73, 0x78, 0x75, 0x7d, 0x74, 0x7e, 0x77, 0x7f,
    0xfc, 0xf6, 0xf9, 0xf0, 0xf1, 0xfa, 0xf2, 0xfb, 0xf3, 0xf8, 0xf5, 0xfd,
    0xf4, 0xfe, 0xf7, 0xff
};
// Inverse S-box table, expanded to bytes the same way as sbox[] above.
// Each entry sbox_inv[b] applies the inverse 4-bit S-box
// [3,4,6,8,c,a,1,e,9,2,5,7,0,b,d,f] to both nibbles of b,
// matching skinny64_inv_sbox() in the C fallback.
static uint8_t const sbox_inv[256] PROGMEM ALIGN256 = {
    0x33, 0x34, 0x36, 0x38, 0x3c, 0x3a, 0x31, 0x3e, 0x39, 0x32, 0x35, 0x37,
    0x30, 0x3b, 0x3d, 0x3f, 0x43, 0x44, 0x46, 0x48, 0x4c, 0x4a, 0x41, 0x4e,
    0x49, 0x42, 0x45, 0x47, 0x40, 0x4b, 0x4d, 0x4f, 0x63, 0x64, 0x66, 0x68,
    0x6c, 0x6a, 0x61, 0x6e, 0x69, 0x62, 0x65, 0x67, 0x60, 0x6b, 0x6d, 0x6f,
    0x83, 0x84, 0x86, 0x88, 0x8c, 0x8a, 0x81, 0x8e, 0x89, 0x82, 0x85, 0x87,
    0x80, 0x8b, 0x8d, 0x8f, 0xc3, 0xc4, 0xc6, 0xc8, 0xcc, 0xca, 0xc1, 0xce,
    0xc9, 0xc2, 0xc5, 0xc7, 0xc0, 0xcb, 0xcd, 0xcf, 0xa3, 0xa4, 0xa6, 0xa8,
    0xac, 0xaa, 0xa1, 0xae, 0xa9, 0xa2, 0xa5, 0xa7, 0xa0, 0xab, 0xad, 0xaf,
    0x13, 0x14, 0x16, 0x18, 0x1c, 0x1a, 0x11, 0x1e, 0x19, 0x12, 0x15, 0x17,
    0x10, 0x1b, 0x1d, 0x1f, 0xe3, 0xe4, 0xe6, 0xe8, 0xec, 0xea, 0xe1, 0xee,
    0xe9, 0xe2, 0xe5, 0xe7, 0xe0, 0xeb, 0xed, 0xef, 0x93, 0x94, 0x96, 0x98,
    0x9c, 0x9a, 0x91, 0x9e, 0x99, 0x92, 0x95, 0x97, 0x90, 0x9b, 0x9d, 0x9f,
    0x23, 0x24, 0x26, 0x28, 0x2c, 0x2a, 0x21, 0x2e, 0x29, 0x22, 0x25, 0x27,
    0x20, 0x2b, 0x2d, 0x2f, 0x53, 0x54, 0x56, 0x58, 0x5c, 0x5a, 0x51, 0x5e,
    0x59, 0x52, 0x55, 0x57, 0x50, 0x5b, 0x5d, 0x5f, 0x73, 0x74, 0x76, 0x78,
    0x7c, 0x7a, 0x71, 0x7e, 0x79, 0x72, 0x75, 0x77, 0x70, 0x7b, 0x7d, 0x7f,
    0x03, 0x04, 0x06, 0x08, 0x0c, 0x0a, 0x01, 0x0e, 0x09, 0x02, 0x05, 0x07,
    0x00, 0x0b, 0x0d, 0x0f, 0xb3, 0xb4, 0xb6, 0xb8, 0xbc, 0xba, 0xb1, 0xbe,
    0xb9, 0xb2, 0xb5, 0xb7, 0xb0, 0xbb, 0xbd, 0xbf, 0xd3, 0xd4, 0xd6, 0xd8,
    0xdc, 0xda, 0xd1, 0xde, 0xd9, 0xd2, 0xd5, 0xd7, 0xd0, 0xdb, 0xdd, 0xdf,
    0xf3, 0xf4, 0xf6, 0xf8, 0xfc, 0xfa, 0xf1, 0xfe, 0xf9, 0xf2, 0xf5, 0xf7,
    0xf0, 0xfb, 0xfd, 0xff
};
191 
// Figure out how to do lookups from a pgmspace sbox table on this platform.
// All variants assume r31 (Z high byte) — and RAMPZ where present —
// already point at the 256-byte-aligned table, so only r30 changes
// per lookup. The no-LPMX fallback also clobbers r0 (__tmp_reg__).
#if defined(RAMPZ)
#define SBOX(reg) \
    "mov r30," reg "\n" \
    "elpm " reg ",Z\n"
#elif defined(__AVR_HAVE_LPMX__)
#define SBOX(reg) \
    "mov r30," reg "\n" \
    "lpm " reg ",Z\n"
#elif defined(__AVR_TINY__)
#define SBOX(reg) \
    "mov r30," reg "\n" \
    "ld " reg ",Z\n"
#else
#define SBOX(reg) \
    "mov r30," reg "\n" \
    "lpm\n" \
    "mov " reg ",r0\n"
#endif
211 
// Mix the columns during an encryption round.
// Equivalent to the C fallback in encryptBlock():
//   row1 ^= row2; row2 ^= row0; temp = row3 ^ row2;
//   then rows rotate down with temp becoming the new row0.
// Clobbers __tmp_reg__ (r0).
#define MIX_COLUMNS(row0, row1, row2, row3) \
    "eor " row1 "," row2 "\n" \
    "eor " row2 "," row0 "\n" \
    "mov __tmp_reg__," row3 "\n" \
    "eor __tmp_reg__," row2 "\n" \
    "mov " row3 "," row2 "\n" \
    "mov " row2 "," row1 "\n" \
    "mov " row1 "," row0 "\n" \
    "mov " row0 ",__tmp_reg__\n"
222 
// Inverse mix of the columns during a decryption round.
// Exact inverse of MIX_COLUMNS; mirrors the C fallback in
// decryptBlock(). Clobbers __tmp_reg__ (r0).
#define MIX_COLUMNS_INV(row0, row1, row2, row3) \
    "mov __tmp_reg__," row3 "\n" \
    "mov " row3 "," row0 "\n" \
    "mov " row0 "," row1 "\n" \
    "mov " row1 "," row2 "\n" \
    "eor " row3 ",__tmp_reg__\n" \
    "eor __tmp_reg__," row0 "\n" \
    "mov " row2 ",__tmp_reg__\n" \
    "eor " row1 "," row2 "\n"
233 
// Load a 64-bit input block into r16..r23. Note that the even cells
// are in the high nibbles of each byte rather than the low nibble.
// Z must point at the block. (The dangling trailing line-continuation
// after the final instruction has been removed: it made the macro
// silently absorb the following source line.)
#define LOAD_BLOCK() \
    "ld r16,Z\n" \
    "ldd r17,Z+1\n" \
    "ldd r18,Z+2\n" \
    "ldd r19,Z+3\n" \
    "ldd r20,Z+4\n" \
    "ldd r21,Z+5\n" \
    "ldd r22,Z+6\n" \
    "ldd r23,Z+7\n"
// Store r16..r23 to a 64-bit output block.
// Z must point at the output block before this is invoked.
#define STORE_BLOCK() \
    "st Z,r16\n" \
    "std Z+1,r17\n" \
    "std Z+2,r18\n" \
    "std Z+3,r19\n" \
    "std Z+4,r20\n" \
    "std Z+5,r21\n" \
    "std Z+6,r22\n" \
    "std Z+7,r23\n"
256 
257 #else // !USE_AVR_INLINE_ASM
258 
/**
 * Union that describes the 64-bit 4x4 array of cells in the SKINNY-64
 * state, viewed either as four 16-bit rows or as two 32-bit row pairs.
 *
 * NOTE: the closing "} Skinny64Cells_t;" line was missing from this
 * listing (the name is used throughout the file); restored here.
 */
typedef union
{
    uint16_t row[4];
    uint32_t lrow[2];

} Skinny64Cells_t;
/**
 * Applies the SKINNY-64 S-box to each of the eight 4-bit cells
 * packed in \a x.
 *
 * The specification's form is three SBOX_MIX / SBOX_SHIFT pairs
 * followed by a final SBOX_MIX:
 *
 * #define SBOX_MIX(x)
 *     (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
 * #define SBOX_SHIFT(x)
 *     ((((x) << 1) & 0xEEEEEEEEU) | (((x) >> 3) & 0x11111111U))
 *
 * Here the mixing is instead done on the bits in their original
 * positions and the deferred SBOX_SHIFT steps are collapsed into the
 * single rotation on the return line, cutting the shift count from
 * 14 to 10.
 */
inline uint32_t skinny64_sbox(uint32_t x)
{
    x = ~x;
    const uint32_t mix0 = ((x >> 3) & (x >> 2)) & 0x11111111U;
    x ^= mix0;
    const uint32_t mix3 = ((x << 1) & (x << 2)) & 0x88888888U;
    x ^= mix3;
    const uint32_t mix2 = ((x << 1) & (x << 2)) & 0x44444444U;
    x ^= mix2;
    const uint32_t mix1 = ((x >> 2) & (x << 1)) & 0x22222222U;
    x ^= mix1;
    x = ~x;
    // Perform the three deferred nibble rotations in one step.
    const uint32_t low_bits = (x >> 1) & 0x77777777U;
    const uint32_t high_bit = (x << 3) & 0x88888888U;
    return low_bits | high_bit;
}
295 
/**
 * Applies the inverse SKINNY-64 S-box to each of the eight 4-bit
 * cells packed in \a x. Exact inverse of skinny64_sbox().
 *
 * The specification's form is three SBOX_MIX / SBOX_SHIFT_INV pairs
 * followed by a final SBOX_MIX:
 *
 * #define SBOX_MIX(x)
 *     (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
 * #define SBOX_SHIFT_INV(x)
 *     ((((x) >> 1) & 0x77777777U) | (((x) << 3) & 0x88888888U))
 *
 * As with the forward S-box, the mixing is done on bits in their
 * original positions and the deferred shifts are collapsed into the
 * single rotation on the return line (10 shifts instead of 14).
 */
inline uint32_t skinny64_inv_sbox(uint32_t x)
{
    x = ~x;
    const uint32_t mix0 = ((x >> 3) & (x >> 2)) & 0x11111111U;
    x ^= mix0;
    const uint32_t mix1 = ((x << 1) & (x >> 2)) & 0x22222222U;
    x ^= mix1;
    const uint32_t mix2 = ((x << 1) & (x << 2)) & 0x44444444U;
    x ^= mix2;
    const uint32_t mix3 = ((x << 1) & (x << 2)) & 0x88888888U;
    x ^= mix3;
    x = ~x;
    // Perform the three deferred nibble rotations in one step.
    const uint32_t high_bits = (x << 1) & 0xEEEEEEEEU;
    const uint32_t low_bit = (x >> 3) & 0x11111111U;
    return high_bits | low_bit;
}
325 
/**
 * \brief Rotates a 16-bit word right by \a shift bit positions.
 *
 * \param x The word to rotate.
 * \param shift Rotation amount, 0..15.
 * \return The rotated word.
 *
 * The complementary left-shift amount is masked into the range 0..15
 * so that shift == 0 is well-defined: the previous "x << (16 - shift)"
 * promoted x to int and shifted by 16, which overflows a signed int
 * for x >= 0x8000. The call sites in this file only pass 4, 8 and 12,
 * for which the result is unchanged.
 */
inline uint16_t rightRotate_16bit(uint16_t x, uint8_t shift)
{
    return (uint16_t)((x >> shift) | (x << ((16 - shift) & 15)));
}
330 
331 inline void skinny64_unpack(Skinny64Cells_t *cells, const uint8_t *input)
332 {
333  cells->lrow[0] = ((uint32_t)(input[0])) |
334  (((uint32_t)(input[1])) << 8) |
335  (((uint32_t)(input[2])) << 16) |
336  (((uint32_t)(input[3])) << 24);
337  cells->lrow[1] = ((uint32_t)(input[4])) |
338  (((uint32_t)(input[5])) << 8) |
339  (((uint32_t)(input[6])) << 16) |
340  (((uint32_t)(input[7])) << 24);
341 }
342 
343 inline void skinny64_pack(uint8_t *output, const Skinny64Cells_t *cells)
344 {
345  uint32_t x = cells->lrow[0];
346  output[0] = (uint8_t)x;
347  output[1] = (uint8_t)(x >> 8);
348  output[2] = (uint8_t)(x >> 16);
349  output[3] = (uint8_t)(x >> 24);
350  x = cells->lrow[1];
351  output[4] = (uint8_t)x;
352  output[5] = (uint8_t)(x >> 8);
353  output[6] = (uint8_t)(x >> 16);
354  output[7] = (uint8_t)(x >> 24);
355 }
356 
357 #endif // !USE_AVR_INLINE_ASM
358 
/**
 * \brief Encrypts a single 8-byte block with the schedule prepared by
 * setTK1()/setTK2()/setTK3().
 *
 * \param output Buffer for the 8 ciphertext bytes; may be the same
 * buffer as \a input (the state is fully loaded before being stored).
 * \param input Buffer holding the 8 plaintext bytes.
 */
void Skinny64::encryptBlock(uint8_t *output, const uint8_t *input)
{
#if USE_AVR_INLINE_ASM
#if defined(RAMPZ)
    uint32_t sbox_addr = (uint32_t)sbox;
#else
    uint16_t sbox_addr = (uint16_t)sbox;
#endif
    __asm__ __volatile__ (
        // Load the input block from Z[0..7] into r16..r23.
        LOAD_BLOCK()

        // Set up Z to point to the start of the sbox table.
        "ldd r30,%A3\n"
        "ldd r31,%B3\n"
#if defined(RAMPZ)
        // Save RAMPZ and point it at the table's 64K segment.
        "in __tmp_reg__,%5\n"
        "push __tmp_reg__\n"
        "ldd __tmp_reg__,%C3\n"
        "out %5,__tmp_reg__\n"
#endif

        // Top of the loop.
        "1:\n"

        // Transform the state using the sbox.
        SBOX("r16")
        SBOX("r17")
        SBOX("r18")
        SBOX("r19")
        SBOX("r20")
        SBOX("r21")
        SBOX("r22")
        SBOX("r23")

        // XOR the state with the key schedule. Only the first two
        // rows carry schedule material; row 2 gets the fixed 0x20
        // round-constant marker (rows 0-1 constants were merged into
        // the schedule by setTK1()).
        "ld __tmp_reg__,X+\n"
        "eor r16,__tmp_reg__\n"
        "ld __tmp_reg__,X+\n"
        "eor r17,__tmp_reg__\n"
        "ld __tmp_reg__,X+\n"
        "eor r18,__tmp_reg__\n"
        "ld __tmp_reg__,X+\n"
        "eor r19,__tmp_reg__\n"
        "ldi r24,0x20\n"
        "eor r20,r24\n"

        // Shift the rows.
        "swap r18\n"            // r18:r19 = shift_right_4(r18:r19)
        "swap r19\n"
        "mov r24,r18\n"
        "mov r25,r19\n"
        "andi r24,0xF0\n"
        "andi r25,0xF0\n"
        "andi r18,0x0F\n"
        "andi r19,0x0F\n"
        "or r18,r25\n"
        "or r19,r24\n"
        "mov __tmp_reg__,r20\n" // r20:r21 = shift_right_8(r20:r21)
        "mov r20,r21\n"
        "mov r21,__tmp_reg__\n"
        "swap r22\n"            // r22:r23 = shift_right_12(r22:r23)
        "swap r23\n"
        "mov r24,r22\n"
        "mov r25,r23\n"
        "andi r24,0x0F\n"
        "andi r25,0x0F\n"
        "andi r22,0xF0\n"
        "andi r23,0xF0\n"
        "or r22,r25\n"
        "or r23,r24\n"

        // Mix the columns.
        MIX_COLUMNS("r16", "r18", "r20", "r22")
        MIX_COLUMNS("r17", "r19", "r21", "r23")

        // Bottom of the loop.
        "dec %4\n"
        "breq 2f\n"
        "rjmp 1b\n"
        "2:\n"

        // Restore the original RAMPZ value.
#if defined(RAMPZ)
        "pop __tmp_reg__\n"
        "out %5,__tmp_reg__\n"
#endif

        // Store the final state into the output buffer.
        "ldd r30,%A2\n"
        "ldd r31,%B2\n"
        STORE_BLOCK()

        : : "x"(s), "z"(input), "Q"(output), "Q"(sbox_addr),
            "r"((uint8_t)r)
#if defined(RAMPZ)
        , "I" (_SFR_IO_ADDR(RAMPZ))
#endif
        : "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
          "r24", "r25", "memory"
    );
#else // !USE_AVR_INLINE_ASM
    Skinny64Cells_t state;
    const uint32_t *schedule;
    uint16_t temp;

    // Unpack the input block into the state array.
    skinny64_unpack(&state, input);

    // Perform all encryption rounds, walking the schedule forwards.
    schedule = s;
    for (uint8_t index = r; index > 0; --index, ++schedule) {
        // Apply the S-box to all bytes in the state.
        state.lrow[0] = skinny64_sbox(state.lrow[0]);
        state.lrow[1] = skinny64_sbox(state.lrow[1]);

        // Apply the subkey for this round (rows 0-1), plus the fixed
        // 0x20 round-constant marker on row 2.
        state.lrow[0] ^= schedule[0];
        state.row[2] ^= 0x20;

        // Shift the cells in the rows right.
        state.row[1] = rightRotate_16bit(state.row[1], 4);
        state.row[2] = rightRotate_16bit(state.row[2], 8);
        state.row[3] = rightRotate_16bit(state.row[3], 12);

        // Mix the columns.
        state.row[1] ^= state.row[2];
        state.row[2] ^= state.row[0];
        temp = state.row[3] ^ state.row[2];
        state.row[3] = state.row[2];
        state.row[2] = state.row[1];
        state.row[1] = state.row[0];
        state.row[0] = temp;
    }

    // Pack the result into the output buffer.
    skinny64_pack(output, &state);
#endif // !USE_AVR_INLINE_ASM
}
498 
/**
 * \brief Decrypts a single 8-byte block; exact inverse of
 * encryptBlock(), running the rounds and the key schedule backwards.
 *
 * \param output Buffer for the 8 plaintext bytes; may be the same
 * buffer as \a input (the state is fully loaded before being stored).
 * \param input Buffer holding the 8 ciphertext bytes.
 */
void Skinny64::decryptBlock(uint8_t *output, const uint8_t *input)
{
#if USE_AVR_INLINE_ASM
#if defined(RAMPZ)
    uint32_t sbox_addr = (uint32_t)sbox_inv;
#else
    uint16_t sbox_addr = (uint16_t)sbox_inv;
#endif
    __asm__ __volatile__ (
        // Load the input block from Z[0..7] into r16..r23.
        LOAD_BLOCK()

        // Set up Z to point to the start of the sbox table.
        "ldd r30,%A3\n"
        "ldd r31,%B3\n"
#if defined(RAMPZ)
        // Save RAMPZ and point it at the table's 64K segment.
        "in __tmp_reg__,%5\n"
        "push __tmp_reg__\n"
        "ldd __tmp_reg__,%C3\n"
        "out %5,__tmp_reg__\n"
#endif

        // Top of the loop.
        "1:\n"

        // Inverse mix of the columns.
        MIX_COLUMNS_INV("r16", "r18", "r20", "r22")
        MIX_COLUMNS_INV("r17", "r19", "r21", "r23")

        // Inverse shift of the rows.
        "swap r18\n"            // r18:r19 = shift_right_12(r18:r19)
        "swap r19\n"
        "mov r24,r18\n"
        "mov r25,r19\n"
        "andi r24,0x0F\n"
        "andi r25,0x0F\n"
        "andi r18,0xF0\n"
        "andi r19,0xF0\n"
        "or r18,r25\n"
        "or r19,r24\n"
        "mov __tmp_reg__,r20\n" // r20:r21 = shift_right_8(r20:r21)
        "mov r20,r21\n"
        "mov r21,__tmp_reg__\n"
        "swap r22\n"            // r22:r23 = shift_right_4(r22:r23)
        "swap r23\n"
        "mov r24,r22\n"
        "mov r25,r23\n"
        "andi r24,0xF0\n"
        "andi r25,0xF0\n"
        "andi r22,0x0F\n"
        "andi r23,0x0F\n"
        "or r22,r25\n"
        "or r23,r24\n"

        // XOR the state with the key schedule, walking X backwards
        // from the end of the schedule (pre-decrement loads).
        "ld __tmp_reg__,-X\n"
        "eor r19,__tmp_reg__\n"
        "ld __tmp_reg__,-X\n"
        "eor r18,__tmp_reg__\n"
        "ld __tmp_reg__,-X\n"
        "eor r17,__tmp_reg__\n"
        "ld __tmp_reg__,-X\n"
        "eor r16,__tmp_reg__\n"
        "ldi r24,0x20\n"
        "eor r20,r24\n"

        // Transform the state using the inverse sbox.
        SBOX("r16")
        SBOX("r17")
        SBOX("r18")
        SBOX("r19")
        SBOX("r20")
        SBOX("r21")
        SBOX("r22")
        SBOX("r23")

        // Bottom of the loop.
        "dec %4\n"
        "breq 2f\n"
        "rjmp 1b\n"
        "2:\n"

        // Restore the original RAMPZ value.
#if defined(RAMPZ)
        "pop __tmp_reg__\n"
        "out %5,__tmp_reg__\n"
#endif

        // Store the final state into the output buffer.
        "ldd r30,%A2\n"
        "ldd r31,%B2\n"
        STORE_BLOCK()

        : : "x"(s + r), "z"(input), "Q"(output), "Q"(sbox_addr),
            "r"((uint8_t)r)
#if defined(RAMPZ)
        , "I" (_SFR_IO_ADDR(RAMPZ))
#endif
        : "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
          "r24", "r25", "memory"
    );
#else // !USE_AVR_INLINE_ASM
    Skinny64Cells_t state;
    const uint32_t *schedule;
    uint16_t temp;

    // Unpack the input block into the state array.
    skinny64_unpack(&state, input);

    // Perform all decryption rounds, walking the schedule backwards.
    schedule = &(s[r - 1]);
    for (uint8_t index = r; index > 0; --index, --schedule) {
        // Inverse mix of the columns.
        temp = state.row[3];
        state.row[3] = state.row[0];
        state.row[0] = state.row[1];
        state.row[1] = state.row[2];
        state.row[3] ^= temp;
        state.row[2] = temp ^ state.row[0];
        state.row[1] ^= state.row[2];

        // Inverse shift of the rows.
        state.row[1] = rightRotate_16bit(state.row[1], 12);
        state.row[2] = rightRotate_16bit(state.row[2], 8);
        state.row[3] = rightRotate_16bit(state.row[3], 4);

        // Apply the subkey for this round (rows 0-1), plus the fixed
        // 0x20 round-constant marker on row 2.
        state.lrow[0] ^= schedule[0];
        state.row[2] ^= 0x20;

        // Apply the inverse of the S-box to all bytes in the state.
        state.lrow[0] = skinny64_inv_sbox(state.lrow[0]);
        state.lrow[1] = skinny64_inv_sbox(state.lrow[1]);
    }

    // Pack the result into the output buffer.
    skinny64_pack(output, &state);
#endif // !USE_AVR_INLINE_ASM
}
638 
640 {
641  clean(s, r * sizeof(uint32_t));
642 }
643 
644 #if USE_AVR_INLINE_ASM
645 
// Permutes the cells within a TKn value while expanding the key schedule.
// PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7]
// Operates on the nibble-packed state in r16..r23 (even-numbered
// cells in the high nibbles); the old first half is parked in
// r8..r11, which are clobbered.
#define PERMUTE_TKn() \
    "mov r8,r16\n"      /* r8..r11 = TK[0..7] */ \
    "mov r9,r17\n" \
    "mov r10,r18\n" \
    "mov r11,r19\n" \
    "mov r16,r23\n"     /* TK[1] = TK[15] */ \
    "andi r16,0x0F\n" \
    "mov r17,r20\n"     /* TK[2] = TK[8] */ \
    "andi r17,0xF0\n" \
    "swap r20\n"        /* TK[0] = TK[9] */ \
    "andi r20,0xF0\n" \
    "or r16,r20\n" \
    "mov r19,r22\n"     /* TK[6] = TK[12] */ \
    "andi r19,0xF0\n" \
    "andi r22,0x0F\n"   /* TK[3] = TK[13] */ \
    "or r17,r22\n" \
    "mov r18,r21\n"     /* TK[4] = TK[10] */ \
    "andi r18,0xF0\n" \
    "swap r23\n"        /* TK[5] = TK[14] */ \
    "andi r23,0x0F\n" \
    "or r18,r23\n" \
    "andi r21,0x0F\n"   /* TK[7] = TK[11] */ \
    "or r19,r21\n" \
    "mov r20,r8\n"      /* TK[8..15] = r8..r11 */ \
    "mov r21,r9\n" \
    "mov r22,r10\n" \
    "mov r23,r11\n"
675 
676 #else // !USE_AVR_INLINE_ASM
677 
// Permutes the cells within a TKn value while expanding the key schedule.
// PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7]
// Permutation generated by http://programming.sirrida.de/calcperm.php
// The old first half (cells 0..7) moves unchanged into lrow[1];
// the new first half is bit-gathered from the old second half.
#define skinny64_permute_tk(tk) \
    do { \
        uint32_t x = tk.lrow[1]; \
        tk.lrow[1] = tk.lrow[0]; \
        tk.lrow[0] = ((x & 0x0000000FU) << 4) | \
                     ((x & 0x00F0F0F0U) << 8) | \
                     ((x & 0x0F000000U) >> 24) | \
                     ((x & 0x00000F00U) << 16) | \
                     ((x & 0xF0000000U) >> 12) | \
                     ((x & 0x000F0000U) >> 8); \
    } while (0)
692 
693 #endif // !USE_AVR_INLINE_ASM
694 
/**
 * \brief Clears the key schedule and sets it to the schedule for TK1.
 *
 * \param key Points to the 8 bytes of TK1.
 * \param tweaked Set to true when TK1 holds a tweak rather than key
 * material, which XOR's the 0x20 marker recommended by the SKINNY
 * specification into each round word (defaults to false in the
 * declaration).
 *
 * The per-round 6-bit LFSR round constants for rows 0-1 are folded
 * into the schedule words here; the fixed row-2 constant (0x20) is
 * applied later by encryptBlock()/decryptBlock().
 */
void Skinny64::setTK1(const uint8_t *key, bool tweaked)
{
#if USE_AVR_INLINE_ASM
    __asm__ __volatile__ (
        // Load the TK1 cells into r16..r23.
        LOAD_BLOCK()

        // Set rc to zero (stored in r25).
        "clr r25\n"

        // Top of the loop.
        "1:\n"

        // Generate the rc value for the next round.
        // rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        "clr r24\n"
        "lsl r25\n"
        "bst r25,6\n"
        "bld r24,0\n"
        "eor r25,r24\n"
        "bst r25,5\n"
        "bld r24,0\n"
        "eor r25,r24\n"
        "ldi r24,1\n"
        "eor r25,r24\n"

        // Store the first 8 cells of TK1 into the key schedule and XOR with rc.
        "mov r24,r25\n"
        "andi r24,0x0F\n"
        "swap r24\n"
        "eor r24,r16\n"
        "st X+,r24\n"
        "mov r24,%3\n"          // %3 = 0x20 tweak marker, or 0x00
        "eor r24,r17\n"
        "st X+,r24\n"
        "mov r24,r25\n"
        "andi r24,0x30\n"       // high two bits of rc go to row 1
        "eor r24,r18\n"
        "st X+,r24\n"
        "st X+,r19\n"

        // Permute TK1 for the next round.
        PERMUTE_TKn()

        // Bottom of the loop.
        "dec %2\n"
        "breq 2f\n"
        "rjmp 1b\n"
        "2:\n"

        : : "x"(s), "z"(key), "r"(r), "r"((uint8_t)(tweaked ? 0x20 : 0x00))
        : "r8", "r9", "r10", "r11", "r16", "r17", "r18", "r19",
          "r20", "r21", "r22", "r23", "r24", "r25", "memory"
    );
#else // !USE_AVR_INLINE_ASM
    Skinny64Cells_t TK1;
    uint32_t *schedule = s;
    uint8_t rc = 0;

    // Unpack the incoming key value into the TK1 array.
    skinny64_unpack(&TK1, key);

    // Generate the key schedule words for all rounds.
    for (uint8_t index = r; index > 0; --index, ++schedule) {
        // XOR the round constants with the current schedule words.
        // The round constants for the 3rd and 4th rows are
        // fixed and will be applied during encrypt/decrypt.
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        rc &= 0x3F;
        schedule[0] = TK1.lrow[0] ^ ((rc << 4) & 0xF0) ^
                      ((((uint32_t)rc) << 16) & 0x300000U);

        // If we have a tweak, then we need to XOR a 1 bit into the
        // second bit of the top cell of the third column as recommended
        // by the SKINNY specification.
        if (tweaked)
            schedule[0] ^= 0x2000;

        // Permute TK1 for the next round.
        skinny64_permute_tk(TK1);
    }

    // Clean up and exit.
    clean(TK1);
#endif // !USE_AVR_INLINE_ASM
}
787 
796 void Skinny64::xorTK1(const uint8_t *key)
797 {
798 #if USE_AVR_INLINE_ASM
799  __asm__ __volatile__ (
800  // Load the TK1 bytes into r16..r23.
801  LOAD_BLOCK()
802 
803  // Top of the loop.
804  "1:\n"
805 
806  // XOR the first two rows of TK1 with the key schedule.
807  "ld __tmp_reg__,X\n"
808  "eor __tmp_reg__,r16\n"
809  "st X+,__tmp_reg__\n"
810  "ld __tmp_reg__,X\n"
811  "eor __tmp_reg__,r17\n"
812  "st X+,__tmp_reg__\n"
813  "ld __tmp_reg__,X\n"
814  "eor __tmp_reg__,r18\n"
815  "st X+,__tmp_reg__\n"
816  "ld __tmp_reg__,X\n"
817  "eor __tmp_reg__,r19\n"
818 
819  // Permute TK1 for the next round.
820  PERMUTE_TKn()
821 
822  // Bottom of the loop.
823  "dec %2\n"
824  "breq 2f\n"
825  "rjmp 1b\n"
826  "2:\n"
827 
828  : : "x"(s), "z"(key), "r"(r)
829  : "r8", "r9", "r10", "r11", "r16", "r17", "r18", "r19",
830  "r20", "r21", "r22", "r23", "r24", "memory"
831  );
832 #else // !USE_AVR_INLINE_ASM
833  Skinny64Cells_t TK1;
834  uint32_t *schedule = s;
835 
836  // Unpack the incoming key value into the TK1 array.
837  skinny64_unpack(&TK1, key);
838 
839  // XOR against the key schedule words for all rounds.
840  for (uint8_t index = r; index > 0; --index, ++schedule) {
841  schedule[0] ^= TK1.lrow[0];
842  skinny64_permute_tk(TK1);
843  }
844 
845  // Clean up and exit.
846  clean(TK1);
847 #endif // !USE_AVR_INLINE_ASM
848 }
849 
850 #if USE_AVR_INLINE_ASM
851 
// Transform the contents of a register using LFSR2.
// AVR equivalent of skinny64_LFSR2() applied to the two nibbles
// packed in \a reg. Clobbers r24.
#define LFSR2(reg) \
    "mov r24, " reg "\n" \
    "lsl " reg "\n" \
    "bst r24,7\n" \
    "bld " reg ",4\n" \
    "bst r24,3\n" \
    "bld " reg ",0\n" \
    "lsr r24\n" \
    "lsr r24\n" \
    "andi r24,0x11\n" \
    "eor " reg ",r24\n"
864 
// Transform the contents of a register using LFSR3.
// AVR equivalent of skinny64_LFSR3() applied to the two nibbles
// packed in \a reg. Clobbers r24.
#define LFSR3(reg) \
    "mov r24, " reg "\n" \
    "lsr " reg "\n" \
    "bst r24,4\n" \
    "bld " reg ",7\n" \
    "bst r24,0\n" \
    "bld " reg ",3\n" \
    "andi r24,0x88\n" \
    "eor " reg ",r24\n"
875 
876 #else // !USE_AVR_INLINE_ASM
877 
/**
 * Applies the TK2 LFSR to each of the eight 4-bit cells in \a x:
 * every cell shifts left one bit, with bit3 ^ bit2 fed into bit0.
 */
inline uint32_t skinny64_LFSR2(uint32_t x)
{
    uint32_t shifted = (x << 1) & 0xEEEEEEEEU;
    uint32_t feedback = ((x >> 3) ^ (x >> 2)) & 0x11111111U;
    return shifted ^ feedback;
}
882 
/**
 * Applies the TK3 LFSR to each of the eight 4-bit cells in \a x:
 * every cell shifts right one bit, with bit0 ^ bit3 fed into bit3.
 * This is the inverse of skinny64_LFSR2().
 */
inline uint32_t skinny64_LFSR3(uint32_t x)
{
    uint32_t shifted = (x >> 1) & 0x77777777U;
    uint32_t feedback = (x ^ (x << 3)) & 0x88888888U;
    return shifted ^ feedback;
}
887 
888 #endif // !USE_AVR_INLINE_ASM
889 
/**
 * \brief XOR's the key schedule with the schedule for TK2.
 *
 * \param key Points to the 8 bytes of TK2.
 *
 * Must be called after setTK1() has initialized the schedule; each
 * round word is XOR'ed with the current TK2 rows, which are then
 * permuted and run through LFSR2 for the next round.
 */
void Skinny64::setTK2(const uint8_t *key)
{
#if USE_AVR_INLINE_ASM
    __asm__ __volatile__ (
        // Load the TK2 bytes into r16..r23.
        LOAD_BLOCK()

        // Top of the loop.
        "1:\n"

        // XOR the first two rows of TK2 with the key schedule.
        "ld __tmp_reg__,X\n"
        "eor __tmp_reg__,r16\n"
        "st X+,__tmp_reg__\n"
        "ld __tmp_reg__,X\n"
        "eor __tmp_reg__,r17\n"
        "st X+,__tmp_reg__\n"
        "ld __tmp_reg__,X\n"
        "eor __tmp_reg__,r18\n"
        "st X+,__tmp_reg__\n"
        "ld __tmp_reg__,X\n"
        "eor __tmp_reg__,r19\n"
        "st X+,__tmp_reg__\n"

        // Permute TK2 for the next round.
        PERMUTE_TKn()

        // Apply LFSR2 to the first two rows of TK2.
        LFSR2("r16")
        LFSR2("r17")
        LFSR2("r18")
        LFSR2("r19")

        // Bottom of the loop.
        "dec %2\n"
        "breq 2f\n"
        "rjmp 1b\n"
        "2:\n"

        : : "x"(s), "z"(key), "r"(r)
        : "r8", "r9", "r10", "r11", "r16", "r17", "r18", "r19",
          "r20", "r21", "r22", "r23", "r24", "memory"
    );
#else // !USE_AVR_INLINE_ASM
    Skinny64Cells_t TK2;
    uint32_t *schedule = s;

    // Unpack the incoming key value into the TK2 array.
    skinny64_unpack(&TK2, key);

    // XOR against the key schedule words for all rounds.
    for (uint8_t index = r; index > 0; --index, ++schedule) {
        // XOR TK2 against the key schedule.
        schedule[0] ^= TK2.lrow[0];

        // Permute TK2 for the next round.
        skinny64_permute_tk(TK2);

        // Apply LFSR2 to the first two rows of TK2.
        TK2.lrow[0] = skinny64_LFSR2(TK2.lrow[0]);
    }

    // Clean up and exit.
    clean(TK2);
#endif // !USE_AVR_INLINE_ASM
}
961 
/**
 * \brief XOR's the key schedule with the schedule for TK3.
 *
 * \param key Points to the 8 bytes of TK3.
 *
 * Must be called after setTK1() has initialized the schedule; each
 * round word is XOR'ed with the current TK3 rows, which are then
 * permuted and run through LFSR3 for the next round.
 */
void Skinny64::setTK3(const uint8_t *key)
{
#if USE_AVR_INLINE_ASM
    __asm__ __volatile__ (
        // Load the TK3 bytes into r16..r23.
        LOAD_BLOCK()

        // Top of the loop.
        "1:\n"

        // XOR the first two rows of TK3 with the key schedule.
        "ld __tmp_reg__,X\n"
        "eor __tmp_reg__,r16\n"
        "st X+,__tmp_reg__\n"
        "ld __tmp_reg__,X\n"
        "eor __tmp_reg__,r17\n"
        "st X+,__tmp_reg__\n"
        "ld __tmp_reg__,X\n"
        "eor __tmp_reg__,r18\n"
        "st X+,__tmp_reg__\n"
        "ld __tmp_reg__,X\n"
        "eor __tmp_reg__,r19\n"
        "st X+,__tmp_reg__\n"

        // Permute TK3 for the next round.
        PERMUTE_TKn()

        // Apply LFSR3 to the first two rows of TK3.
        LFSR3("r16")
        LFSR3("r17")
        LFSR3("r18")
        LFSR3("r19")

        // Bottom of the loop.
        "dec %2\n"
        "breq 2f\n"
        "rjmp 1b\n"
        "2:\n"

        : : "x"(s), "z"(key), "r"(r)
        : "r8", "r9", "r10", "r11", "r16", "r17", "r18", "r19",
          "r20", "r21", "r22", "r23", "r24", "memory"
    );
#else // !USE_AVR_INLINE_ASM
    Skinny64Cells_t TK3;
    uint32_t *schedule = s;

    // Unpack the incoming key value into the TK3 array.
    skinny64_unpack(&TK3, key);

    // XOR against the key schedule words for all rounds.
    for (uint8_t index = r; index > 0; --index, ++schedule) {
        // XOR TK3 against the key schedule.
        schedule[0] ^= TK3.lrow[0];

        // Permute TK3 for the next round.
        skinny64_permute_tk(TK3);

        // Apply LFSR3 to the first two rows of TK3.
        TK3.lrow[0] = skinny64_LFSR3(TK3.lrow[0]);
    }

    // Clean up and exit.
    clean(TK3);
#endif // !USE_AVR_INLINE_ASM
}
1033 
/**
 * \brief Constructs a tweakable Skinny-64 block cipher object.
 *
 * \param schedule Points to storage for the round key schedule.
 * \param rounds The number of rounds to perform during encryption/decryption.
 */
Skinny64_Tweaked::Skinny64_Tweaked(uint32_t *schedule, uint8_t rounds)
    : Skinny64(schedule, rounds)
{
}
1044 
1050 {
1051  clean(t);
1052 }
1053 
/**
 * \brief Sets the 64-bit tweak value for this block cipher.
 *
 * \param tweak Points to the 8 tweak bytes, or NULL to clear the tweak.
 * \param len Length of the tweak; must be 8 even when \a tweak is NULL.
 * \return Returns true if the tweak was set, false if \a len is incorrect.
 *
 * The previous tweak is XOR'ed back out of the key schedule first;
 * the new one (if any) is then XOR'ed in. An all-zero tweak XOR's
 * nothing, so the schedule needs no further update in the NULL case.
 */
bool Skinny64_Tweaked::setTweak(const uint8_t *tweak, size_t len)
{
    if (len != 8)
        return false;
    // Remove the old tweak from the schedule (XOR is self-inverse).
    xorTK1(t);
    if (tweak) {
        // Remember and apply the new tweak.
        memcpy(t, tweak, len);
        xorTK1(t);
    } else {
        // Zero tweak: nothing further to XOR into the schedule.
        memset(t, 0, sizeof(t));
    }
    return true;
}
1082 
1084 {
1085  clean(t);
1086  Skinny64::clear();
1087 }
1088 
1095 {
1096  memset(t, 0, sizeof(t));
1097  setTK1(t, true);
1098 }
1099 
1104  : Skinny64(sched, 32)
1105 {
1106 }
1107 
1113 {
1114  clean(sched);
1115 }
1116 
/**
 * \brief Size of a Skinny64_64 key.
 * \return Always 8 — a 64-bit key occupying TK1 only.
 */
size_t Skinny64_64::keySize() const
{
    return 8;
}
1125 
1126 bool Skinny64_64::setKey(const uint8_t *key, size_t len)
1127 {
1128  if (len != 8)
1129  return false;
1130  setTK1(key);
1131  return true;
1132 }
1133 
1138  : Skinny64(sched, 36)
1139 {
1140 }
1141 
1147 {
1148  clean(sched);
1149 }
1150 
1156 {
1157  return 16;
1158 }
1159 
1160 bool Skinny64_128::setKey(const uint8_t *key, size_t len)
1161 {
1162  if (len != 16)
1163  return false;
1164  setTK1(key);
1165  setTK2(key + 8);
1166  return true;
1167 }
1168 
1174  : Skinny64_Tweaked(sched, 36)
1175 {
1176 }
1177 
1183 {
1184  clean(sched);
1185 }
1186 
1192 {
1193  return 8;
1194 }
1195 
1196 bool Skinny64_128_Tweaked::setKey(const uint8_t *key, size_t len)
1197 {
1198  if (len != 8)
1199  return false;
1200  resetTweak();
1201  setTK2(key);
1202  return true;
1203 }
1204 
1209  : Skinny64(sched, 40)
1210 {
1211 }
1212 
1218 {
1219  clean(sched);
1220 }
1221 
1227 {
1228  return 24;
1229 }
1230 
1231 bool Skinny64_192::setKey(const uint8_t *key, size_t len)
1232 {
1233  if (len != 24)
1234  return false;
1235  setTK1(key);
1236  setTK2(key + 8);
1237  setTK3(key + 16);
1238  return true;
1239 }
1240 
1246  : Skinny64_Tweaked(sched, 40)
1247 {
1248 }
1249 
1255 {
1256  clean(sched);
1257 }
1258 
1264 {
1265  return 16;
1266 }
1267 
1268 bool Skinny64_192_Tweaked::setKey(const uint8_t *key, size_t len)
1269 {
1270  if (len != 16)
1271  return false;
1272  resetTweak();
1273  setTK2(key);
1274  setTK3(key + 8);
1275  return true;
1276 }
void encryptBlock(uint8_t *output, const uint8_t *input)
Encrypts a single block using this cipher.
Definition: Skinny64.cpp:359
void resetTweak()
Resets the tweak to all-zeroes.
Definition: Skinny64.cpp:1094
uint32_t lrow[2]
size_t keySize() const
Size of a Skinny64_128_Tweaked key in bytes.
Definition: Skinny64.cpp:1191
bool setKey(const uint8_t *key, size_t len)
Sets the key to use for future encryption and decryption operations.
Definition: Skinny64.cpp:1268
Skinny64_128_Tweaked()
Constructs a tweakable Skinny-64 block cipher with a 64-bit key and a 64-bit tweak.
Definition: Skinny64.cpp:1173
void clear()
Clears all security-sensitive state from this block cipher.
Definition: Skinny64.cpp:1083
size_t keySize() const
Size of a Skinny64_128 key in bytes.
Definition: Skinny64.cpp:1155
size_t keySize() const
Size of a Skinny64_192 key in bytes.
Definition: Skinny64.cpp:1226
virtual ~Skinny64_64()
Destroys this Skinny-64 block cipher object after clearing sensitive information. ...
Definition: Skinny64.cpp:1112
void xorTK1(const uint8_t *key)
XOR's the key schedule with the schedule for TK1.
Definition: Skinny64.cpp:796
virtual ~Skinny64_192_Tweaked()
Destroys this tweakable Skinny-64 block cipher object after clearing sensitive information.
Definition: Skinny64.cpp:1254
bool setKey(const uint8_t *key, size_t len)
Sets the key to use for future encryption and decryption operations.
Definition: Skinny64.cpp:1126
virtual ~Skinny64_192()
Destroys this Skinny-64 block cipher object after clearing sensitive information. ...
Definition: Skinny64.cpp:1217
size_t blockSize() const
Size of a Skinny-64 block in bytes.
Definition: Skinny64.cpp:126
size_t keySize() const
Size of a Skinny64_64 key in bytes.
Definition: Skinny64.cpp:1121
Abstract base class for SKINNY block ciphers with 64-bit blocks.
Definition: Skinny64.h:28
Skinny64_Tweaked(uint32_t *schedule, uint8_t rounds)
Constructs a tweakable Skinny-64 block cipher object.
Definition: Skinny64.cpp:1040
size_t keySize() const
Size of a Skinny64_192_Tweaked key in bytes.
Definition: Skinny64.cpp:1263
uint16_t row[4]
void setTK3(const uint8_t *key)
XOR's the key schedule with the schedule for TK3.
Definition: Skinny64.cpp:967
Skinny64_192_Tweaked()
Constructs a tweakable Skinny-64 block cipher with a 128-bit key and a 64-bit tweak.
Definition: Skinny64.cpp:1245
Skinny64_128()
Constructs a Skinny-64 block cipher with a 128-bit key.
Definition: Skinny64.cpp:1137
virtual ~Skinny64_128_Tweaked()
Destroys this tweakable Skinny-64 block cipher object after clearing sensitive information.
Definition: Skinny64.cpp:1182
Skinny64(uint32_t *schedule, uint8_t rounds)
Constructs a Skinny-64 block cipher object.
Definition: Skinny64.cpp:109
void setTK1(const uint8_t *key, bool tweaked=false)
Clears the key schedule and sets it to the schedule for TK1.
Definition: Skinny64.cpp:701
bool setTweak(const uint8_t *tweak, size_t len)
Sets the 64-bit tweak value for this block cipher.
Definition: Skinny64.cpp:1069
Skinny64_64()
Constructs a Skinny-64 block cipher with a 64-bit key.
Definition: Skinny64.cpp:1103
virtual ~Skinny64()
Destroys this Skinny-64 block cipher object after clearing sensitive information. ...
Definition: Skinny64.cpp:118
void clear()
Clears all security-sensitive state from this block cipher.
Definition: Skinny64.cpp:639
virtual ~Skinny64_Tweaked()
Destroys this tweakable Skinny-64 block cipher object after clearing sensitive information.
Definition: Skinny64.cpp:1049
bool setKey(const uint8_t *key, size_t len)
Sets the key to use for future encryption and decryption operations.
Definition: Skinny64.cpp:1196
void decryptBlock(uint8_t *output, const uint8_t *input)
Decrypts a single block using this cipher.
Definition: Skinny64.cpp:499
Skinny64_192()
Constructs a Skinny-64 block cipher with a 192-bit key.
Definition: Skinny64.cpp:1208
virtual ~Skinny64_128()
Destroys this Skinny-64 block cipher object after clearing sensitive information. ...
Definition: Skinny64.cpp:1146
Union that describes a 64-bit 4x4 array of cells.
Abstract base class for SKINNY tweakable block ciphers with 64-bit blocks.
Definition: Skinny64.h:53
bool setKey(const uint8_t *key, size_t len)
Sets the key to use for future encryption and decryption operations.
Definition: Skinny64.cpp:1160
void setTK2(const uint8_t *key)
XOR's the key schedule with the schedule for TK2.
Definition: Skinny64.cpp:895
bool setKey(const uint8_t *key, size_t len)
Sets the key to use for future encryption and decryption operations.
Definition: Skinny64.cpp:1231