skinny64-cipher.c
/*
 * Copyright (C) 2017 Southern Storm Software, Pty Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "skinny64-cipher.h"
#include "skinny-internal.h"

STATIC_INLINE uint32_t skinny64_LFSR2(uint32_t x)
{
    return ((x << 1) & 0xEEEEEEEEU) ^ (((x >> 3) ^ (x >> 2)) & 0x11111111U);
}

STATIC_INLINE uint32_t skinny64_LFSR3(uint32_t x)
{
    return ((x >> 1) & 0x77777777U) ^ ((x ^ (x << 3)) & 0x88888888U);
}
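
/* Aside (illustrative sketch, not part of the original source): the two
   routines above apply the SKINNY tweakey LFSRs to all eight 4-bit cells
   of a 32-bit word at once.  Per the SKINNY specification, the per-cell
   updates are LFSR2: (x3 x2 x1 x0) -> (x2 x1 x0 x3^x2) and
   LFSR3: (x3 x2 x1 x0) -> (x0^x3 x3 x2 x1).  Single-cell versions, with
   hypothetical names, would look like this (compiled out): */
#if 0
static uint8_t skinny64_LFSR2_cell(uint8_t x) /* hypothetical helper */
{
    return ((x << 1) & 0x0E) ^ (((x >> 3) ^ (x >> 2)) & 0x01);
}

static uint8_t skinny64_LFSR3_cell(uint8_t x) /* hypothetical helper */
{
    return ((x >> 1) & 0x07) ^ ((x ^ (x << 3)) & 0x08);
}
#endif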

STATIC_INLINE void skinny64_permute_tk(Skinny64Cells_t *tk)
{
    /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */
#if SKINNY_LITTLE_ENDIAN
    /* Permutation generated by http://programming.sirrida.de/calcperm.php */
    uint32_t x = tk->lrow[1];
    tk->lrow[1] = tk->lrow[0];
    tk->lrow[0] = ((x & 0x0000000FU) << 4) |
                  ((x & 0x00F0F0F0U) << 8) |
                  ((x & 0x0F000000U) >> 24) |
                  ((x & 0x00000F00U) << 16) |
                  ((x & 0xF0000000U) >> 12) |
                  ((x & 0x000F0000U) >> 8);
#else
    uint16_t row2 = tk->row[2];
    uint16_t row3 = tk->row[3];
    tk->row[2] = tk->row[0];
    tk->row[3] = tk->row[1];
    row3 = (row3 << 8) | (row3 >> 8);
    tk->row[0] = ((row2 << 4) & 0x00F0U) |
                 ((row2 << 8) & 0xF000U) |
                  (row3 & 0x0F0FU);
    tk->row[1] = ((row2 >> 8) & 0x00F0U) |
                  (row2 & 0x0F00U) |
                 ((row3 >> 4) & 0x000FU) |
                  (row3 & 0xF000U);
#endif
}
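
/* Aside (illustrative sketch, not part of the original source): the
   word-level shuffling above implements PT directly on the packed
   representation.  An equivalent version over an array of 16 cells,
   reading the table as "cell i of the result comes from cell PT[i]",
   would be (compiled out; skinny64_permute_tk_ref is hypothetical): */
#if 0
static void skinny64_permute_tk_ref(uint8_t cells[16])
{
    static const uint8_t PT[16] =
        {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
    uint8_t copy[16];
    unsigned i;
    memcpy(copy, cells, sizeof(copy));
    for (i = 0; i < 16; ++i)
        cells[i] = copy[PT[i]];
}
#endif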

/* Initialize the key schedule with TK1 */
static void skinny64_set_tk1
    (Skinny64Key_t *ks, const void *key, unsigned key_size, int tweaked)
{
    Skinny64Cells_t tk;
    unsigned index;
    uint16_t word;
    uint8_t rc = 0;

    /* Unpack the key and convert from little-endian to host-endian */
    if (key_size >= SKINNY64_BLOCK_SIZE) {
#if SKINNY_64BIT && SKINNY_LITTLE_ENDIAN
        tk.llrow = READ_WORD64(key, 0);
#elif SKINNY_LITTLE_ENDIAN
        tk.lrow[0] = READ_WORD32(key, 0);
        tk.lrow[1] = READ_WORD32(key, 4);
#else
        tk.row[0] = READ_WORD16(key, 0);
        tk.row[1] = READ_WORD16(key, 2);
        tk.row[2] = READ_WORD16(key, 4);
        tk.row[3] = READ_WORD16(key, 6);
#endif
    } else {
        /* Pad a short key with zeroes so no row is left uninitialized */
        memset(&tk, 0, sizeof(tk));
        for (index = 0; index < key_size; index += 2) {
            if ((index + 2) <= key_size) {
                word = READ_WORD16(key, index);
            } else {
                word = READ_BYTE(key, index);
            }
            tk.row[index / 2] = word;
        }
    }

    /* Generate the key schedule words for all rounds */
    for (index = 0; index < ks->rounds; ++index) {
        /* Determine the subkey to use at this point in the key schedule */
        ks->schedule[index].lrow = tk.lrow[0];

        /* XOR in the round constants for the first two rows.
           The round constants for the 3rd and 4th rows are
           fixed and will be applied during encrypt/decrypt */
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        rc &= 0x3F;
        ks->schedule[index].row[0] ^= ((rc & 0x0F) << 4);
        ks->schedule[index].row[1] ^= (rc & 0x30);

        /* If we have a tweak, then we need to XOR a 1 bit into the
           second bit of the top cell of the third column as recommended
           by the SKINNY specification */
        if (tweaked)
            ks->schedule[index].row[0] ^= 0x2000;

        /* Permute TK1 for the next round */
        skinny64_permute_tk(&tk);
    }
}
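
/* Aside (not part of the original source): the 6-bit LFSR in the loop
   above, rc' = ((rc << 1) ^ rc5 ^ rc4 ^ 1) mod 64, reproduces the round
   constant sequence from the SKINNY specification: 0x01, 0x03, 0x07,
   0x0F, 0x1F, 0x3E, 0x3D, 0x3B, ...  A standalone sketch that prints
   the constants (compiled out; skinny64_dump_rc is hypothetical): */
#if 0
#include <stdio.h>
static void skinny64_dump_rc(unsigned rounds)
{
    uint8_t rc = 0;
    unsigned i;
    for (i = 0; i < rounds; ++i) {
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        rc &= 0x3F;
        printf("round %u: rc = 0x%02X\n", i + 1, rc);
    }
}
#endif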

/* XOR the key schedule with TK1 */
static void skinny64_xor_tk1(Skinny64Key_t *ks, const void *key)
{
    Skinny64Cells_t tk;
    unsigned index;

    /* Unpack the key and convert from little-endian to host-endian */
#if SKINNY_64BIT && SKINNY_LITTLE_ENDIAN
    tk.llrow = READ_WORD64(key, 0);
#elif SKINNY_LITTLE_ENDIAN
    tk.lrow[0] = READ_WORD32(key, 0);
    tk.lrow[1] = READ_WORD32(key, 4);
#else
    tk.row[0] = READ_WORD16(key, 0);
    tk.row[1] = READ_WORD16(key, 2);
    tk.row[2] = READ_WORD16(key, 4);
    tk.row[3] = READ_WORD16(key, 6);
#endif

    /* XOR TK1 into the key schedule words for all rounds */
    for (index = 0; index < ks->rounds; ++index) {
        /* Merge TK1 into the subkey for this round */
        ks->schedule[index].lrow ^= tk.lrow[0];

        /* Permute TK1 for the next round */
        skinny64_permute_tk(&tk);
    }
}

/* XOR the key schedule with TK2 */
static void skinny64_set_tk2
    (Skinny64Key_t *ks, const void *key, unsigned key_size)
{
    Skinny64Cells_t tk;
    unsigned index;
    uint16_t word;

    /* Unpack the key and convert from little-endian to host-endian */
    if (key_size >= SKINNY64_BLOCK_SIZE) {
#if SKINNY_64BIT && SKINNY_LITTLE_ENDIAN
        tk.llrow = READ_WORD64(key, 0);
#elif SKINNY_LITTLE_ENDIAN
        tk.lrow[0] = READ_WORD32(key, 0);
        tk.lrow[1] = READ_WORD32(key, 4);
#else
        tk.row[0] = READ_WORD16(key, 0);
        tk.row[1] = READ_WORD16(key, 2);
        tk.row[2] = READ_WORD16(key, 4);
        tk.row[3] = READ_WORD16(key, 6);
#endif
    } else {
        /* Pad a short key with zeroes so no row is left uninitialized */
        memset(&tk, 0, sizeof(tk));
        for (index = 0; index < key_size; index += 2) {
            if ((index + 2) <= key_size) {
                word = READ_WORD16(key, index);
            } else {
                word = READ_BYTE(key, index);
            }
            tk.row[index / 2] = word;
        }
    }

    /* XOR TK2 into the key schedule words for all rounds */
    for (index = 0; index < ks->rounds; ++index) {
        /* Merge TK2 into the subkey for this round */
        ks->schedule[index].lrow ^= tk.lrow[0];

        /* Permute TK2 for the next round */
        skinny64_permute_tk(&tk);

        /* Apply LFSR2 to the first two rows of TK2 */
        tk.lrow[0] = skinny64_LFSR2(tk.lrow[0]);
    }
}

/* XOR the key schedule with TK3 */
static void skinny64_set_tk3
    (Skinny64Key_t *ks, const void *key, unsigned key_size)
{
    Skinny64Cells_t tk;
    unsigned index;
    uint16_t word;

    /* Unpack the key and convert from little-endian to host-endian */
    if (key_size >= SKINNY64_BLOCK_SIZE) {
#if SKINNY_64BIT && SKINNY_LITTLE_ENDIAN
        tk.llrow = READ_WORD64(key, 0);
#elif SKINNY_LITTLE_ENDIAN
        tk.lrow[0] = READ_WORD32(key, 0);
        tk.lrow[1] = READ_WORD32(key, 4);
#else
        tk.row[0] = READ_WORD16(key, 0);
        tk.row[1] = READ_WORD16(key, 2);
        tk.row[2] = READ_WORD16(key, 4);
        tk.row[3] = READ_WORD16(key, 6);
#endif
    } else {
        /* Pad a short key with zeroes so no row is left uninitialized */
        memset(&tk, 0, sizeof(tk));
        for (index = 0; index < key_size; index += 2) {
            if ((index + 2) <= key_size) {
                word = READ_WORD16(key, index);
            } else {
                word = READ_BYTE(key, index);
            }
            tk.row[index / 2] = word;
        }
    }

    /* XOR TK3 into the key schedule words for all rounds */
    for (index = 0; index < ks->rounds; ++index) {
        /* Merge TK3 into the subkey for this round */
        ks->schedule[index].lrow ^= tk.lrow[0];

        /* Permute TK3 for the next round */
        skinny64_permute_tk(&tk);

        /* Apply LFSR3 to the first two rows of TK3 */
        tk.lrow[0] = skinny64_LFSR3(tk.lrow[0]);
    }
}

static void skinny64_set_key_inner
    (Skinny64Key_t *ks, const void *key, unsigned key_size, const void *tweak)
{
    if (!tweak) {
        /* Key only, no tweak */
        if (key_size == SKINNY64_BLOCK_SIZE) {
            ks->rounds = 32;
            skinny64_set_tk1(ks, key, key_size, 0);
        } else if (key_size <= (2 * SKINNY64_BLOCK_SIZE)) {
            ks->rounds = 36;
            skinny64_set_tk1(ks, key, SKINNY64_BLOCK_SIZE, 0);
            skinny64_set_tk2(ks, key + SKINNY64_BLOCK_SIZE,
                             key_size - SKINNY64_BLOCK_SIZE);
        } else {
            ks->rounds = 40;
            skinny64_set_tk1(ks, key, SKINNY64_BLOCK_SIZE, 0);
            skinny64_set_tk2(ks, key + SKINNY64_BLOCK_SIZE,
                             SKINNY64_BLOCK_SIZE);
            skinny64_set_tk3(ks, key + SKINNY64_BLOCK_SIZE * 2,
                             key_size - SKINNY64_BLOCK_SIZE * 2);
        }
    } else {
        /* Key and tweak */
        if (key_size == SKINNY64_BLOCK_SIZE) {
            ks->rounds = 36;
            skinny64_set_tk1(ks, tweak, SKINNY64_BLOCK_SIZE, 1);
            skinny64_set_tk2(ks, key, key_size);
        } else {
            ks->rounds = 40;
            skinny64_set_tk1(ks, tweak, SKINNY64_BLOCK_SIZE, 1);
            skinny64_set_tk2(ks, key, SKINNY64_BLOCK_SIZE);
            skinny64_set_tk3(ks, key + SKINNY64_BLOCK_SIZE,
                             key_size - SKINNY64_BLOCK_SIZE);
        }
    }
}
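
/* For reference, the variant selection above boils down to (sizes in
 * bytes; variant names per the SKINNY specification):
 *
 *    key_size   tweak   tweakey usage                rounds
 *    8          none    TK1 = key                    32  (Skinny-64-64)
 *    9..16      none    TK1, TK2 = key               36  (Skinny-64-128)
 *    17..24     none    TK1, TK2, TK3 = key          40  (Skinny-64-192)
 *    8          set     TK1 = tweak, TK2 = key       36
 *    9..16      set     TK1 = tweak, TK2/TK3 = key   40
 */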

int skinny64_set_key(Skinny64Key_t *ks, const void *key, unsigned size)
{
    /* Validate the parameters */
    if (!ks || !key || size < SKINNY64_BLOCK_SIZE ||
            size > (SKINNY64_BLOCK_SIZE * 3)) {
        return 0;
    }

    /* Set the key directly with no tweak */
    skinny64_set_key_inner(ks, key, size, 0);
    return 1;
}

int skinny64_set_tweaked_key
    (Skinny64TweakedKey_t *ks, const void *key, unsigned key_size)
{
    /* Validate the parameters */
    if (!ks || !key || key_size < SKINNY64_BLOCK_SIZE ||
            key_size > (SKINNY64_BLOCK_SIZE * 2)) {
        return 0;
    }

    /* Set the initial tweak to all-zeroes */
    memset(ks->tweak, 0, sizeof(ks->tweak));

    /* Set the initial key and tweak value */
    skinny64_set_key_inner(&(ks->ks), key, key_size, ks->tweak);
    return 1;
}

int skinny64_set_tweak
    (Skinny64TweakedKey_t *ks, const void *tweak, unsigned tweak_size)
{
    uint8_t tk_prev[SKINNY64_BLOCK_SIZE];

    /* Validate the parameters */
    if (!ks || !tweak || tweak_size < 1 || tweak_size > SKINNY64_BLOCK_SIZE) {
        return 0;
    }

    /* Read the new tweak value and swap with the original */
    memcpy(tk_prev, ks->tweak, sizeof(tk_prev));
    memcpy(ks->tweak, tweak, tweak_size);
    memset(ks->tweak + tweak_size, 0, sizeof(ks->tweak) - tweak_size);

    /* XOR the original tweak out of the key schedule */
    skinny64_xor_tk1(&(ks->ks), tk_prev);

    /* XOR the new tweak into the key schedule */
    skinny64_xor_tk1(&(ks->ks), ks->tweak);
    return 1;
}
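
/* Usage sketch (not part of the original source): schedule a key once
   with skinny64_set_tweaked_key(), then change the tweak cheaply between
   messages with skinny64_set_tweak().  The key and tweak bytes below are
   arbitrary placeholders (compiled out; example_tweaked is hypothetical): */
#if 0
static void example_tweaked(void)
{
    Skinny64TweakedKey_t ks;
    uint8_t key[SKINNY64_BLOCK_SIZE * 2] = {0};            /* 16-byte key */
    uint8_t tweak[SKINNY64_BLOCK_SIZE] = {1, 2, 3, 4, 5, 6, 7, 8};
    uint8_t block[SKINNY64_BLOCK_SIZE] = {0};

    if (!skinny64_set_tweaked_key(&ks, key, sizeof(key)))
        return;
    if (!skinny64_set_tweak(&ks, tweak, sizeof(tweak)))
        return;
    skinny64_ecb_encrypt(block, block, &(ks.ks));
}
#endif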

STATIC_INLINE uint16_t skinny64_rotate_right(uint16_t x, unsigned count)
{
    return (x >> count) | (x << (16 - count));
}

#if SKINNY_64BIT

STATIC_INLINE uint64_t skinny64_sbox(uint64_t x)
{
    /* Splitting the bits out individually gives better performance on
       64-bit platforms because we have more spare registers to work with.
       This doesn't work as well on 32-bit platforms because register
       spills start to impact performance.  See below. */
    uint64_t bit0 = ~x;
    uint64_t bit1 = bit0 >> 1;
    uint64_t bit2 = bit0 >> 2;
    uint64_t bit3 = bit0 >> 3;
    bit0 ^= bit3 & bit2;
    bit3 ^= bit1 & bit2;
    bit2 ^= bit1 & bit0;
    bit1 ^= bit0 & bit3;
    x = ((bit0 << 3) & 0x8888888888888888ULL) |
        ( bit1       & 0x1111111111111111ULL) |
        ((bit2 << 1) & 0x2222222222222222ULL) |
        ((bit3 << 2) & 0x4444444444444444ULL);
    return ~x;
}

STATIC_INLINE uint64_t skinny64_inv_sbox(uint64_t x)
{
    uint64_t bit0 = ~x;
    uint64_t bit1 = bit0 >> 1;
    uint64_t bit2 = bit0 >> 2;
    uint64_t bit3 = bit0 >> 3;
    bit0 ^= bit3 & bit2;
    bit1 ^= bit3 & bit0;
    bit2 ^= bit1 & bit0;
    bit3 ^= bit1 & bit2;
    x = ((bit0 << 1) & 0x2222222222222222ULL) |
        ((bit1 << 2) & 0x4444444444444444ULL) |
        ((bit2 << 3) & 0x8888888888888888ULL) |
        ( bit3       & 0x1111111111111111ULL);
    return ~x;
}

#else

STATIC_INLINE uint32_t skinny64_sbox(uint32_t x)
{
    /* Original version from the specification is equivalent to:
     *
     * #define SBOX_MIX(x)
     *     (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
     * #define SBOX_SHIFT(x)
     *     ((((x) << 1) & 0xEEEEEEEEU) | (((x) >> 3) & 0x11111111U))
     *
     * x = SBOX_MIX(x);
     * x = SBOX_SHIFT(x);
     * x = SBOX_MIX(x);
     * x = SBOX_SHIFT(x);
     * x = SBOX_MIX(x);
     * x = SBOX_SHIFT(x);
     * return SBOX_MIX(x);
     *
     * However, we can mix the bits in their original positions and then
     * delay the SBOX_SHIFT steps to be performed with one final rotation.
     * This reduces the number of required shift operations from 14 to 10.
     *
     * It is possible to reduce the number of shifts and AND's even further
     * as shown in the 64-bit version of skinny64_sbox() above.  However on
     * 32-bit platforms this causes extra register spills which slows down
     * the implementation more than the improvement gained by reducing the
     * number of bit operations.
     *
     * We can further reduce the number of NOT operations from 4 to 2
     * using the technique from https://github.com/kste/skinny_avx to
     * convert NOR-XOR operations into AND-XOR operations by converting
     * the S-box into its NOT-inverse.
     */
    x = ~x;
    x = (((x >> 3) & (x >> 2)) & 0x11111111U) ^ x;
    x = (((x << 1) & (x << 2)) & 0x88888888U) ^ x;
    x = (((x << 1) & (x << 2)) & 0x44444444U) ^ x;
    x = (((x >> 2) & (x << 1)) & 0x22222222U) ^ x;
    x = ~x;
    return ((x >> 1) & 0x77777777U) | ((x << 3) & 0x88888888U);
}
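
/* Aside (illustrative sketch, not part of the original source): the
   equivalence claimed in the comment above can be checked by compiling
   the specification form directly (compiled out; skinny64_sbox_ref is
   hypothetical): */
#if 0
#define SBOX_MIX(x)   (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
#define SBOX_SHIFT(x) ((((x) << 1) & 0xEEEEEEEEU) | (((x) >> 3) & 0x11111111U))

static uint32_t skinny64_sbox_ref(uint32_t x)
{
    x = SBOX_MIX(x);   x = SBOX_SHIFT(x);
    x = SBOX_MIX(x);   x = SBOX_SHIFT(x);
    x = SBOX_MIX(x);   x = SBOX_SHIFT(x);
    return SBOX_MIX(x);
}

/* e.g. assert(skinny64_sbox(x) == skinny64_sbox_ref(x)) for random x */
#endif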

STATIC_INLINE uint32_t skinny64_inv_sbox(uint32_t x)
{
    /* Original version from the specification is equivalent to:
     *
     * #define SBOX_MIX(x)
     *     (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
     * #define SBOX_SHIFT_INV(x)
     *     ((((x) >> 1) & 0x77777777U) | (((x) << 3) & 0x88888888U))
     *
     * x = SBOX_MIX(x);
     * x = SBOX_SHIFT_INV(x);
     * x = SBOX_MIX(x);
     * x = SBOX_SHIFT_INV(x);
     * x = SBOX_MIX(x);
     * x = SBOX_SHIFT_INV(x);
     * return SBOX_MIX(x);
     */
    x = ~x;
    x = (((x >> 3) & (x >> 2)) & 0x11111111U) ^ x;
    x = (((x << 1) & (x >> 2)) & 0x22222222U) ^ x;
    x = (((x << 1) & (x << 2)) & 0x44444444U) ^ x;
    x = (((x << 1) & (x << 2)) & 0x88888888U) ^ x;
    x = ~x;
    return ((x << 1) & 0xEEEEEEEEU) | ((x >> 3) & 0x11111111U);
}

#endif
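
/* For reference (not part of the original source): expressed as a lookup
 * table, the 4-bit S-box computed bit-sliced by the routines above is,
 * per the SKINNY specification:
 *
 *    S[16] = { 0xC, 0x6, 0x9, 0x0, 0x1, 0xA, 0x2, 0xB,
 *              0x3, 0x8, 0x5, 0xD, 0x4, 0xE, 0x7, 0xF };
 *
 * The bit-sliced forms trade the table lookup for shifts and masks so
 * that 8 or 16 cells are processed at once without data-dependent
 * memory accesses. */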

void skinny64_ecb_encrypt
    (void *output, const void *input, const Skinny64Key_t *ks)
{
    Skinny64Cells_t state;
    const Skinny64HalfCells_t *schedule;
    unsigned index;
    uint32_t temp;

    /* Read the input buffer and convert little-endian to host-endian */
#if SKINNY_64BIT && SKINNY_LITTLE_ENDIAN
    state.llrow = READ_WORD64(input, 0);
#elif SKINNY_LITTLE_ENDIAN
    state.lrow[0] = READ_WORD32(input, 0);
    state.lrow[1] = READ_WORD32(input, 4);
#else
    state.row[0] = READ_WORD16(input, 0);
    state.row[1] = READ_WORD16(input, 2);
    state.row[2] = READ_WORD16(input, 4);
    state.row[3] = READ_WORD16(input, 6);
#endif

    /* Perform all encryption rounds */
    schedule = ks->schedule;
    for (index = ks->rounds; index > 0; --index, ++schedule) {
        /* Apply the S-box to all cells in the state */
#if SKINNY_64BIT
        state.llrow = skinny64_sbox(state.llrow);
#else
        state.lrow[0] = skinny64_sbox(state.lrow[0]);
        state.lrow[1] = skinny64_sbox(state.lrow[1]);
#endif

        /* Apply the subkey for this round */
#if SKINNY_64BIT && SKINNY_LITTLE_ENDIAN
        state.llrow ^= schedule->lrow | 0x2000000000ULL;
#else
        state.lrow[0] ^= schedule->lrow;
        state.row[2] ^= 0x20;
#endif

        /* Shift the rows */
        state.row[1] = skinny64_rotate_right(state.row[1], 4);
        state.row[2] = skinny64_rotate_right(state.row[2], 8);
        state.row[3] = skinny64_rotate_right(state.row[3], 12);

        /* Mix the columns */
        state.row[1] ^= state.row[2];
        state.row[2] ^= state.row[0];
        temp = state.row[3] ^ state.row[2];
        state.row[3] = state.row[2];
        state.row[2] = state.row[1];
        state.row[1] = state.row[0];
        state.row[0] = temp;
    }

    /* Convert host-endian back into little-endian in the output buffer */
#if SKINNY_64BIT && SKINNY_LITTLE_ENDIAN
    WRITE_WORD64(output, 0, state.llrow);
#elif SKINNY_LITTLE_ENDIAN
    WRITE_WORD32(output, 0, state.lrow[0]);
    WRITE_WORD32(output, 4, state.lrow[1]);
#else
    WRITE_WORD16(output, 0, state.row[0]);
    WRITE_WORD16(output, 2, state.row[1]);
    WRITE_WORD16(output, 4, state.row[2]);
    WRITE_WORD16(output, 6, state.row[3]);
#endif
}

void skinny64_ecb_decrypt
    (void *output, const void *input, const Skinny64Key_t *ks)
{
    Skinny64Cells_t state;
    const Skinny64HalfCells_t *schedule;
    unsigned index;
    uint32_t temp;

    /* Read the input buffer and convert little-endian to host-endian */
#if SKINNY_64BIT && SKINNY_LITTLE_ENDIAN
    state.llrow = READ_WORD64(input, 0);
#elif SKINNY_LITTLE_ENDIAN
    state.lrow[0] = READ_WORD32(input, 0);
    state.lrow[1] = READ_WORD32(input, 4);
#else
    state.row[0] = READ_WORD16(input, 0);
    state.row[1] = READ_WORD16(input, 2);
    state.row[2] = READ_WORD16(input, 4);
    state.row[3] = READ_WORD16(input, 6);
#endif

    /* Perform all decryption rounds */
    schedule = &(ks->schedule[ks->rounds - 1]);
    for (index = ks->rounds; index > 0; --index, --schedule) {
        /* Inverse mix of the columns */
        temp = state.row[3];
        state.row[3] = state.row[0];
        state.row[0] = state.row[1];
        state.row[1] = state.row[2];
        state.row[3] ^= temp;
        state.row[2] = temp ^ state.row[0];
        state.row[1] ^= state.row[2];

        /* Inverse shift of the rows */
        state.row[1] = skinny64_rotate_right(state.row[1], 12);
        state.row[2] = skinny64_rotate_right(state.row[2], 8);
        state.row[3] = skinny64_rotate_right(state.row[3], 4);

        /* Apply the subkey for this round */
#if SKINNY_64BIT && SKINNY_LITTLE_ENDIAN
        state.llrow ^= schedule->lrow | 0x2000000000ULL;
#else
        state.lrow[0] ^= schedule->lrow;
        state.row[2] ^= 0x20;
#endif

        /* Apply the inverse of the S-box to all cells in the state */
#if SKINNY_64BIT
        state.llrow = skinny64_inv_sbox(state.llrow);
#else
        state.lrow[0] = skinny64_inv_sbox(state.lrow[0]);
        state.lrow[1] = skinny64_inv_sbox(state.lrow[1]);
#endif
    }

    /* Convert host-endian back into little-endian in the output buffer */
#if SKINNY_64BIT && SKINNY_LITTLE_ENDIAN
    WRITE_WORD64(output, 0, state.llrow);
#elif SKINNY_LITTLE_ENDIAN
    WRITE_WORD32(output, 0, state.lrow[0]);
    WRITE_WORD32(output, 4, state.lrow[1]);
#else
    WRITE_WORD16(output, 0, state.row[0]);
    WRITE_WORD16(output, 2, state.row[1]);
    WRITE_WORD16(output, 4, state.row[2]);
    WRITE_WORD16(output, 6, state.row[3]);
#endif
}
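
/* Usage sketch (not part of the original source): a minimal
   encrypt/decrypt round trip with a 16-byte key (Skinny-64-128).  The
   key and plaintext bytes are arbitrary placeholders (compiled out): */
#if 0
#include <assert.h>
#include <string.h>

int main(void)
{
    Skinny64Key_t ks;
    uint8_t key[SKINNY64_BLOCK_SIZE * 2] = {0};
    uint8_t pt[SKINNY64_BLOCK_SIZE] = {0x01, 0x23, 0x45, 0x67,
                                       0x89, 0xAB, 0xCD, 0xEF};
    uint8_t ct[SKINNY64_BLOCK_SIZE];
    uint8_t out[SKINNY64_BLOCK_SIZE];

    if (!skinny64_set_key(&ks, key, sizeof(key)))
        return 1;
    skinny64_ecb_encrypt(ct, pt, &ks);
    skinny64_ecb_decrypt(out, ct, &ks);
    assert(memcmp(out, pt, SKINNY64_BLOCK_SIZE) == 0);
    return 0;
}
#endif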