FD.io VPP v21.06 — Vector Packet Processing.
Source listing of the crypto_sw_scheduler plugin's main.c
(software-scheduler async crypto engine), as rendered by Doxygen.
Note: this listing omits some original source lines (see numbering gaps).
1 /*
2  * Copyright (c) 2020 Intel and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <vlib/vlib.h>
17 #include <vnet/plugin/plugin.h>
18 #include <vpp/app/version.h>
19 
20 #include "crypto_sw_scheduler.h"
21 
22 int
24 {
28  u32 count = 0, i = vlib_num_workers () > 0;
29 
30  if (worker_idx >= vlib_num_workers ())
31  {
32  return VNET_API_ERROR_INVALID_VALUE;
33  }
34 
35  for (; i < tm->n_vlib_mains; i++)
36  {
37  ptd = cm->per_thread_data + i;
38  count += ptd->self_crypto_enabled;
39  }
40 
41  if (enabled || count > 1)
42  {
44  (worker_idx)].self_crypto_enabled = enabled;
45  }
46  else /* cannot disable all crypto workers */
47  {
48  return VNET_API_ERROR_INVALID_VALUE_2;
49  }
50  return 0;
51 }
52 
53 static void
56 {
59 
60  vec_validate (cm->keys, idx);
61 
62  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
63  {
64  if (kop == VNET_CRYPTO_KEY_OP_DEL)
65  {
66  cm->keys[idx].index_crypto = UINT32_MAX;
67  cm->keys[idx].index_integ = UINT32_MAX;
68  }
69  else
70  {
71  cm->keys[idx] = *key;
72  }
73  }
74 }
75 
76 static int
79 {
83  crypto_sw_scheduler_queue_t *q = ptd->queues[frame->op];
84  u64 head = q->head;
85 
86  if (q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
87  {
88  u32 n_elts = frame->n_elts, i;
89  for (i = 0; i < n_elts; i++)
90  frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
91  return -1;
92  }
94  head += 1;
96  q->head = head;
97  return 0;
98 }
99 
102 {
104  u32 i;
105  u32 tail = q->tail;
106  u32 head = q->head;
107 
108  for (i = tail; i < head; i++)
109  {
111  if (!f)
112  continue;
116  {
117  return f;
118  }
119  }
120  return NULL;
121 }
122 
125 {
130  {
131  u32 tail = q->tail;
133  q->tail++;
134  f = q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
135  q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = 0;
136  }
137  return f;
138 }
139 
144  i32 offset, i32 len)
145 {
147  vlib_buffer_t *nb = b;
148  u32 n_chunks = 0;
149  u32 chunk_index = vec_len (ptd->chunks);
150 
151  while (len)
152  {
153  if (nb->current_data + nb->current_length > offset)
154  {
155  vec_add2 (ptd->chunks, ch, 1);
156  ch->src = ch->dst = nb->data + offset;
157  ch->len
158  = clib_min (nb->current_data + nb->current_length - offset, len);
159  len -= ch->len;
160  offset = 0;
161  n_chunks++;
162  if (!len)
163  break;
164  }
165  if (offset)
166  offset -= nb->current_data + nb->current_length;
167  if (nb->flags & VLIB_BUFFER_NEXT_PRESENT)
168  nb = vlib_get_buffer (vm, nb->next_buffer);
169  else
170  break;
171  }
172 
173  ASSERT (offset == 0);
174  if (n_chunks && len)
175  {
176  /* Some async crypto users can use buffers in creative ways, let's allow
177  * some flexibility here...
178  * Current example is ESP decrypt with ESN in async mode: it will stash
179  * ESN at the end of the last buffer (if it can) because it must be part
180  * of the integrity check but it will not update the buffer length.
181  * Fixup the last operation chunk length if we have room.
182  */
183  ASSERT (vlib_buffer_space_left_at_end (vm, nb) >= len);
184  if (vlib_buffer_space_left_at_end (vm, nb) >= len)
185  ch->len += len;
186  }
187 
188  op->chunk_index = chunk_index;
189  op->n_chunks = n_chunks;
190 }
191 
196  u32 index, u32 bi,
197  vnet_crypto_op_id_t op_id, u16 aad_len,
198  u8 tag_len)
199 {
200  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
201  vnet_crypto_op_t *op = 0;
202 
204  {
205  vec_add2 (ptd->chained_crypto_ops, op, 1);
206  cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
207  fe->crypto_total_length);
208  }
209  else
210  {
211  vec_add2 (ptd->crypto_ops, op, 1);
212  op->src = op->dst = b->data + fe->crypto_start_offset;
213  op->len = fe->crypto_total_length;
214  }
215 
216  op->op = op_id;
217  op->tag = fe->tag;
218  op->flags = fe->flags;
219  op->key_index = fe->key_index;
220  op->iv = fe->iv;
221  op->aad = fe->aad;
222  op->aad_len = aad_len;
223  op->tag_len = tag_len;
224  op->user_data = index;
225 }
226 
230  * ptd, vnet_crypto_key_t * key,
232  u32 index, u32 bi,
233  vnet_crypto_op_id_t crypto_op_id,
234  vnet_crypto_op_id_t integ_op_id,
235  u32 digest_len, u8 is_enc)
236 {
237  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
238  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;
239 
241  {
242  vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
243  vec_add2 (ptd->chained_integ_ops, integ_op, 1);
244  cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
246  fe->crypto_total_length);
247  cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
248  fe->integ_start_offset,
249  fe->crypto_total_length +
250  fe->integ_length_adj);
251  }
252  else
253  {
254  vec_add2 (ptd->crypto_ops, crypto_op, 1);
255  vec_add2 (ptd->integ_ops, integ_op, 1);
256  crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
257  crypto_op->len = fe->crypto_total_length;
258  integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
259  integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
260  }
261 
262  crypto_op->op = crypto_op_id;
263  crypto_op->iv = fe->iv;
264  crypto_op->key_index = key->index_crypto;
265  crypto_op->user_data = 0;
266  crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
267  integ_op->op = integ_op_id;
268  integ_op->digest = fe->digest;
269  integ_op->digest_len = digest_len;
270  integ_op->key_index = key->index_integ;
271  integ_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_INIT_IV;
272  crypto_op->user_data = integ_op->user_data = index;
273 }
274 
277  vnet_crypto_op_t * ops, u8 * state)
278 {
279  u32 n_fail, n_ops = vec_len (ops);
280  vnet_crypto_op_t *op = ops;
281 
282  if (n_ops == 0)
283  return;
284 
285  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
286 
287  while (n_fail)
288  {
289  ASSERT (op - ops < n_ops);
290 
291  if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
292  {
293  f->elts[op->user_data].status = op->status;
295  n_fail--;
296  }
297  op++;
298  }
299 }
300 
304  u8 * state)
305 {
306  u32 n_fail, n_ops = vec_len (ops);
307  vnet_crypto_op_t *op = ops;
308 
309  if (n_ops == 0)
310  return;
311 
312  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
313 
314  while (n_fail)
315  {
316  ASSERT (op - ops < n_ops);
317 
318  if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
319  {
320  f->elts[op->user_data].status = op->status;
322  n_fail--;
323  }
324  op++;
325  }
326 }
327 
330  vnet_crypto_async_op_id_t async_op_id,
331  vnet_crypto_op_id_t sync_op_id, u8 tag_len,
332  u8 aad_len, u32 * nb_elts_processed,
333  u32 * enqueue_thread_idx)
334 {
340  u32 *bi;
341  u32 n_elts;
342  int i = 0;
344 
346  {
347  /* *INDENT-OFF* */
349  {
350  ptd = cm->per_thread_data + i;
351  q = ptd->queues[async_op_id];
353  if (f)
354  break;
355  }
356  /* *INDENT-ON* */
357  }
358 
359  ptd = cm->per_thread_data + vm->thread_index;
360 
361  if (f)
362  {
363  *nb_elts_processed = n_elts = f->n_elts;
364  fe = f->elts;
365  bi = f->buffer_indices;
366 
369  vec_reset_length (ptd->chunks);
370 
371  while (n_elts--)
372  {
373  if (n_elts > 1)
374  CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);
375 
376  crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
377  sync_op_id, aad_len, tag_len);
378  bi++;
379  fe++;
380  }
381 
382  process_ops (vm, f, ptd->crypto_ops, &state);
383  process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
384  &state);
385  f->state = state;
386  *enqueue_thread_idx = f->enqueue_thread_index;
387  }
388 
389  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
390 }
391 
394  vnet_crypto_async_op_id_t async_op_id,
395  vnet_crypto_op_id_t sync_crypto_op_id,
396  vnet_crypto_op_id_t sync_integ_op_id,
397  u16 digest_len, u8 is_enc,
398  u32 * nb_elts_processed,
399  u32 * enqueue_thread_idx)
400 {
406  u32 *bi;
407  u32 n_elts;
408  int i = 0;
410 
412  {
413  /* *INDENT-OFF* */
415  {
416  ptd = cm->per_thread_data + i;
417  q = ptd->queues[async_op_id];
419  if (f)
420  break;
421  }
422  /* *INDENT-ON* */
423  }
424 
425  ptd = cm->per_thread_data + vm->thread_index;
426 
427  if (f)
428  {
433  vec_reset_length (ptd->chunks);
434 
435  *nb_elts_processed = n_elts = f->n_elts;
436  fe = f->elts;
437  bi = f->buffer_indices;
438 
439  while (n_elts--)
440  {
441  if (n_elts > 1)
442  CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);
443 
445  cm->keys + fe->key_index,
446  fe, fe - f->elts, bi[0],
447  sync_crypto_op_id,
448  sync_integ_op_id,
449  digest_len, is_enc);
450  bi++;
451  fe++;
452  }
453 
454  if (is_enc)
455  {
456  process_ops (vm, f, ptd->crypto_ops, &state);
457  process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
458  &state);
459  process_ops (vm, f, ptd->integ_ops, &state);
460  process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
461  &state);
462  }
463  else
464  {
465  process_ops (vm, f, ptd->integ_ops, &state);
466  process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
467  &state);
468  process_ops (vm, f, ptd->crypto_ops, &state);
469  process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
470  &state);
471  }
472 
473  f->state = state;
474  *enqueue_thread_idx = f->enqueue_thread_index;
475  }
476 
477  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
478 }
479 
480 static clib_error_t *
482  vlib_cli_command_t * cmd)
483 {
484  unformat_input_t _line_input, *line_input = &_line_input;
485  u32 worker_index;
486  u8 crypto_enable;
487  int rv;
488 
489  /* Get a line of input. */
490  if (!unformat_user (input, unformat_line_input, line_input))
491  return 0;
492 
493  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
494  {
495  if (unformat (line_input, "worker %u", &worker_index))
496  {
497  if (unformat (line_input, "crypto"))
498  {
499  if (unformat (line_input, "on"))
500  crypto_enable = 1;
501  else if (unformat (line_input, "off"))
502  crypto_enable = 0;
503  else
504  return (clib_error_return (0, "unknown input '%U'",
506  line_input));
507  }
508  else
509  return (clib_error_return (0, "unknown input '%U'",
510  format_unformat_error, line_input));
511  }
512  else
513  return (clib_error_return (0, "unknown input '%U'",
514  format_unformat_error, line_input));
515  }
516 
517  rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
518  if (rv == VNET_API_ERROR_INVALID_VALUE)
519  {
520  return (clib_error_return (0, "invalid worker idx: %d", worker_index));
521  }
522  else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
523  {
524  return (clib_error_return (0, "cannot disable all crypto workers"));
525  }
526  return 0;
527 }
528 
529 /*?
530  * This command sets if worker will do crypto processing.
531  *
532  * @cliexpar
533  * Example of how to set worker crypto processing off:
534  * @cliexstart{set sw_scheduler worker 0 crypto off}
535  * @cliexend
536  ?*/
537 /* *INDENT-OFF* */
538 VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
539  .path = "set sw_scheduler",
540  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
541  .function = sw_scheduler_set_worker_crypto,
542  .is_mp_safe = 1,
543 };
544 /* *INDENT-ON* */
545 
546 static clib_error_t *
548  vlib_cli_command_t * cmd)
549 {
551  u32 i;
552 
553  vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
554  for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
555  {
556  vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
557  (vlib_worker_threads + i)->name,
558  cm->
559  per_thread_data[i].self_crypto_enabled ? "on" : "off");
560  }
561 
562  return 0;
563 }
564 
565 /*?
566  * This command displays sw_scheduler workers.
567  *
568  * @cliexpar
569  * Example of how to show workers:
570  * @cliexstart{show sw_scheduler workers}
571  * @cliexend
572  ?*/
573 /* *INDENT-OFF* */
574 VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
575  .path = "show sw_scheduler workers",
576  .short_help = "show sw_scheduler workers",
577  .function = sw_scheduler_show_workers,
578  .is_mp_safe = 1,
579 };
580 /* *INDENT-ON* */
581 
582 clib_error_t *
584 {
585  return 0;
586 }
587 
589 
590 /* *INDENT-OFF* */
591 #define _(n, s, k, t, a) \
592  static vnet_crypto_async_frame_t \
593  *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc ( \
594  vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx) \
595  { \
596  return crypto_sw_scheduler_dequeue_aead ( \
597  vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
598  VNET_CRYPTO_OP_##n##_ENC, t, a, nb_elts_processed, thread_idx); \
599  } \
600  static vnet_crypto_async_frame_t \
601  *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec ( \
602  vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx) \
603  { \
604  return crypto_sw_scheduler_dequeue_aead ( \
605  vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC, \
606  VNET_CRYPTO_OP_##n##_DEC, t, a, nb_elts_processed, thread_idx); \
607  }
609 #undef _
610 
611 #define _(c, h, s, k, d) \
612  static vnet_crypto_async_frame_t \
613  *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc ( \
614  vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx) \
615  { \
616  return crypto_sw_scheduler_dequeue_link ( \
617  vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
618  VNET_CRYPTO_OP_##c##_ENC, VNET_CRYPTO_OP_##h##_HMAC, d, 1, \
619  nb_elts_processed, thread_idx); \
620  } \
621  static vnet_crypto_async_frame_t \
622  *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec ( \
623  vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx) \
624  { \
625  return crypto_sw_scheduler_dequeue_link ( \
626  vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC, \
627  VNET_CRYPTO_OP_##c##_DEC, VNET_CRYPTO_OP_##h##_HMAC, d, 0, \
628  nb_elts_processed, thread_idx); \
629  }
631 #undef _
632  /* *INDENT-ON* */
633 
635 clib_error_t *
637 {
640  clib_error_t *error = 0;
642 
643  u32 queue_size = CRYPTO_SW_SCHEDULER_QUEUE_SIZE * sizeof (void *)
644  + sizeof (crypto_sw_scheduler_queue_t);
645 
648 
649  vec_foreach (ptd, cm->per_thread_data)
650  {
651  ptd->self_crypto_enabled = 1;
652  u32 i;
653  for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_IDS; i++)
654  {
657  ASSERT (q != 0);
658  ptd->queues[i] = q;
659  clib_memset_u8 (q, 0, queue_size);
660  }
661  }
662 
663  cm->crypto_engine_index =
664  vnet_crypto_register_engine (vm, "sw_scheduler", 100,
665  "SW Scheduler Async Engine");
666 
669 
671 
672  /* *INDENT-OFF* */
673 #define _(n, s, k, t, a) \
674  vnet_crypto_register_async_handler ( \
675  vm, cm->crypto_engine_index, \
676  VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
677  crypto_sw_scheduler_frame_enqueue, \
678  crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc); \
679  vnet_crypto_register_async_handler ( \
680  vm, cm->crypto_engine_index, \
681  VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC, \
682  crypto_sw_scheduler_frame_enqueue, \
683  crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec);
685 #undef _
686 
687 #define _(c, h, s, k, d) \
688  vnet_crypto_register_async_handler ( \
689  vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
690  crypto_sw_scheduler_frame_enqueue, \
691  crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc); \
692  vnet_crypto_register_async_handler ( \
693  vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC, \
694  crypto_sw_scheduler_frame_enqueue, \
695  crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec);
697 #undef _
698  /* *INDENT-ON* */
699 
700  if (error)
702 
703  return error;
704 }
705 
706 /* *INDENT-OFF* */
708  .runs_after = VLIB_INITS ("vnet_crypto_init"),
709 };
710 
712  .version = VPP_BUILD_VER,
713  .description = "SW Scheduler Crypto Async Engine plugin",
714 };
715 /* *INDENT-ON* */
716 
717 /*
718  * fd.io coding-style-patch-verification: ON
719  *
720  * Local Variables:
721  * eval: (c-set-style "gnu")
722  * End:
723  */
u32 vnet_crypto_process_ops(vlib_main_t *vm, vnet_crypto_op_t ops[], u32 n_ops)
Definition: crypto.c:99
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:524
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:133
#define vec_foreach_index(var, v)
Iterate over vector indices.
#define clib_min(x, y)
Definition: clib.h:342
#define foreach_crypto_link_async_alg
Definition: crypto.h:96
VLIB_PLUGIN_REGISTER()
#define VNET_CRYPTO_KEY_TYPE_LINK
Definition: crypto.h:215
static clib_error_t * sw_scheduler_show_workers(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
Definition: main.c:547
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:119
unsigned long u64
Definition: types.h:89
static int crypto_sw_scheduler_frame_enqueue(vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
Definition: main.c:77
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:140
static_always_inline void process_chained_ops(vlib_main_t *vm, vnet_crypto_async_frame_t *f, vnet_crypto_op_t *ops, vnet_crypto_op_chunk_t *chunks, u8 *state)
Definition: main.c:302
clib_error_t * crypto_sw_scheduler_init(vlib_main_t *vm)
Definition: main.c:636
vnet_feature_config_main_t * cm
#define VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS
Definition: crypto.h:264
static void crypto_sw_scheduler_key_handler(vlib_main_t *vm, vnet_crypto_key_op_t kop, vnet_crypto_key_index_t idx)
Definition: main.c:54
u32 thread_index
Definition: main.h:213
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:122
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: nat44_ei.c:3048
static_always_inline void crypto_sw_scheduler_convert_aead(vlib_main_t *vm, crypto_sw_scheduler_per_thread_data_t *ptd, vnet_crypto_async_frame_elt_t *fe, u32 index, u32 bi, vnet_crypto_op_id_t op_id, u16 aad_len, u8 tag_len)
Definition: main.c:193
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:645
uword unformat_user(unformat_input_t *input, unformat_function_t *func,...)
Definition: unformat.c:989
static_always_inline void cryptodev_sw_scheduler_sgl(vlib_main_t *vm, crypto_sw_scheduler_per_thread_data_t *ptd, vlib_buffer_t *b, vnet_crypto_op_t *op, i32 offset, i32 len)
Definition: main.c:141
string name[64]
Definition: fib.api:25
vnet_crypto_async_frame_state_t state
Definition: crypto.h:361
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
Definition: vec.h:535
unsigned char u8
Definition: types.h:56
vlib_buffer_t ** b
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
unsigned int u32
Definition: types.h:88
clib_error_t * sw_scheduler_cli_init(vlib_main_t *vm)
Definition: main.c:583
vlib_frame_t * f
static_always_inline void crypto_sw_scheduler_convert_link_crypto(vlib_main_t *vm, crypto_sw_scheduler_per_thread_data_t *ptd, vnet_crypto_key_t *key, vnet_crypto_async_frame_elt_t *fe, u32 index, u32 bi, vnet_crypto_op_id_t crypto_op_id, vnet_crypto_op_id_t integ_op_id, u32 digest_len, u8 is_enc)
Definition: main.c:228
static u32 vlib_get_worker_index(u32 thread_index)
Definition: threads.h:366
void vnet_crypto_register_key_handler(vlib_main_t *vm, u32 engine_index, vnet_crypto_key_handler_t *key_handler)
Definition: crypto.c:316
#define static_always_inline
Definition: clib.h:112
vnet_crypto_key_op_t
Definition: crypto.h:129
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:172
static_always_inline vnet_crypto_async_frame_t * crypto_sw_scheduler_get_completed_frame(crypto_sw_scheduler_queue_t *q)
Definition: main.c:124
static_always_inline vnet_crypto_async_frame_t * crypto_sw_scheduler_dequeue_aead(vlib_main_t *vm, vnet_crypto_async_op_id_t async_op_id, vnet_crypto_op_id_t sync_op_id, u8 tag_len, u8 aad_len, u32 *nb_elts_processed, u32 *enqueue_thread_idx)
Definition: main.c:329
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define clib_error_return(e, args...)
Definition: error.h:99
int __clib_unused rv
Definition: application.c:491
vnet_crypto_op_id_t op
Definition: crypto.h:259
unformat_function_t unformat_line_input
Definition: format.h:275
Definition: cJSON.c:88
vlib_worker_thread_t * vlib_worker_threads
Definition: threads.c:35
uword user_data
Definition: crypto.h:258
crypto_sw_scheduler_queue_t * queues[VNET_CRYPTO_ASYNC_OP_N_IDS]
int crypto_sw_scheduler_set_worker_crypto(u32 worker_idx, u8 enabled)
Definition: main.c:23
struct _unformat_input_t unformat_input_t
unsigned short u16
Definition: types.h:57
#define CRYPTO_SW_SCHEDULER_QUEUE_SIZE
clib_error_t * crypto_sw_scheduler_api_init(vlib_main_t *vm)
Definition: api.c:63
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
vlib_thread_main_t vlib_thread_main
Definition: threads.c:36
#define VNET_CRYPTO_OP_FLAG_HMAC_CHECK
Definition: crypto.h:263
u8 len
Definition: ip_types.api:103
u32 buffer_indices[VNET_CRYPTO_FRAME_SIZE]
Definition: crypto.h:365
#define UNFORMAT_END_OF_INPUT
Definition: format.h:137
#define VNET_CRYPTO_OP_FLAG_INIT_IV
Definition: crypto.h:262
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
static_always_inline vnet_crypto_async_frame_t * crypto_sw_scheduler_dequeue_link(vlib_main_t *vm, vnet_crypto_async_op_id_t async_op_id, vnet_crypto_op_id_t sync_crypto_op_id, vnet_crypto_op_id_t sync_integ_op_id, u16 digest_len, u8 is_enc, u32 *nb_elts_processed, u32 *enqueue_thread_idx)
Definition: main.c:393
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:261
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:395
u32 index
Definition: flow_types.api:221
u8 data[]
Packet data.
Definition: buffer.h:204
#define CRYPTO_SW_SCHEDULER_QUEUE_MASK
static u32 vlib_buffer_space_left_at_end(vlib_main_t *vm, vlib_buffer_t *b)
#define VLIB_CLI_COMMAND(x,...)
Definition: cli.h:163
signed int i32
Definition: types.h:77
#define ASSERT(truth)
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
Definition: cli.c:716
vnet_crypto_async_frame_t * jobs[0]
u32 vnet_crypto_process_chained_ops(vlib_main_t *vm, vnet_crypto_op_t ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)
Definition: crypto.c:105
u8 flags
share same VNET_CRYPTO_OP_FLAG_* values
Definition: crypto.h:342
u32 vnet_crypto_key_index_t
Definition: crypto.h:378
crypto_sw_scheduler_per_thread_data_t * per_thread_data
#define foreach_crypto_aead_async_alg
async crypto
Definition: crypto.h:85
static_always_inline void process_ops(vlib_main_t *vm, vnet_crypto_async_frame_t *f, vnet_crypto_op_t *ops, u8 *state)
Definition: main.c:276
vnet_crypto_async_op_id_t
Definition: crypto.h:182
static_always_inline void clib_memset_u8(void *p, u8 val, uword count)
Definition: string.h:441
typedef key
Definition: ipsec_types.api:88
template key/value backing page structure
Definition: bihash_doc.h:44
vnet_crypto_async_op_id_t op
Definition: crypto.h:362
#define clib_atomic_bool_cmp_and_swap(addr, old, new)
Definition: atomics.h:38
static_always_inline vnet_crypto_key_t * vnet_crypto_get_key(vnet_crypto_key_index_t index)
Definition: crypto.h:548
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:149
VLIB buffer representation.
Definition: buffer.h:111
foreach_crypto_link_async_alg crypto_sw_scheduler_main_t crypto_sw_scheduler_main
Definition: main.c:634
struct clib_bihash_value offset
template key/value backing page structure
vnet_crypto_op_status_t status
Definition: crypto.h:260
static void * clib_mem_alloc_aligned(uword size, uword align)
Definition: mem.h:261
vnet_crypto_op_id_t
Definition: crypto.h:219
u8 * format_unformat_error(u8 *s, va_list *va)
Definition: unformat.c:91
static vlib_thread_main_t * vlib_get_thread_main()
Definition: global_funcs.h:56
vl_api_dhcp_client_state_t state
Definition: dhcp.api:201
static u32 vlib_num_workers()
Definition: threads.h:354
#define vec_foreach(var, vec)
Vector iterator.
u8 count
Definition: dhcp.api:208
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
u32 vnet_crypto_register_engine(vlib_main_t *vm, char *name, int prio, char *desc)
Definition: crypto.c:112
static clib_error_t * sw_scheduler_set_worker_crypto(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
Definition: main.c:481
#define VLIB_INITS(...)
Definition: init.h:352
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:111
static_always_inline vnet_crypto_async_frame_t * crypto_sw_scheduler_get_pending_frame(crypto_sw_scheduler_queue_t *q)
Definition: main.c:101
uword unformat(unformat_input_t *i, const char *fmt,...)
Definition: unformat.c:978
vnet_crypto_op_status_t status
Definition: crypto.h:341
vnet_crypto_async_frame_elt_t elts[VNET_CRYPTO_FRAME_SIZE]
Definition: crypto.h:364
static uword unformat_check_input(unformat_input_t *i)
Definition: format.h:163
static u32 vlib_get_worker_thread_index(u32 worker_index)
Definition: threads.h:360