FD.io VPP v21.06 — Vector Packet Processing
Source listing: src/plugins/dpdk/cryptodev/cryptodev.c
NOTE: this is a Doxygen-rendered listing; lines that contained hyperlinks
(return types, declarations, macro invocations) were dropped by the extraction.
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2020 Intel and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <vnet/ipsec/ipsec.h>
22 #include <vpp/app/version.h>
23 
24 #include <dpdk/buffer.h>
25 #include <dpdk/device/dpdk.h>
26 #include <dpdk/device/dpdk_priv.h>
27 #undef always_inline
28 #include <rte_bus_vdev.h>
29 #include <rte_cryptodev.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_crypto.h>
32 #include <rte_cryptodev_pmd.h>
33 #include <rte_config.h>
34 
35 #include "cryptodev.h"
36 
37 #if CLIB_DEBUG > 0
38 #define always_inline static inline
39 #else
40 #define always_inline static inline __attribute__ ((__always_inline__))
41 #endif
42 
44 
46 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
48  u32 aad_len)
49 {
50  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
51  memset (xform, 0, sizeof (*xform));
52  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
53  xform->next = 0;
54 
55  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
56  key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
57  key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
58  return -1;
59 
60  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
61  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
62  RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
63  aead_xform->aad_length = aad_len;
64  aead_xform->digest_length = 16;
65  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
66  aead_xform->iv.length = 12;
67  aead_xform->key.data = key->data;
68  aead_xform->key.length = vec_len (key->data);
69 
70  return 0;
71 }
72 
74 prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
75  cryptodev_op_type_t op_type,
76  const vnet_crypto_key_t *key)
77 {
78  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
79  vnet_crypto_key_t *key_cipher, *key_auth;
80  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
81  enum rte_crypto_auth_algorithm auth_algo = ~0;
82  u32 digest_len = ~0;
83 
84  key_cipher = vnet_crypto_get_key (key->index_crypto);
85  key_auth = vnet_crypto_get_key (key->index_integ);
86  if (!key_cipher || !key_auth)
87  return -1;
88 
89  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
90  {
91  xform_cipher = xforms;
92  xform_auth = xforms + 1;
93  xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
94  xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
95  }
96  else
97  {
98  xform_cipher = xforms + 1;
99  xform_auth = xforms;
100  xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
101  xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
102  }
103 
104  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
105  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
106  xforms->next = xforms + 1;
107 
108  switch (key->async_alg)
109  {
110 #define _(a, b, c, d, e) \
111  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e: \
112  cipher_algo = RTE_CRYPTO_CIPHER_##b; \
113  auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC; \
114  digest_len = e; \
115  break;
116 
118 #undef _
119  default:
120  return -1;
121  }
122 
123  xform_cipher->cipher.algo = cipher_algo;
124  xform_cipher->cipher.key.data = key_cipher->data;
125  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
126  xform_cipher->cipher.iv.length = 16;
127  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;
128 
129  xform_auth->auth.algo = auth_algo;
130  xform_auth->auth.digest_length = digest_len;
131  xform_auth->auth.key.data = key_auth->data;
132  xform_auth->auth.key.length = vec_len (key_auth->data);
133 
134  return 0;
135 }
136 
138 cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
139 {
140  u32 n_devs, i;
141 
142  if (sess == NULL)
143  return;
144 
145  n_devs = rte_cryptodev_count ();
146 
147  for (i = 0; i < n_devs; i++)
148  rte_cryptodev_sym_session_clear (i, sess);
149 
150  rte_cryptodev_sym_session_free (sess);
151 }
152 
153 static int
154 check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
155 {
158  u32 *s;
159 
160  vec_foreach (vcap, cmt->supported_caps)
161  {
162  if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
163  continue;
164  if (vcap->cipher.algo != algo)
165  continue;
166  vec_foreach (s, vcap->cipher.key_sizes)
167  if (*s == key_size)
168  return 1;
169  }
170 
171  return 0;
172 }
173 
174 static int
175 check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
176 {
179  u32 *s;
180 
181  vec_foreach (vcap, cmt->supported_caps)
182  {
183  if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
184  continue;
185  if (vcap->auth.algo != algo)
186  continue;
187  vec_foreach (s, vcap->auth.digest_sizes)
188  if (*s == digest_size)
189  return 1;
190  }
191 
192  return 0;
193 }
194 
196 check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
197  u32 digest_size, u32 aad_size)
198 {
201  u32 *s;
202  u32 key_match = 0, digest_match = 0, aad_match = 0;
203 
204  vec_foreach (vcap, cmt->supported_caps)
205  {
206  if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
207  continue;
208  if (vcap->aead.algo != algo)
209  continue;
210  vec_foreach (s, vcap->aead.digest_sizes)
211  if (*s == digest_size)
212  {
213  digest_match = 1;
214  break;
215  }
216  vec_foreach (s, vcap->aead.key_sizes)
217  if (*s == key_size)
218  {
219  key_match = 1;
220  break;
221  }
222  vec_foreach (s, vcap->aead.aad_sizes)
223  if (*s == aad_size)
224  {
225  aad_match = 1;
226  break;
227  }
228  }
229 
230  if (key_match == 1 && digest_match == 1 && aad_match == 1)
231  return 1;
232 
233  return 0;
234 }
235 
238 {
239  u32 matched = 0;
240 
241  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
242  {
243  switch (key->async_alg)
244  {
245 #define _(a, b, c, d, e) \
246  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e: \
247  if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) && \
248  check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e)) \
249  return 1;
251 #undef _
252  default : return 0;
253  }
254  return 0;
255  }
256 
257 #define _(a, b, c, d, e, f, g) \
258  if (key->alg == VNET_CRYPTO_ALG_##a) \
259  { \
260  if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f)) \
261  matched++; \
262  }
264 #undef _
265 
266  if (matched < 2) return 0;
267 
268  return 1;
269 }
270 
/* cryptodev_sess_handler: react to a vnet crypto key add/delete.
 * On delete: free all per-numa enc/dec sessions cached for the key.
 * Otherwise: validate the per-key session table (sessions themselves are
 * created lazily elsewhere), but only for algorithms this engine supports.
 *
 * NOTE(review): this is a garbled Doxygen listing — the signature line,
 * the cmt/key declarations, the key-op condition (line 283) and the
 * vec_foreach_index line (288) were elided; left byte-identical rather
 * than guessed. Confirm against upstream v21.06 before relying on it. */
271 void
273  vnet_crypto_key_index_t idx, u32 aad_len)
274 {
277  cryptodev_key_t *ckey = 0;
278  u32 i;
279 
280  vec_validate (cmt->keys, idx);
281  ckey = vec_elt_at_index (cmt->keys, idx);
282 
/* (elided condition here — presumably kop == VNET_CRYPTO_KEY_OP_DEL; verify) */
284  {
285  if (idx >= vec_len (cmt->keys))
286  return;
287 
289  {
290  if (!ckey->keys)
291  continue;
292  if (!ckey->keys[i])
293  continue;
294  if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
295  {
296  cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
298 
300  ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
301  ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
302  }
303  }
304  return;
305  }
306 
307  /* create key */
308 
309  /* do not create session for unsupported alg */
310  if (cryptodev_check_supported_vnet_alg (key) == 0)
311  return;
312 
313  vec_validate (ckey->keys, idx);
314  vec_foreach_index (i, ckey->keys)
315  vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
316 }
317 
318 /*static*/ void
321 {
322  cryptodev_sess_handler (vm, kop, idx, 8);
323 }
324 
325 int
327  u32 aad_len)
328 {
330  cryptodev_numa_data_t *numa_data;
331  cryptodev_inst_t *dev_inst;
333  struct rte_mempool *sess_pool, *sess_priv_pool;
334  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
335  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
336  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
337  struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
338  u32 numa_node = vm->numa_node;
339  int ret;
340 
341  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
342  sess_pool = numa_data->sess_pool;
343  sess_priv_pool = numa_data->sess_priv_pool;
344 
345  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
346  rte_cryptodev_sym_session_create (sess_pool);
347  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT])
348  {
349  ret = -1;
350  goto clear_key;
351  }
352 
353  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
354  rte_cryptodev_sym_session_create (sess_pool);
355  if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT])
356  {
357  ret = -1;
358  goto clear_key;
359  }
360 
361  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
362  ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
363  else
364  ret =
365  prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
366  if (ret)
367  return 0;
368 
369  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
370  prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
371  else
372  prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);
373 
374  vec_foreach (dev_inst, cmt->cryptodev_inst)
375  {
376  u32 dev_id = dev_inst->dev_id;
377  struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);
378  u32 driver_id = cdev->driver_id;
379 
380  /* if the session is already configured for the driver type, avoid
381  configuring it again to increase the session data's refcnt */
382  if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
383  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
384  continue;
385 
386  ret = rte_cryptodev_sym_session_init (
387  dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
388  sess_priv_pool);
389  ret = rte_cryptodev_sym_session_init (
390  dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
391  sess_priv_pool);
392  if (ret < 0)
393  return ret;
394  }
395 
396  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
397  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
398 
400  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
401  sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
402  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
403  sessions[CRYPTODEV_OP_TYPE_DECRYPT];
404 
405 clear_key:
406  if (ret != 0)
407  {
408  cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
409  cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
410  }
411  return ret;
412 }
413 
/** How a cryptodev queue-pair resource is assigned to a worker thread.
 * NOTE(review): the enumerator lines were elided in the garbled listing;
 * names reconstructed from the assignment function's usage — confirm
 * against upstream. */
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0, /**< pick the first free instance */
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,   /**< assign the explicitly given index */
} cryptodev_resource_assign_op_t;
420 /**
421  * assign a cryptodev resource to a worker.
422  * @param cet: the worker thread data
423  * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
424  * @param op: the assignment method.
425  * @return: 0 if successfully, negative number otherwise.
426  **/
429  u32 cryptodev_inst_index,
431 {
433  cryptodev_inst_t *cinst = 0;
434  uword idx;
435 
436  /* assign resource is only allowed when no inflight op is in the queue */
437  if (cet->inflight)
438  return -EBUSY;
439 
440  switch (op)
441  {
444  vec_len (cmt->cryptodev_inst))
445  return -1;
446 
447  clib_spinlock_lock (&cmt->tlock);
449  clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
450  cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
451  cet->cryptodev_id = cinst->dev_id;
452  cet->cryptodev_q = cinst->q_id;
453  clib_spinlock_unlock (&cmt->tlock);
454  break;
456  /* assigning a used cryptodev resource is not allowed */
457  if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
458  == 1)
459  return -EBUSY;
460 
462  {
463  cinst = cmt->cryptodev_inst + idx;
464  if (cinst->dev_id == cet->cryptodev_id &&
465  cinst->q_id == cet->cryptodev_q)
466  break;
467  }
468  /* invalid existing worker resource assignment */
469  if (idx == vec_len (cmt->cryptodev_inst))
470  return -EINVAL;
471  clib_spinlock_lock (&cmt->tlock);
474  cryptodev_inst_index, 1);
475  cinst = cmt->cryptodev_inst + cryptodev_inst_index;
476  cet->cryptodev_id = cinst->dev_id;
477  cet->cryptodev_q = cinst->q_id;
478  clib_spinlock_unlock (&cmt->tlock);
479  break;
480  default:
481  return -EINVAL;
482  }
483  return 0;
484 }
485 
486 static u8 *
487 format_cryptodev_inst (u8 * s, va_list * args)
488 {
490  u32 inst = va_arg (*args, u32);
491  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
492  u32 thread_index = 0;
493  struct rte_cryptodev_info info;
494 
495  rte_cryptodev_info_get (cit->dev_id, &info);
496  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);
497 
498  vec_foreach_index (thread_index, cmt->per_thread_data)
499  {
501  if (vlib_num_workers () > 0 && thread_index == 0)
502  continue;
503 
504  if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
505  {
506  s = format (s, "%u (%v)\n", thread_index,
507  vlib_worker_threads[thread_index].name);
508  break;
509  }
510  }
511 
512  if (thread_index == vec_len (cmt->per_thread_data))
513  s = format (s, "%s\n", "free");
514 
515  return s;
516 }
517 
518 static clib_error_t *
520  vlib_cli_command_t * cmd)
521 {
523  u32 inst;
524 
525  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
526  "Assigned-to");
527  if (vec_len (cmt->cryptodev_inst) == 0)
528  {
529  vlib_cli_output (vm, "(nil)\n");
530  return 0;
531  }
532 
533  vec_foreach_index (inst, cmt->cryptodev_inst)
534  vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);
535 
536  if (cmt->is_raw_api)
537  vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
538  else
539  vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
540  return 0;
541 }
542 
543 VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
544  .path = "show cryptodev assignment",
545  .short_help = "show cryptodev assignment",
546  .function = cryptodev_show_assignment_fn,
547 };
548 
549 static clib_error_t *
551  vlib_cli_command_t * cmd)
552 {
555  unformat_input_t _line_input, *line_input = &_line_input;
556  u32 thread_index, inst_index;
557  u32 thread_present = 0, inst_present = 0;
558  clib_error_t *error = 0;
559  int ret;
560 
561  /* Get a line of input. */
562  if (!unformat_user (input, unformat_line_input, line_input))
563  return 0;
564 
565  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
566  {
567  if (unformat (line_input, "thread %u", &thread_index))
568  thread_present = 1;
569  else if (unformat (line_input, "resource %u", &inst_index))
570  inst_present = 1;
571  else
572  {
573  error = clib_error_return (0, "unknown input `%U'",
574  format_unformat_error, line_input);
575  return error;
576  }
577  }
578 
579  if (!thread_present || !inst_present)
580  {
581  error = clib_error_return (0, "mandatory argument(s) missing");
582  return error;
583  }
584 
585  if (thread_index == 0 && vlib_num_workers () > 0)
586  {
587  error =
588  clib_error_return (0, "assign crypto resource for master thread");
589  return error;
590  }
591 
592  if (thread_index > vec_len (cmt->per_thread_data) ||
593  inst_index > vec_len (cmt->cryptodev_inst))
594  {
595  error = clib_error_return (0, "wrong thread id or resource id");
596  return error;
597  }
598 
599  cet = cmt->per_thread_data + thread_index;
600  ret = cryptodev_assign_resource (cet, inst_index,
602  if (ret)
603  {
604  error =
605  clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
606  return error;
607  }
608 
609  return 0;
610 }
611 
612 VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
613  .path = "set cryptodev assignment",
614  .short_help = "set cryptodev assignment thread <thread_index> "
615  "resource <inst_index>",
616  .function = cryptodev_set_assignment_fn,
617 };
618 
619 static u32
621 {
622  struct rte_cryptodev_info info;
623  u32 n_cryptodev = rte_cryptodev_count ();
624  u32 i, q_count = 0;
625 
626  for (i = 0; i < n_cryptodev; i++)
627  {
628  rte_cryptodev_info_get (i, &info);
629  q_count += info.max_nb_queue_pairs;
630  }
631 
632  return q_count;
633 }
634 
635 static int
637 {
638  struct rte_cryptodev_config cfg;
639  struct rte_cryptodev_info info;
641  u32 i;
642  int ret;
643 
644  rte_cryptodev_info_get (cryptodev_id, &info);
645 
646  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
647  return -1;
648 
649  cfg.socket_id = info.device->numa_node;
650  cfg.nb_queue_pairs = info.max_nb_queue_pairs;
651 
652  rte_cryptodev_configure (cryptodev_id, &cfg);
653 
654  for (i = 0; i < info.max_nb_queue_pairs; i++)
655  {
656  struct rte_cryptodev_qp_conf qp_cfg;
657 
658  qp_cfg.mp_session = 0;
659  qp_cfg.mp_session_private = 0;
660  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
661 
662  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
663  info.device->numa_node);
664  if (ret)
665  {
666  clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
667  cryptodev_id, i, ret);
668  break;
669  }
670  }
671 
672  if (i != info.max_nb_queue_pairs)
673  return -1;
674 
675  /* start the device */
676  rte_cryptodev_start (cryptodev_id);
677 
678  for (i = 0; i < info.max_nb_queue_pairs; i++)
679  {
680  cryptodev_inst_t *cdev_inst;
681  vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
682  cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
683  cdev_inst->dev_id = cryptodev_id;
684  cdev_inst->q_id = i;
685 
686  snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
687  "%s_q%u", info.device->name, i);
688  }
689 
690  return 0;
691 }
692 
693 static int
694 cryptodev_cmp (void *v1, void *v2)
695 {
696  cryptodev_inst_t *a1 = v1;
697  cryptodev_inst_t *a2 = v2;
698 
699  if (a1->q_id > a2->q_id)
700  return 1;
701  if (a1->q_id < a2->q_id)
702  return -1;
703  return 0;
704 }
705 
706 static int
707 cryptodev_supports_param_value (u32 *params, u32 param_value)
708 {
709  u32 *value;
710  vec_foreach (value, params)
711  {
712  if (*value == param_value)
713  return 1;
714  }
715  return 0;
716 }
717 
718 int
719 cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
720  u32 key_size, u32 digest_size, u32 aad_size)
721 {
724  vec_foreach (cap, cmt->supported_caps)
725  {
726 
727  if (cap->xform_type != idx->type)
728  continue;
729 
730  if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
731  cap->auth.algo == idx->algo.auth &&
732  cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
733  return 1;
734 
735  if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
736  cap->cipher.algo == idx->algo.cipher &&
737  cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
738  return 1;
739 
740  if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
741  cap->aead.algo == idx->algo.aead &&
742  cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
743  cryptodev_supports_param_value (cap->aead.digest_sizes,
744  digest_size) &&
745  cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
746  return 1;
747  }
748  return 0;
749 }
750 
751 static void
752 remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
753  u32 param_size_max, u32 increment)
754 {
755  u32 i = 0;
756  u32 cap_param_size;
757 
758  while (i < vec_len (*param_sizes))
759  {
760  u32 found_param = 0;
761  for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
762  cap_param_size += increment)
763  {
764  if ((*param_sizes)[i] == cap_param_size)
765  {
766  found_param = 1;
767  break;
768  }
769  if (increment == 0)
770  break;
771  }
772  if (!found_param)
773  /* no such param_size in cap so delete this size in temp_cap params */
774  vec_delete (*param_sizes, 1, i);
775  else
776  i++;
777  }
778 }
779 
780 static void
782 {
783  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];
784 
785  switch (temp_cap.xform_type)
786  {
787  case RTE_CRYPTO_SYM_XFORM_AUTH:
788  vec_free (temp_cap.auth.digest_sizes);
789  break;
790  case RTE_CRYPTO_SYM_XFORM_CIPHER:
791  vec_free (temp_cap.cipher.key_sizes);
792  break;
793  case RTE_CRYPTO_SYM_XFORM_AEAD:
794  vec_free (temp_cap.aead.key_sizes);
795  vec_free (temp_cap.aead.aad_sizes);
796  vec_free (temp_cap.aead.digest_sizes);
797  break;
798  default:
799  break;
800  }
801  vec_delete (*temp_caps, 1, temp_cap_id);
802 }
803 
804 static u32
806  cryptodev_capability_t *temp_cap,
807  const struct rte_cryptodev_capabilities *dev_caps)
808 {
809  u32 cap_found = 0;
810  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];
811 
812  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
813  {
814  if (cap->sym.xform_type == temp_cap->xform_type)
815  switch (cap->sym.xform_type)
816  {
817  case RTE_CRYPTO_SYM_XFORM_CIPHER:
818  if (cap->sym.cipher.algo == temp_cap->cipher.algo)
819  {
821  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
822  cap->sym.cipher.key_size.max,
823  cap->sym.cipher.key_size.increment);
824  if (vec_len (temp_cap->cipher.key_sizes) > 0)
825  cap_found = 1;
826  }
827  break;
828  case RTE_CRYPTO_SYM_XFORM_AUTH:
829  if (cap->sym.auth.algo == temp_cap->auth.algo)
830  {
832  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
833  cap->sym.auth.digest_size.max,
834  cap->sym.auth.digest_size.increment);
835  if (vec_len (temp_cap->auth.digest_sizes) > 0)
836  cap_found = 1;
837  }
838  break;
839  case RTE_CRYPTO_SYM_XFORM_AEAD:
840  if (cap->sym.aead.algo == temp_cap->aead.algo)
841  {
843  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
844  cap->sym.aead.key_size.max,
845  cap->sym.aead.key_size.increment);
847  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
848  cap->sym.aead.aad_size.max,
849  cap->sym.aead.aad_size.increment);
851  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
852  cap->sym.aead.digest_size.max,
853  cap->sym.aead.digest_size.increment);
854  if (vec_len (temp_cap->aead.key_sizes) > 0 &&
855  vec_len (temp_cap->aead.aad_sizes) > 0 &&
856  vec_len (temp_cap->aead.digest_sizes) > 0)
857  cap_found = 1;
858  }
859  break;
860  default:
861  break;
862  }
863  if (cap_found)
864  break;
865  cap++;
866  }
867 
868  return cap_found;
869 }
870 
871 static void
873 {
875  cryptodev_inst_t *dev_inst;
876  struct rte_cryptodev_info dev_info;
877  u32 previous_dev_id, dev_id;
878  u32 cap_id = 0;
879  u32 param;
880  cryptodev_capability_t tmp_cap;
881  const struct rte_cryptodev_capabilities *cap;
882  const struct rte_cryptodev_capabilities *dev_caps;
883 
884  clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
885  if (vec_len (cmt->cryptodev_inst) == 0)
886  return;
887  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
888  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
889  cap = &dev_info.capabilities[0];
890 
891  /*init capabilities vector*/
892  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
893  {
894  if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
895  {
896  cap++;
897  continue;
898  }
899 
900  tmp_cap.xform_type = cap->sym.xform_type;
901  switch (cap->sym.xform_type)
902  {
903  case RTE_CRYPTO_SYM_XFORM_CIPHER:
904  tmp_cap.cipher.key_sizes = 0;
905  tmp_cap.cipher.algo = cap->sym.cipher.algo;
906  for (param = cap->sym.cipher.key_size.min;
907  param <= cap->sym.cipher.key_size.max;
908  param += cap->sym.cipher.key_size.increment)
909  {
910  vec_add1 (tmp_cap.cipher.key_sizes, param);
911  if (cap->sym.cipher.key_size.increment == 0)
912  break;
913  }
914  break;
915  case RTE_CRYPTO_SYM_XFORM_AUTH:
916  tmp_cap.auth.algo = cap->sym.auth.algo;
917  tmp_cap.auth.digest_sizes = 0;
918  for (param = cap->sym.auth.digest_size.min;
919  param <= cap->sym.auth.digest_size.max;
920  param += cap->sym.auth.digest_size.increment)
921  {
922  vec_add1 (tmp_cap.auth.digest_sizes, param);
923  if (cap->sym.auth.digest_size.increment == 0)
924  break;
925  }
926  break;
927  case RTE_CRYPTO_SYM_XFORM_AEAD:
928  tmp_cap.aead.key_sizes = 0;
929  tmp_cap.aead.aad_sizes = 0;
930  tmp_cap.aead.digest_sizes = 0;
931  tmp_cap.aead.algo = cap->sym.aead.algo;
932  for (param = cap->sym.aead.key_size.min;
933  param <= cap->sym.aead.key_size.max;
934  param += cap->sym.aead.key_size.increment)
935  {
936  vec_add1 (tmp_cap.aead.key_sizes, param);
937  if (cap->sym.aead.key_size.increment == 0)
938  break;
939  }
940  for (param = cap->sym.aead.aad_size.min;
941  param <= cap->sym.aead.aad_size.max;
942  param += cap->sym.aead.aad_size.increment)
943  {
944  vec_add1 (tmp_cap.aead.aad_sizes, param);
945  if (cap->sym.aead.aad_size.increment == 0)
946  break;
947  }
948  for (param = cap->sym.aead.digest_size.min;
949  param <= cap->sym.aead.digest_size.max;
950  param += cap->sym.aead.digest_size.increment)
951  {
952  vec_add1 (tmp_cap.aead.digest_sizes, param);
953  if (cap->sym.aead.digest_size.increment == 0)
954  break;
955  }
956  break;
957  default:
958  break;
959  }
960 
961  vec_add1 (cmt->supported_caps, tmp_cap);
962  cap++;
963  }
964 
965  while (cap_id < vec_len (cmt->supported_caps))
966  {
967  u32 cap_is_supported = 1;
968  previous_dev_id = cmt->cryptodev_inst->dev_id;
969 
970  vec_foreach (dev_inst, cmt->cryptodev_inst)
971  {
972  dev_id = dev_inst->dev_id;
973  if (previous_dev_id != dev_id)
974  {
975  previous_dev_id = dev_id;
976  rte_cryptodev_info_get (dev_id, &dev_info);
977  dev_caps = &dev_info.capabilities[0];
978  cap_is_supported = cryptodev_remove_unsupported_param_sizes (
979  &cmt->supported_caps[cap_id], dev_caps);
980  if (!cap_is_supported)
981  {
982  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
983  /*no need to check other devices as this one doesn't support
984  * this temp_cap*/
985  break;
986  }
987  }
988  }
989  if (cap_is_supported)
990  cap_id++;
991  }
992 }
993 
/* cryptodev_probe: configure every DPDK cryptodev and verify there are at
 * least n_workers queue pairs available; returns 0 on success, -1 if not
 * enough devices/queues.
 *
 * NOTE(review): garbled Doxygen listing — the signature line, the cmt
 * declaration, and original lines 1009-1010 (presumably the
 * vec_sort_with_function(cmt->cryptodev_inst, cryptodev_cmp) call used by
 * the comparator defined above) are elided; left byte-identical rather
 * than guessed. Confirm against upstream. */
994 static int
996 {
998  u32 n_queues = cryptodev_count_queue (vm->numa_node);
999  u32 i;
1000 
1001  if (n_queues < n_workers)
1002  return -1;
1003 
1004  for (i = 0; i < rte_cryptodev_count (); i++)
1005  cryptodev_configure (vm, i);
1006 
1007  if (vec_len (cmt->cryptodev_inst) == 0)
1008  return -1;
1011 
1012  /* if there is not enough device stop cryptodev */
1013  if (vec_len (cmt->cryptodev_inst) < n_workers)
1014  return -1;
1015 
1016  return 0;
1017 }
1018 
1019 static void
1020 cryptodev_get_max_sz (u32 *max_sess_sz, u32 *max_dp_sz)
1021 {
1023  cryptodev_inst_t *cinst;
1024  u32 max_sess = 0, max_dp = 0;
1025 
1026  vec_foreach (cinst, cmt->cryptodev_inst)
1027  {
1028  u32 sess_sz = rte_cryptodev_sym_get_private_session_size (cinst->dev_id);
1029  u32 dp_sz = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);
1030 
1031  max_sess = clib_max (sess_sz, max_sess);
1032  max_dp = clib_max (dp_sz, max_dp);
1033  }
1034 
1035  *max_sess_sz = max_sess;
1036  *max_dp_sz = max_dp;
1037 }
1038 
1039 static void
1041 {
1044  u32 i;
1045 
1046  for (i = (vlib_num_workers () > 0); i < tm->n_vlib_mains; i++)
1047  {
1048  u32 numa = vlib_get_main_by_index (i)->numa_node;
1049  cryptodev_numa_data_t *numa_data;
1050 
1051  vec_validate (cmt->per_numa_data, numa);
1052  numa_data = cmt->per_numa_data + numa;
1053  if (numa_data->sess_pool)
1054  rte_mempool_free (numa_data->sess_pool);
1055  if (numa_data->sess_priv_pool)
1056  rte_mempool_free (numa_data->sess_priv_pool);
1057  }
1058 }
1059 
/* dpdk_cryptodev_init: engine entry point. Probes cryptodevs, creates
 * per-numa session and session-private mempools, registers the engine and
 * its (raw or crypto-op) handlers, then enables async crypto mode.
 *
 * NOTE(review): garbled Doxygen listing — the signature, several
 * declaration lines (cmt, tm, cet, error), the per-thread-data
 * vec_validate_aligned, the key-handler registration, the raw-API
 * condition before cryptodev_register_raw_hdl, the
 * vnet_crypto_request_async_mode call and the err_handling cleanup call
 * were all elided; left byte-identical rather than guessed. Confirm
 * against upstream v21.06 before relying on this listing. */
1060 clib_error_t *
1062 {
1066  cryptodev_numa_data_t *numa_data;
1067  struct rte_mempool *mp;
1068  u32 skip_master = vlib_num_workers () > 0;
1069  u32 n_workers = tm->n_vlib_mains - skip_master;
1070  u32 numa = vm->numa_node;
1071  u32 sess_sz, dp_sz;
1072  u32 eidx;
1073  u32 i;
1074  u8 *name = 0;
1076 
1077  cmt->iova_mode = rte_eal_iova_mode ();
1078 
1079  vec_validate (cmt->per_numa_data, vm->numa_node);
1080 
1081  /* probe all cryptodev devices and get queue info */
1082  if (cryptodev_probe (vm, n_workers) < 0)
1083  {
1084  error = clib_error_return (0, "Failed to configure cryptodev");
1085  goto err_handling;
1086  }
1087 
1088  cryptodev_get_max_sz (&sess_sz, &dp_sz);
1089 
1091  clib_spinlock_init (&cmt->tlock);
1092 
/* (per-thread-data validation elided here by the extraction) */
1095  for (i = skip_master; i < tm->n_vlib_mains; i++)
1096  {
1097  cet = cmt->per_thread_data + i;
1098  numa = vlib_get_main_by_index (i)->numa_node;
1099 
1100  vec_validate (cmt->per_numa_data, numa);
1101  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
1102 
1103  if (!numa_data->sess_pool)
1104  {
1105  /* create session pool for the numa node */
1106  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
1107  mp = rte_cryptodev_sym_session_pool_create (
1108  (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa);
1109  if (!mp)
1110  {
1111  error =
1112  clib_error_return (0, "Not enough memory for mp %s", name);
1113  goto err_handling;
1114  }
1115  vec_free (name);
1116 
1117  numa_data->sess_pool = mp;
1118 
1119  /* create session private pool for the numa node */
1120  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
1121  mp =
1122  rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz,
1123  0, 0, NULL, NULL, NULL, NULL, numa, 0);
1124  if (!mp)
1125  {
1126  error =
1127  clib_error_return (0, "Not enough memory for mp %s", name);
1128  vec_free (name);
1129  goto err_handling;
1130  }
1131 
1132  vec_free (name);
1133 
1134  numa_data->sess_priv_pool = mp;
1135  }
1136 
1138  }
1139 
1140  /* register handler */
1141  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
1142  "DPDK Cryptodev Engine");
1143 
1145 
1147  error = cryptodev_register_raw_hdl (vm, eidx);
1148  else
1149  error = cryptodev_register_cop_hdl (vm, eidx);
1150 
1151  if (error)
1152  goto err_handling;
1153 
1154  /* this engine is only enabled when cryptodev device(s) are presented in
1155  * startup.conf. Assume it is wanted to be used, turn on async mode here.
1156  */
1159 
1160  return 0;
1161 
1162 err_handling:
1164 
1165  return error;
1166 }
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:524
#define CRYPTODEV_IV_OFFSET
Definition: cryptodev.h:35
#define vec_foreach_index(var, v)
Iterate over vector indices.
enum rte_iova_mode iova_mode
Definition: cryptodev.h:168
static_always_inline int cryptodev_check_supported_vnet_alg(vnet_crypto_key_t *key)
Definition: cryptodev.c:237
static u32 cryptodev_remove_unsupported_param_sizes(cryptodev_capability_t *temp_cap, const struct rte_cryptodev_capabilities *dev_caps)
Definition: cryptodev.c:805
static_always_inline void clib_spinlock_unlock(clib_spinlock_t *p)
Definition: lock.h:121
static_always_inline void clib_spinlock_lock(clib_spinlock_t *p)
Definition: lock.h:82
static void remove_unsupported_param_size(u32 **param_sizes, u32 param_size_min, u32 param_size_max, u32 increment)
Definition: cryptodev.c:752
cryptodev_resource_assign_op_t
Definition: cryptodev.c:414
static void cryptodev_get_common_capabilities()
Definition: cryptodev.c:872
static int check_auth_support(enum rte_crypto_auth_algorithm algo, u32 digest_size)
Definition: cryptodev.c:175
static clib_error_t * cryptodev_set_assignment_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
Definition: cryptodev.c:550
#define VNET_CRYPTO_KEY_TYPE_LINK
Definition: crypto.h:215
u32 thread_index
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:140
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
void cryptodev_key_handler(vlib_main_t *vm, vnet_crypto_key_op_t kop, vnet_crypto_key_index_t idx)
Definition: cryptodev.c:319
static u32 cryptodev_count_queue(u32 numa)
Definition: cryptodev.c:620
#define foreach_cryptodev_link_async_alg
crypto (alg, cryptodev_alg, key_size), hash (alg, digest-size)
Definition: cryptodev.h:51
void vnet_crypto_request_async_mode(int is_enable)
Definition: crypto.c:571
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:607
enum rte_crypto_sym_xform_type xform_type
Definition: cryptodev.h:88
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:645
static uword * clib_bitmap_set(uword *ai, uword i, uword value)
Sets the ith bit of a bitmap to new_value Removes trailing zeros from the bitmap. ...
Definition: bitmap.h:167
uword unformat_user(unformat_input_t *input, unformat_function_t *func,...)
Definition: unformat.c:989
string name[64]
Definition: fib.api:25
u32 numa_node
Definition: main.h:215
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
Definition: vec.h:535
clib_bitmap_t * active_cdev_inst_mask
Definition: cryptodev.h:170
unsigned char u8
Definition: types.h:56
u8 data[128]
Definition: ipsec_types.api:92
unsigned int u32
Definition: types.h:88
static uword clib_bitmap_set_no_check(uword *a, uword i, uword new_value)
Sets the ith bit of a bitmap to new_value.
Definition: bitmap.h:141
clib_error_t * cryptodev_register_cop_hdl(vlib_main_t *vm, u32 eidx)
static int cryptodev_supports_param_value(u32 *params, u32 param_value)
Definition: cryptodev.c:707
void vnet_crypto_register_key_handler(vlib_main_t *vm, u32 engine_index, vnet_crypto_key_handler_t *key_handler)
Definition: crypto.c:316
int cryptodev_session_create(vlib_main_t *vm, vnet_crypto_key_index_t idx, u32 aad_len)
Definition: cryptodev.c:326
#define static_always_inline
Definition: clib.h:112
#define CRYPTODEV_NB_SESSION
Definition: cryptodev.h:30
vnet_crypto_key_op_t
Definition: crypto.h:129
#define vec_new(T, N)
Create new vector of given type and length (unspecified alignment, no header).
Definition: vec.h:365
description fragment has unexpected format
Definition: map.api:433
struct rte_mempool * sess_pool
Definition: cryptodev.h:121
struct rte_cryptodev_sym_session *** keys
Definition: cryptodev.h:81
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define clib_error_return(e, args...)
Definition: error.h:99
static int check_cipher_support(enum rte_crypto_cipher_algorithm algo, u32 key_size)
Definition: cryptodev.c:154
static_always_inline int check_aead_support(enum rte_crypto_aead_algorithm algo, u32 key_size, u32 digest_size, u32 aad_size)
Definition: cryptodev.c:196
struct cryptodev_capability_t::@641::@643 auth
struct cryptodev_capability_t::@641::@645 aead
unformat_function_t unformat_line_input
Definition: format.h:275
static void clib_spinlock_init(clib_spinlock_t *p)
Definition: lock.h:65
Definition: cJSON.c:88
vlib_worker_thread_t * vlib_worker_threads
Definition: threads.c:35
cryptodev_engine_thread_t * per_thread_data
Definition: cryptodev.h:167
clib_error_t *__clib_weak cryptodev_register_raw_hdl(vlib_main_t *vm, u32 eidx)
clib_spinlock_t tlock
Definition: cryptodev.h:171
static void cryptodev_get_max_sz(u32 *max_sess_sz, u32 *max_dp_sz)
Definition: cryptodev.c:1020
static int cryptodev_configure(vlib_main_t *vm, u32 cryptodev_id)
Definition: cryptodev.c:636
struct _unformat_input_t unformat_input_t
static_always_inline int cryptodev_assign_resource(cryptodev_engine_thread_t *cet, u32 cryptodev_inst_index, cryptodev_resource_assign_op_t op)
assign a cryptodev resource to a worker.
Definition: cryptodev.c:428
static_always_inline int prepare_linked_xform(struct rte_crypto_sym_xform *xforms, cryptodev_op_type_t op_type, const vnet_crypto_key_t *key)
Definition: cryptodev.c:74
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
static void cryptodev_delete_cap(cryptodev_capability_t **temp_caps, u32 temp_cap_id)
Definition: cryptodev.c:781
vnet_crypto_alg_t alg
Definition: crypto.h:205
#define foreach_vnet_aead_crypto_conversion
Definition: cryptodev.h:40
clib_error_t * dpdk_cryptodev_init(vlib_main_t *vm)
Definition: cryptodev.c:1061
vnet_crypto_async_alg_t async_alg
Definition: crypto.h:211
int cryptodev_check_cap_support(struct rte_cryptodev_sym_capability_idx *idx, u32 key_size, u32 digest_size, u32 aad_size)
Definition: cryptodev.c:719
#define UNFORMAT_END_OF_INPUT
Definition: format.h:137
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:261
cryptodev_main_t cryptodev_main
Definition: cryptodev.c:43
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:395
#define clib_warning(format, args...)
Definition: error.h:59
#define CRYPTODEV_NB_CRYPTO_OPS
Definition: cryptodev.h:24
static uword clib_bitmap_get(uword *ai, uword i)
Gets the ith bit value from a bitmap.
Definition: bitmap.h:197
cryptodev_numa_data_t * per_numa_data
Definition: cryptodev.h:165
cryptodev_inst_t * cryptodev_inst
Definition: cryptodev.h:169
#define clib_bitmap_vec_validate(v, i)
Definition: bitmap.h:112
static int cryptodev_probe(vlib_main_t *vm, u32 n_workers)
Definition: cryptodev.c:995
#define VLIB_CLI_COMMAND(x,...)
Definition: cli.h:163
u8 value
Definition: qos.api:54
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
Definition: cli.c:716
static_always_inline void cryptodev_session_del(struct rte_cryptodev_sym_session *sess)
Definition: cryptodev.c:138
#define vec_delete(V, N, M)
Delete N elements starting at element M.
Definition: vec.h:876
static u8 * format_cryptodev_inst(u8 *s, va_list *args)
Definition: cryptodev.c:487
static int cryptodev_cmp(void *v1, void *v2)
Definition: cryptodev.c:694
static clib_error_t * cryptodev_show_assignment_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
Definition: cryptodev.c:519
u32 vnet_crypto_key_index_t
Definition: crypto.h:378
struct rte_mempool * sess_priv_pool
Definition: cryptodev.h:122
#define clib_max(x, y)
Definition: clib.h:335
typedef key
Definition: ipsec_types.api:88
static uword clib_bitmap_count_set_bits(uword *ai)
Return the number of set bits in a bitmap.
Definition: bitmap.h:468
static vlib_main_t * vlib_get_main_by_index(u32 thread_index)
Definition: global_funcs.h:29
static_always_inline vnet_crypto_key_t * vnet_crypto_get_key(vnet_crypto_key_index_t index)
Definition: crypto.h:548
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
cryptodev_capability_t * supported_caps
Definition: cryptodev.h:172
u64 uword
Definition: types.h:112
#define vec_sort_with_function(vec, f)
Sort a vector using the supplied element comparison function.
Definition: vec.h:1098
void cryptodev_sess_handler(vlib_main_t *vm, vnet_crypto_key_op_t kop, vnet_crypto_key_index_t idx, u32 aad_len)
Definition: cryptodev.c:272
cryptodev_op_type_t
Definition: cryptodev.h:71
static void dpdk_disable_cryptodev_engine(vlib_main_t *vm)
Definition: cryptodev.c:1040
u8 * format_unformat_error(u8 *s, va_list *va)
Definition: unformat.c:91
struct cryptodev_capability_t::@641::@644 cipher
static vlib_thread_main_t * vlib_get_thread_main()
Definition: global_funcs.h:56
static u32 vlib_num_workers()
Definition: threads.h:354
#define vec_foreach(var, vec)
Vector iterator.
void ipsec_set_async_mode(u32 is_enabled)
Definition: ipsec.c:327
static uword clib_bitmap_first_clear(uword *ai)
Return the lowest numbered clear bit in a bitmap.
Definition: bitmap.h:451
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
u32 vnet_crypto_register_engine(vlib_main_t *vm, char *name, int prio, char *desc)
Definition: crypto.c:112
static_always_inline int prepare_aead_xform(struct rte_crypto_sym_xform *xform, cryptodev_op_type_t op_type, const vnet_crypto_key_t *key, u32 aad_len)
Definition: cryptodev.c:46
uword unformat(unformat_input_t *i, const char *fmt,...)
Definition: unformat.c:978
static uword unformat_check_input(unformat_input_t *i)
Definition: format.h:163
cryptodev_key_t * keys
Definition: cryptodev.h:166