FD.io VPP  v20.09-64-g4f7b92f0a
Vector Packet Processing
cryptodev.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2020 Intel and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <vnet/vnet.h>
22 #include <vpp/app/version.h>
23 
24 #include <dpdk/buffer.h>
25 #include <dpdk/device/dpdk.h>
26 #include <dpdk/device/dpdk_priv.h>
27 #undef always_inline
28 #include <rte_bus_vdev.h>
29 #include <rte_cryptodev.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_crypto.h>
32 #include <rte_cryptodev_pmd.h>
33 #include <rte_config.h>
34 
35 #if CLIB_DEBUG > 0
36 #define always_inline static inline
37 #else
38 #define always_inline static inline __attribute__ ((__always_inline__))
39 #endif
40 
41 #define CRYPTODEV_NB_CRYPTO_OPS 1024
42 #define CRYPTODEV_NB_SESSION 10240
43 #define CRYPTODEV_DEF_DRIVE crypto_aesni_mb
44 
45 #define CRYPTODEV_IV_OFFSET (offsetof (cryptodev_op_t, iv))
46 #define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))
47 
48 /* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN */
49 #define foreach_vnet_aead_crypto_conversion \
50  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8) \
51  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
52  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8) \
53  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
54  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8) \
55  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)
56 
57 /**
58  * crypto (alg, cryptodev_alg), hash (alg, digest-size)
59  **/
60 #define foreach_cryptodev_link_async_alg \
61  _ (AES_128_CBC, AES_CBC, SHA1, 12) \
62  _ (AES_192_CBC, AES_CBC, SHA1, 12) \
63  _ (AES_256_CBC, AES_CBC, SHA1, 12) \
64  _ (AES_128_CBC, AES_CBC, SHA224, 14) \
65  _ (AES_192_CBC, AES_CBC, SHA224, 14) \
66  _ (AES_256_CBC, AES_CBC, SHA224, 14) \
67  _ (AES_128_CBC, AES_CBC, SHA256, 16) \
68  _ (AES_192_CBC, AES_CBC, SHA256, 16) \
69  _ (AES_256_CBC, AES_CBC, SHA256, 16) \
70  _ (AES_128_CBC, AES_CBC, SHA384, 24) \
71  _ (AES_192_CBC, AES_CBC, SHA384, 24) \
72  _ (AES_256_CBC, AES_CBC, SHA384, 24) \
73  _ (AES_128_CBC, AES_CBC, SHA512, 32) \
74  _ (AES_192_CBC, AES_CBC, SHA512, 32) \
75  _ (AES_256_CBC, AES_CBC, SHA512, 32)
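/* Each row above becomes, via the _() macro in prepare_linked_xform() and
 * the handler registration in dpdk_cryptodev_init(), one linked
 * cipher+HMAC variant. For example (assuming the vnet crypto naming
 * scheme) the row _ (AES_128_CBC, AES_CBC, SHA1, 12) selects
 * VNET_CRYPTO_ALG_AES_128_CBC_SHA1_TAG12 and maps it to
 * RTE_CRYPTO_CIPHER_AES_CBC plus RTE_CRYPTO_AUTH_SHA1_HMAC with a
 * 12-byte truncated digest. */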
76 
77 #define foreach_vnet_crypto_status_conversion \
78  _(SUCCESS, COMPLETED) \
79  _(NOT_PROCESSED, WORK_IN_PROGRESS) \
80  _(AUTH_FAILED, FAIL_BAD_HMAC) \
81  _(INVALID_SESSION, FAIL_ENGINE_ERR) \
82  _(INVALID_ARGS, FAIL_ENGINE_ERR) \
83  _(ERROR, FAIL_ENGINE_ERR)
84 
85 static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
86 #define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
87  foreach_vnet_crypto_status_conversion
88 #undef _
89 };
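/* A sketch of the table above: it is indexed by DPDK's rte_crypto_op
 * status codes which, in the DPDK releases this file targets, start at
 * RTE_CRYPTO_OP_STATUS_SUCCESS = 0 and follow the order of the _() rows,
 * e.g.:
 *   cryptodev_status_conversion[RTE_CRYPTO_OP_STATUS_SUCCESS]
 *     == VNET_CRYPTO_OP_STATUS_COMPLETED
 *   cryptodev_status_conversion[RTE_CRYPTO_OP_STATUS_AUTH_FAILED]
 *     == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC */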
90 
91 typedef struct
92 {
93  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
94  struct rte_crypto_op op;
95  struct rte_crypto_sym_op sop;
96  u8 iv[16];
97  u8 aad[16];
98  vnet_crypto_async_frame_t *frame;
99  u32 n_elts;
100 } cryptodev_op_t;
101 
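/* Note: CRYPTODEV_IV_OFFSET / CRYPTODEV_AAD_OFFSET above are offsetof()
 * into this struct. The rte_crypto_op is its first member, so the device
 * can reach the IV and AAD of an op as op.phys_addr plus the respective
 * offset (see the aead.aad.phys_addr computation in
 * cryptodev_frame_gcm_enqueue() below). */
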
102 typedef enum
103 {
104  CRYPTODEV_OP_TYPE_ENCRYPT = 0,
105  CRYPTODEV_OP_TYPE_DECRYPT,
106  CRYPTODEV_N_OP_TYPES,
107 } cryptodev_op_type_t;
108 
109 typedef struct
110 {
111  struct rte_cryptodev_sym_session *keys[CRYPTODEV_N_OP_TYPES];
112 } cryptodev_key_t;
113 
114 typedef struct
115 {
116  u32 dev_id;
117  u32 q_id;
118  char *desc;
119 } cryptodev_inst_t;
120 
121 typedef struct
122 {
123  struct rte_mempool *cop_pool;
124  struct rte_mempool *sess_pool;
125  struct rte_mempool *sess_priv_pool;
126 } cryptodev_numa_data_t;
127 
128 typedef struct
129 {
130  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
131  u16 cryptodev_id;
132  u16 cryptodev_q;
133  u32 inflight;
134  cryptodev_op_t **cops;
135  struct rte_ring *ring;
136 } cryptodev_engine_thread_t;
137 
138 typedef struct
139 {
140  cryptodev_numa_data_t *per_numa_data;
141  cryptodev_key_t *keys;
142  cryptodev_engine_thread_t *per_thread_data;
143  enum rte_iova_mode iova_mode;
144  cryptodev_inst_t *cryptodev_inst;
145  clib_bitmap_t *active_cdev_inst_mask;
146  clib_spinlock_t tlock;
147 } cryptodev_main_t;
148 
149 cryptodev_main_t cryptodev_main;
150 
151 static int
152 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
153  cryptodev_op_type_t op_type,
154  const vnet_crypto_key_t * key, u32 aad_len)
155 {
156  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
157  memset (xform, 0, sizeof (*xform));
158  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
159  xform->next = 0;
160 
161  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
162  key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
163  key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
164  return -1;
165 
166  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
167  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
168  RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
169  aead_xform->aad_length = aad_len;
170  aead_xform->digest_length = 16;
171  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
172  aead_xform->iv.length = 12;
173  aead_xform->key.data = key->data;
174  aead_xform->key.length = vec_len (key->data);
175 
176  return 0;
177 }
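
/* Note: the AES-GCM key size (128/192/256 bit) is conveyed solely through
 * key.length = vec_len (key->data); RTE_CRYPTO_AEAD_AES_GCM covers all
 * three variants. The IV length (12) and digest length (16) are fixed here
 * and match every row of foreach_vnet_aead_crypto_conversion above. */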
178 
179 static int
180 prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
181  cryptodev_op_type_t op_type,
182  const vnet_crypto_key_t * key)
183 {
184  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
185  vnet_crypto_key_t *key_cipher, *key_auth;
186  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
187  enum rte_crypto_auth_algorithm auth_algo = ~0;
188  u32 digest_len = ~0;
189 
190  key_cipher = vnet_crypto_get_key (key->index_crypto);
191  key_auth = vnet_crypto_get_key (key->index_integ);
192  if (!key_cipher || !key_auth)
193  return -1;
194 
195  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
196  {
197  xform_cipher = xforms;
198  xform_auth = xforms + 1;
199  xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
200  xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
201  }
202  else
203  {
204  xform_cipher = xforms + 1;
205  xform_auth = xforms;
206  xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
207  xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
208  }
209 
210  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
211  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
212  xforms->next = xforms + 1;
213 
214  switch (key->async_alg)
215  {
216 #define _(a, b, c, d) \
217  case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:\
218  cipher_algo = RTE_CRYPTO_CIPHER_##b; \
219  auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
220  digest_len = d; \
221  break;
222 
223  foreach_cryptodev_link_async_alg
224 #undef _
225  default:
226  return -1;
227  }
228 
229  xform_cipher->cipher.algo = cipher_algo;
230  xform_cipher->cipher.key.data = key_cipher->data;
231  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
232  xform_cipher->cipher.iv.length = 16;
233  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;
234 
235  xform_auth->auth.algo = auth_algo;
236  xform_auth->auth.digest_length = digest_len;
237  xform_auth->auth.key.data = key_auth->data;
238  xform_auth->auth.key.length = vec_len (key_auth->data);
239 
240  return 0;
241 }
242 
243 static int
244 cryptodev_session_create (vnet_crypto_key_t * const key,
245  struct rte_mempool *sess_priv_pool,
246  cryptodev_key_t * session_pair, u32 aad_len)
247 {
248  struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
249  struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
250  cryptodev_main_t *cmt = &cryptodev_main;
251  cryptodev_inst_t *dev_inst;
252  struct rte_cryptodev *cdev;
253  int ret;
254  uint8_t dev_id = 0;
255 
256  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
257  ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
258  else
259  ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
260  aad_len);
261  if (ret)
262  return 0;
263 
264  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
265  prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
266  else
267  prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);
268 
269  vec_foreach (dev_inst, cmt->cryptodev_inst)
270  {
271  dev_id = dev_inst->dev_id;
272  cdev = rte_cryptodev_pmd_get_dev (dev_id);
273 
274  /* if the session is already configured for the driver type, avoid
275  configuring it again to increase the session data's refcnt */
276  if (session_pair->keys[0]->sess_data[cdev->driver_id].data &&
277  session_pair->keys[1]->sess_data[cdev->driver_id].data)
278  continue;
279 
280  ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0],
281  xforms_enc, sess_priv_pool);
282  ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1],
283  xforms_dec, sess_priv_pool);
284  if (ret < 0)
285  return ret;
286  }
287  session_pair->keys[0]->opaque_data = aad_len;
288  session_pair->keys[1]->opaque_data = aad_len;
289 
290  return 0;
291 }
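
/* Note: a session pair holds keys[CRYPTODEV_OP_TYPE_ENCRYPT] and
 * keys[CRYPTODEV_OP_TYPE_DECRYPT]. The AAD length is stashed in each
 * session's opaque_data so the GCM enqueue path can spot a frame whose
 * aad_len differs from what the session was created with and rebuild the
 * session through cryptodev_sess_handler (VNET_CRYPTO_KEY_OP_MODIFY). */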
292 
293 static void
294 cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
295 {
296  u32 n_devs, i;
297 
298  if (sess == NULL)
299  return;
300 
301  n_devs = rte_cryptodev_count ();
302 
303  for (i = 0; i < n_devs; i++)
304  rte_cryptodev_sym_session_clear (i, sess);
305 
306  rte_cryptodev_sym_session_free (sess);
307 }
308 
309 static int
310 cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key)
311 {
312  vnet_crypto_alg_t alg;
313  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
314  return 0;
315 
316  alg = key->alg;
317 
318 #define _(a, b, c, d, e, f) \
319  if (alg == VNET_CRYPTO_ALG_##a) \
320  return 0;
321 
322  foreach_vnet_aead_crypto_conversion
323 #undef _
324  return -1;
325 }
326 
327 static_always_inline void
328 cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
329  vnet_crypto_key_index_t idx, u32 aad_len)
330 {
331  cryptodev_main_t *cmt = &cryptodev_main;
332  cryptodev_numa_data_t *numa_data;
333  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
334  struct rte_mempool *sess_pool, *sess_priv_pool;
335  cryptodev_key_t *ckey = 0;
336  int ret = 0;
337 
338  if (kop == VNET_CRYPTO_KEY_OP_DEL)
339  {
340  if (idx >= vec_len (cmt->keys))
341  return;
342 
343  ckey = pool_elt_at_index (cmt->keys, idx);
344  cryptodev_session_del (ckey->keys[0]);
345  cryptodev_session_del (ckey->keys[1]);
346  ckey->keys[0] = 0;
347  ckey->keys[1] = 0;
348  pool_put (cmt->keys, ckey);
349  return;
350  }
351  else if (kop == VNET_CRYPTO_KEY_OP_MODIFY)
352  {
353  if (idx >= vec_len (cmt->keys))
354  return;
355 
356  ckey = pool_elt_at_index (cmt->keys, idx);
357 
358  cryptodev_session_del (ckey->keys[0]);
359  cryptodev_session_del (ckey->keys[1]);
360  ckey->keys[0] = 0;
361  ckey->keys[1] = 0;
362  }
363  else /* create key */
364  pool_get_zero (cmt->keys, ckey);
365 
366  /* do not create session for unsupported alg */
367  if (cryptodev_check_supported_vnet_alg (key))
368  return;
369 
370  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
371  sess_pool = numa_data->sess_pool;
372  sess_priv_pool = numa_data->sess_priv_pool;
373 
374  ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool);
375  if (!ckey->keys[0])
376  {
377  ret = -1;
378  goto clear_key;
379  }
380 
381  ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool);
382  if (!ckey->keys[1])
383  {
384  ret = -1;
385  goto clear_key;
386  }
387 
388  ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len);
389 
390 clear_key:
391  if (ret != 0)
392  {
393  cryptodev_session_del (ckey->keys[0]);
394  cryptodev_session_del (ckey->keys[1]);
395  memset (ckey, 0, sizeof (*ckey));
396  pool_put (cmt->keys, ckey);
397  }
398 }
399 
400 /*static*/ void
401 cryptodev_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
402  vnet_crypto_key_index_t idx)
403 {
404  cryptodev_sess_handler (vm, kop, idx, 8);
405 }
406 
407 static_always_inline void
408 cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t * f,
409  vnet_crypto_op_status_t s)
410 {
411  u32 n_elts = f->n_elts, i;
412 
413  for (i = 0; i < n_elts; i++)
414  f->elts[i].status = s;
415  f->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
416 }
417 
418 static_always_inline rte_iova_t
419 cryptodev_get_iova (clib_pmalloc_main_t * pm, enum rte_iova_mode mode,
420  void *data)
421 {
422  u64 index;
423  if (mode == RTE_IOVA_VA)
424  return (rte_iova_t) pointer_to_uword (data);
425 
426  index = clib_pmalloc_get_page_index (pm, data);
427  return pointer_to_uword (data) - pm->lookup_table[index];
428 }
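
/* In RTE_IOVA_VA mode the IO address is simply the virtual address; in PA
 * mode the pmalloc lookup table stores, per page, the VA-to-PA delta, so
 * VA - lookup_table[page] yields the bus address the device can DMA to. */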
429 
430 static_always_inline void
431 cryptodev_validate_mbuf_chain (vlib_main_t * vm, struct rte_mbuf *mb,
432  vlib_buffer_t * b)
433 {
434  struct rte_mbuf *first_mb = mb, *last_mb = mb; /**< last mbuf */
435  /* when input node is not dpdk, mbuf data len is not initialized, for
436  * single buffer it is not a problem since the data length is written
437  * into cryptodev operation. For chained buffer a reference data length
438  * has to be computed through vlib_buffer.
439  *
440  * even when input node is dpdk, it is possible chained vlib_buffers
441  * are updated (either added or removed a buffer) but not mbuf fields.
442  * we have to re-link every mbuf in the chain.
443  */
444  u16 data_len = b->current_length + (b->data + b->current_data -
445  rte_pktmbuf_mtod (mb, u8 *));
446 
447  first_mb->nb_segs = 1;
448  first_mb->pkt_len = first_mb->data_len = data_len;
449 
450  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
451  {
452  b = vlib_get_buffer (vm, b->next_buffer);
453  mb = rte_mbuf_from_vlib_buffer (b);
454  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
455  rte_pktmbuf_reset (mb);
456  last_mb->next = mb;
457  last_mb = mb;
458  mb->data_len = b->current_length;
459  mb->pkt_len = b->current_length;
460  mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
461  first_mb->nb_segs++;
462  if (PREDICT_FALSE (b->ref_count > 1))
463  mb->pool =
464  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
465  }
466 }
467 
468 static_always_inline int
469 cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
470  vnet_crypto_async_frame_t * frame,
471  cryptodev_op_type_t op_type)
472 {
473  cryptodev_main_t *cmt = &cryptodev_main;
474  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
475  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
476  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
477  vnet_crypto_async_frame_elt_t *fe;
478  cryptodev_op_t **cop;
479  u32 *bi;
480  u32 n_enqueue, n_elts;
481  cryptodev_key_t *key;
482  u32 last_key_index;
483 
484  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
485  return -1;
486  n_elts = frame->n_elts;
487 
488  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
489  {
490  cryptodev_mark_frame_err_status (frame,
491  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
492  return -1;
493  }
494 
495  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
496  (void **) cet->cops, n_elts) < 0))
497  {
498  cryptodev_mark_frame_err_status (frame,
499  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
500  return -1;
501  }
502 
503  cop = cet->cops;
504  fe = frame->elts;
505  bi = frame->buffer_indices;
506  cop[0]->frame = frame;
507  cop[0]->n_elts = n_elts;
508 
509  key = pool_elt_at_index (cmt->keys, fe->key_index);
510  last_key_index = fe->key_index;
511 
512  while (n_elts)
513  {
514  vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
515  struct rte_crypto_sym_op *sop = &cop[0]->sop;
516  i16 crypto_offset = fe->crypto_start_offset;
517  i16 integ_offset = fe->integ_start_offset;
518  u32 offset_diff = crypto_offset - integ_offset;
519 
520  if (n_elts > 2)
521  {
522  CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE);
523  CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE);
524  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
525  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
526  }
527  if (last_key_index != fe->key_index)
528  {
529  key = pool_elt_at_index (cmt->keys, fe->key_index);
530  last_key_index = fe->key_index;
531  }
532 
533  sop->m_src = rte_mbuf_from_vlib_buffer (b);
534  sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;
535  sop->m_dst = 0;
536  /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes,
537  * so we have to manually adjust mbuf data_off here so cryptodev can
538  * correctly compute the data pointer. The prepend here will be later
539  * rewritten by tx. */
540  if (PREDICT_TRUE (fe->integ_start_offset < 0))
541  {
542  sop->m_src->data_off += fe->integ_start_offset;
543  integ_offset = 0;
544  crypto_offset = offset_diff;
545  }
546  sop->session = key->keys[op_type];
547  sop->cipher.data.offset = crypto_offset;
548  sop->cipher.data.length = fe->crypto_total_length;
549  sop->auth.data.offset = integ_offset;
550  sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
551  sop->auth.digest.data = fe->digest;
552  sop->auth.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode,
553  fe->digest);
554  if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
555  cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
556  else
557  /* for input nodes that are not dpdk-input, it is possible the mbuf
558  * was updated before as one of the chained mbufs. Setting nb_segs
559  * to 1 here to prevent the cryptodev PMD to access potentially
560  * invalid m_src->next pointers.
561  */
562  sop->m_src->nb_segs = 1;
563  clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
564  cop++;
565  bi++;
566  fe++;
567  n_elts--;
568  }
569 
570  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
571  cet->cryptodev_q,
572  (struct rte_crypto_op **)
573  cet->cops, frame->n_elts);
574  ASSERT (n_enqueue == frame->n_elts);
575  cet->inflight += n_enqueue;
576 
577  return 0;
578 }
579 
580 static_always_inline int
581 cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
582  vnet_crypto_async_frame_t * frame,
583  cryptodev_op_type_t op_type, u8 aad_len)
584 {
585  cryptodev_main_t *cmt = &cryptodev_main;
586  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
587  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
588  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
589  vnet_crypto_async_frame_elt_t *fe;
590  cryptodev_op_t **cop;
591  u32 *bi;
592  u32 n_enqueue = 0, n_elts;
593  cryptodev_key_t *key;
594  u32 last_key_index;
595  u8 sess_aad_len;
596 
597  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
598  return -1;
599  n_elts = frame->n_elts;
600 
601  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
602  {
603  cryptodev_mark_frame_err_status (frame,
604  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
605  return -1;
606  }
607 
608  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
609  (void **) cet->cops, n_elts) < 0))
610  {
611  cryptodev_mark_frame_err_status (frame,
612  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
613  return -1;
614  }
615 
616  cop = cet->cops;
617  fe = frame->elts;
618  bi = frame->buffer_indices;
619  cop[0]->frame = frame;
620  cop[0]->n_elts = n_elts;
621 
622  key = pool_elt_at_index (cmt->keys, fe->key_index);
623  last_key_index = fe->key_index;
624  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
625  if (PREDICT_FALSE (sess_aad_len != aad_len))
626  cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
627  fe->key_index, aad_len);
628 
629  while (n_elts)
630  {
631  vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
632  struct rte_crypto_sym_op *sop = &cop[0]->sop;
633  u16 crypto_offset = fe->crypto_start_offset;
634 
635  if (n_elts > 2)
636  {
637  CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE);
638  CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE);
639  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
640  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
641  }
642  if (last_key_index != fe->key_index)
643  {
644  key = pool_elt_at_index (cmt->keys, fe->key_index);
645  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
646  if (PREDICT_FALSE (sess_aad_len != aad_len))
647  {
648  cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
649  fe->key_index, aad_len);
650  }
651  last_key_index = fe->key_index;
652  }
653 
654  sop->m_src = rte_mbuf_from_vlib_buffer (b);
655  sop->m_dst = 0;
656  /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes,
657  * so we have to manually adjust mbuf data_off here so cryptodev can
658  * correctly compute the data pointer. The prepend here will be later
659  * rewritten by tx. */
660  if (PREDICT_FALSE (fe->crypto_start_offset < 0))
661  {
662  rte_pktmbuf_prepend (sop->m_src, -fe->crypto_start_offset);
663  crypto_offset = 0;
664  }
665 
666  sop->session = key->keys[op_type];
667  sop->aead.aad.data = cop[0]->aad;
668  sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
669  sop->aead.data.length = fe->crypto_total_length;
670  sop->aead.data.offset = crypto_offset;
671  sop->aead.digest.data = fe->tag;
672  sop->aead.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode,
673  fe->tag);
674  if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
675  cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
676  else
677  /* for input nodes that are not dpdk-input, it is possible the mbuf
678  * was updated before as one of the chained mbufs. Setting nb_segs
679  * to 1 here to prevent the cryptodev PMD to access potentially
680  * invalid m_src->next pointers.
681  */
682  sop->m_src->nb_segs = 1;
683  clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
684  clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
685  cop++;
686  bi++;
687  fe++;
688  n_elts--;
689  }
690 
691  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
692  cet->cryptodev_q,
693  (struct rte_crypto_op **)
694  cet->cops, frame->n_elts);
695  ASSERT (n_enqueue == frame->n_elts);
696  cet->inflight += n_enqueue;
697 
698  return 0;
699 }
700 
701 static_always_inline cryptodev_op_t *
702 cryptodev_get_ring_head (struct rte_ring * ring)
703 {
704  cryptodev_op_t **r = (void *) &ring[1];
705  return r[ring->cons.head & ring->mask];
706 }
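
/* This peeks at the oldest ring entry without dequeuing it. It relies on
 * rte_ring's layout (the object table follows the ring header, hence
 * &ring[1]) and on the ring being single-consumer, so cons.head is stable
 * from the owning worker thread. */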
707 
708 static_always_inline vnet_crypto_async_frame_t *
709 cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
710  u32 * enqueue_thread_idx)
711 {
712  cryptodev_main_t *cmt = &cryptodev_main;
713  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
714  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
715  cryptodev_op_t *cop0, **cop = cet->cops;
716  vnet_crypto_async_frame_elt_t *fe;
717  vnet_crypto_async_frame_t *frame;
718  u32 n_elts, n_completed_ops = rte_ring_count (cet->ring);
719  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0; /* sum of status */
720 
721  if (cet->inflight)
722  {
723  n_elts = clib_min (CRYPTODEV_NB_CRYPTO_OPS - n_completed_ops,
724  VNET_CRYPTO_FRAME_SIZE * 2);
725  n_elts = rte_cryptodev_dequeue_burst
726  (cet->cryptodev_id, cet->cryptodev_q,
727  (struct rte_crypto_op **) cet->cops, n_elts);
728  cet->inflight -= n_elts;
729  n_completed_ops += n_elts;
730 
731  rte_ring_sp_enqueue_burst (cet->ring, (void *) cet->cops, n_elts, NULL);
732  }
733 
734  if (PREDICT_FALSE (n_completed_ops == 0))
735  return 0;
736 
737  cop0 = cryptodev_get_ring_head (cet->ring);
738  /* not a single frame is finished */
739  if (PREDICT_FALSE (cop0->n_elts > rte_ring_count (cet->ring)))
740  return 0;
741 
742  frame = cop0->frame;
743  n_elts = cop0->n_elts;
744  n_elts = rte_ring_sc_dequeue_bulk (cet->ring, (void **) cet->cops,
745  n_elts, 0);
746  fe = frame->elts;
747 
748  while (n_elts > 4)
749  {
750  ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
751  ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
752  ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
753  ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];
754 
755  cop += 4;
756  fe += 4;
757  n_elts -= 4;
758  }
759 
760  while (n_elts)
761  {
762  ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
763  fe++;
764  cop++;
765  n_elts--;
766  }
767 
768  frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
769  VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
770 
771  rte_mempool_put_bulk (numa->cop_pool, (void **) cet->cops, frame->n_elts);
772  *nb_elts_processed = frame->n_elts;
773  *enqueue_thread_idx = frame->enqueue_thread_index;
774  return frame;
775 }
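
/* Completion model: finished rte_crypto_ops are staged into cet->ring in
 * completion order. The ops of one frame are enqueued back-to-back and the
 * first op of each frame carries the frame pointer and its element count
 * (set at enqueue time), so a frame is handed back to vnet only once the
 * ring holds at least cop0->n_elts ops, i.e. the whole frame completed. */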
776 
777 /* *INDENT-OFF* */
778 static_always_inline int
779 cryptodev_enqueue_gcm_aad_8_enc (vlib_main_t * vm,
780  vnet_crypto_async_frame_t * frame)
781 {
782  return cryptodev_frame_gcm_enqueue (vm, frame,
783  CRYPTODEV_OP_TYPE_ENCRYPT, 8);
784 }
785 static_always_inline int
786 cryptodev_enqueue_gcm_aad_12_enc (vlib_main_t * vm,
787  vnet_crypto_async_frame_t * frame)
788 {
789  return cryptodev_frame_gcm_enqueue (vm, frame,
790  CRYPTODEV_OP_TYPE_ENCRYPT, 12);
791 }
792 
793 static_always_inline int
794 cryptodev_enqueue_gcm_aad_8_dec (vlib_main_t * vm,
795  vnet_crypto_async_frame_t * frame)
796 {
797  return cryptodev_frame_gcm_enqueue (vm, frame,
798  CRYPTODEV_OP_TYPE_DECRYPT, 8);
799 }
800 static_always_inline int
801 cryptodev_enqueue_gcm_aad_12_dec (vlib_main_t * vm,
802  vnet_crypto_async_frame_t * frame)
803 {
804  return cryptodev_frame_gcm_enqueue (vm, frame,
805  CRYPTODEV_OP_TYPE_DECRYPT, 12);
806 }
807 
808 static_always_inline int
809 cryptodev_enqueue_linked_alg_enc (vlib_main_t * vm,
810  vnet_crypto_async_frame_t * frame)
811 {
812  return cryptodev_frame_linked_algs_enqueue (vm, frame,
813  CRYPTODEV_OP_TYPE_ENCRYPT);
814 }
815 
816 static_always_inline int
817 cryptodev_enqueue_linked_alg_dec (vlib_main_t * vm,
818  vnet_crypto_async_frame_t * frame)
819 {
820  return cryptodev_frame_linked_algs_enqueue (vm, frame,
821  CRYPTODEV_OP_TYPE_DECRYPT);
822 }
823 
824 typedef enum
825 {
826  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
827  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
828 } cryptodev_resource_assign_op_t;
829 
830 /**
831  * assign a cryptodev resource to a worker.
832  * @param cet: the worker thread data
833  * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
834  * @param op: the assignment method.
835  * @return: 0 on success, a negative number otherwise.
836  **/
837 static_always_inline int
838 cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
839  u32 cryptodev_inst_index,
840  cryptodev_resource_assign_op_t op)
841 {
842  cryptodev_main_t *cmt = &cryptodev_main;
843  cryptodev_inst_t *cinst = 0;
844  uword idx;
845 
846  /* assign resource is only allowed when no inflight op is in the queue */
847  if (cet->inflight)
848  return -EBUSY;
849 
850  switch (op)
851  {
852  case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
853  if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
854  vec_len (cmt->cryptodev_inst))
855  return -1;
856 
857  clib_spinlock_lock (&cmt->tlock);
858  idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
859  clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
860  cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
861  cet->cryptodev_id = cinst->dev_id;
862  cet->cryptodev_q = cinst->q_id;
863  clib_spinlock_unlock (&cmt->tlock);
864  break;
865  case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
866  /* assigning a used cryptodev resource is not allowed */
867  if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
868  == 1)
869  return -EBUSY;
870  vec_foreach_index (idx, cmt->cryptodev_inst)
871  {
872  cinst = cmt->cryptodev_inst + idx;
873  if (cinst->dev_id == cet->cryptodev_id &&
874  cinst->q_id == cet->cryptodev_q)
875  break;
876  }
877  /* invalid existing worker resource assignment */
878  if (idx == vec_len (cmt->cryptodev_inst))
879  return -EINVAL;
880  clib_spinlock_lock (&cmt->tlock);
881  clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
882  clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
883  cryptodev_inst_index, 1);
884  cinst = cmt->cryptodev_inst + cryptodev_inst_index;
885  cet->cryptodev_id = cinst->dev_id;
886  cet->cryptodev_q = cinst->q_id;
887  clib_spinlock_unlock (&cmt->tlock);
888  break;
889  default:
890  return -EINVAL;
891  }
892  return 0;
893 }
894 
895 static u8 *
896 format_cryptodev_inst (u8 * s, va_list * args)
897 {
898  cryptodev_main_t *cmt = &cryptodev_main;
899  u32 inst = va_arg (*args, u32);
900  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
901  u32 thread_index = 0;
902  struct rte_cryptodev_info info;
903 
904  rte_cryptodev_info_get (cit->dev_id, &info);
905  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);
906 
907  vec_foreach_index (thread_index, cmt->per_thread_data)
908  {
909  cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
910  if (vlib_num_workers () > 0 && thread_index == 0)
911  continue;
912 
913  if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
914  {
915  s = format (s, "%u (%v)\n", thread_index,
916  vlib_worker_threads[thread_index].name);
917  break;
918  }
919  }
920 
921  if (thread_index == vec_len (cmt->per_thread_data))
922  s = format (s, "%s\n", "free");
923 
924  return s;
925 }
926 
927 static clib_error_t *
928 cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
929  vlib_cli_command_t * cmd)
930 {
931  cryptodev_main_t *cmt = &cryptodev_main;
932  u32 inst;
933 
934  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
935  "Assigned-to");
936  if (vec_len (cmt->cryptodev_inst) == 0)
937  {
938  vlib_cli_output (vm, "(nil)\n");
939  return 0;
940  }
941 
942  vec_foreach_index (inst, cmt->cryptodev_inst)
943  vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);
944 
945  return 0;
946 }
947 
948 VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
949  .path = "show cryptodev assignment",
950  .short_help = "show cryptodev assignment",
951  .function = cryptodev_show_assignment_fn,
952 };
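
/* Example session (output values are illustrative):
 *   vpp# show cryptodev assignment
 *   No.  Name                     Queue-id  Assigned-to
 *   0    crypto_aesni_mb0         0         1 (vpp_wk_0)
 *   1    crypto_aesni_mb0         1         free
 */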
953 
954 static clib_error_t *
955 cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
956  vlib_cli_command_t * cmd)
957 {
958  cryptodev_main_t *cmt = &cryptodev_main;
959  cryptodev_engine_thread_t *cet;
960  unformat_input_t _line_input, *line_input = &_line_input;
961  u32 thread_index, inst_index;
962  u32 thread_present = 0, inst_present = 0;
963  clib_error_t *error = 0;
964  int ret;
965 
966  /* Get a line of input. */
967  if (!unformat_user (input, unformat_line_input, line_input))
968  return 0;
969 
970  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
971  {
972  if (unformat (line_input, "thread %u", &thread_index))
973  thread_present = 1;
974  else if (unformat (line_input, "resource %u", &inst_index))
975  inst_present = 1;
976  else
977  {
978  error = clib_error_return (0, "unknown input `%U'",
979  format_unformat_error, line_input);
980  return error;
981  }
982  }
983 
984  if (!thread_present || !inst_present)
985  {
986  error = clib_error_return (0, "mandatory argument(s) missing");
987  return error;
988  }
989 
990  if (thread_index == 0 && vlib_num_workers () > 0)
991  {
992  error =
993  clib_error_return (0, "assign crypto resource for master thread");
994  return error;
995  }
996 
997  if (thread_index > vec_len (cmt->per_thread_data) ||
998  inst_index > vec_len (cmt->cryptodev_inst))
999  {
1000  error = clib_error_return (0, "wrong thread id or resource id");
1001  return error;
1002  }
1003 
1004  cet = cmt->per_thread_data + thread_index;
1005  ret = cryptodev_assign_resource (cet, inst_index,
1006  CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
1007  if (ret)
1008  {
1009  error = clib_error_return (0, "cryptodev_assign_resource returned %i",
1010  ret);
1011  return error;
1012  }
1013 
1014  return 0;
1015 }
1016 
1017 VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
1018  .path = "set cryptodev assignment",
1019  .short_help = "set cryptodev assignment thread <thread_index> "
1020  "resource <inst_index>",
1021  .function = cryptodev_set_assignment_fn,
1022 };
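
/* Example (indices are illustrative): pin worker thread 1 to cryptodev
 * resource 2. This is refused with -EBUSY while the thread still has
 * inflight ops:
 *   vpp# set cryptodev assignment thread 1 resource 2
 */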
1023 
1024 static int
1025 check_cryptodev_alg_support (u32 dev_id)
1026 {
1027  const struct rte_cryptodev_symmetric_capability *cap;
1028  struct rte_cryptodev_sym_capability_idx cap_idx;
1029 
1030 #define _(a, b, c, d, e, f) \
1031  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b; \
1032  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c; \
1033  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
1034  if (!cap) \
1035  return -RTE_CRYPTO_##b##_##c; \
1036  else \
1037  { \
1038  if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
1039  return -RTE_CRYPTO_##b##_##c; \
1040  if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f) \
1041  return -RTE_CRYPTO_##b##_##c; \
1042  if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d) \
1043  return -RTE_CRYPTO_##b##_##c; \
1044  }
1045 
1046  foreach_vnet_aead_crypto_conversion
1047 #undef _
1048 
1049 #define _(a, b, c, d) \
1050  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; \
1051  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b; \
1052  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
1053  if (!cap) \
1054  return -RTE_CRYPTO_CIPHER_##b; \
1055  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; \
1056  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC; \
1057  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
1058  if (!cap) \
1059  return -RTE_CRYPTO_AUTH_##c;
1060 
1061  foreach_cryptodev_link_async_alg
1062 #undef _
1063  return 0;
1064 }
1065 
1066 static u32
1067 cryptodev_count_queue (u32 numa)
1068 {
1069  struct rte_cryptodev_info info;
1070  u32 n_cryptodev = rte_cryptodev_count ();
1071  u32 i, q_count = 0;
1072 
1073  for (i = 0; i < n_cryptodev; i++)
1074  {
1075  rte_cryptodev_info_get (i, &info);
1076  if (rte_cryptodev_socket_id (i) != numa)
1077  {
1078  clib_warning ("DPDK crypto resource %s is in different numa node "
1079  "as %u, ignored", info.device->name, numa);
1080  continue;
1081  }
1082  q_count += info.max_nb_queue_pairs;
1083  }
1084 
1085  return q_count;
1086 }
1087 
1088 static int
1089 cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id)
1090 {
1091  struct rte_cryptodev_info info;
1092  struct rte_cryptodev *cdev;
1093  cryptodev_main_t *cmt = &cryptodev_main;
1094  cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
1095  vm->numa_node);
1096  u32 i;
1097  int ret;
1098 
1099  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
1100  rte_cryptodev_info_get (cryptodev_id, &info);
1101 
1102  ret = check_cryptodev_alg_support (cryptodev_id);
1103  if (ret != 0)
1104  return ret;
1105 
1106  /** If the device is already started, we reuse it, otherwise configure
1107  * both the device and queue pair.
1108  **/
1109  if (!cdev->data->dev_started)
1110  {
1111  struct rte_cryptodev_config cfg;
1112 
1113  cfg.socket_id = vm->numa_node;
1114  cfg.nb_queue_pairs = info.max_nb_queue_pairs;
1115 
1116  rte_cryptodev_configure (cryptodev_id, &cfg);
1117 
1118  for (i = 0; i < info.max_nb_queue_pairs; i++)
1119  {
1120  struct rte_cryptodev_qp_conf qp_cfg;
1121 
1122  int ret;
1123 
1124  qp_cfg.mp_session = numa_data->sess_pool;
1125  qp_cfg.mp_session_private = numa_data->sess_priv_pool;
1126  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
1127 
1128  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
1129  vm->numa_node);
1130  if (ret)
1131  break;
1132  }
1133  if (i != info.max_nb_queue_pairs)
1134  return -1;
1135  /* start the device */
1136  rte_cryptodev_start (cryptodev_id);
1137  }
1138 
1139  for (i = 0; i < info.max_nb_queue_pairs; i++)
1140  {
1141  cryptodev_inst_t *cdev_inst;
1142  vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
1143  cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
1144  cdev_inst->dev_id = cryptodev_id;
1145  cdev_inst->q_id = i;
1146 
1147  snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
1148  "%s_q%u", info.device->name, i);
1149  }
1150 
1151  return 0;
1152 }
1153 
1154 static int
1155 cryptodev_create_device (vlib_main_t *vm, u32 n_queues)
1156 {
1157  char name[RTE_CRYPTODEV_NAME_MAX_LEN], args[128];
1158  u32 dev_id = 0;
1159  int ret;
1160 
1161  /* find an unused name to create the device */
1162  while (dev_id < RTE_CRYPTO_MAX_DEVS)
1163  {
1164  snprintf (name, RTE_CRYPTODEV_NAME_MAX_LEN - 1, "%s%u",
1165  RTE_STR (CRYPTODEV_DEF_DRIVE), dev_id);
1166  if (rte_cryptodev_get_dev_id (name) < 0)
1167  break;
1168  dev_id++;
1169  }
1170 
1171  if (dev_id == RTE_CRYPTO_MAX_DEVS)
1172  return -1;
1173 
1174  snprintf (args, 127, "socket_id=%u,max_nb_queue_pairs=%u",
1175  vm->numa_node, n_queues);
1176 
1177  ret = rte_vdev_init(name, args);
1178  if (ret < 0)
1179  return ret;
1180 
1181  clib_warning ("Created cryptodev device %s (%s)", name, args);
1182 
1183  return 0;
1184 }
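
/* Example: on numa node 0, asking for 4 queues builds the vdev name
 * "crypto_aesni_mb0" (CRYPTODEV_DEF_DRIVE stringified plus the first free
 * index) with args "socket_id=0,max_nb_queue_pairs=4"; the concrete values
 * here are illustrative. */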
1185 
1186 static int
1187 cryptodev_cmp (void *v1, void *v2)
1188 {
1189  cryptodev_inst_t *a1 = v1;
1190  cryptodev_inst_t *a2 = v2;
1191 
1192  if (a1->q_id > a2->q_id)
1193  return 1;
1194  if (a1->q_id < a2->q_id)
1195  return -1;
1196  return 0;
1197 }
1198 
1199 static int
1200 cryptodev_probe (vlib_main_t *vm, u32 n_workers)
1201 {
1202  cryptodev_main_t *cmt = &cryptodev_main;
1203  u32 n_queues = cryptodev_count_queue (vm->numa_node);
1204  u32 i;
1205  int ret;
1206 
1207  /* create an AESNI_MB PMD so the service is available */
1208  if (n_queues < n_workers)
1209  {
1210  u32 q_num = max_pow2 (n_workers - n_queues);
1211  ret = cryptodev_create_device (vm, q_num);
1212  if (ret < 0)
1213  return ret;
1214  }
1215 
1216  for (i = 0; i < rte_cryptodev_count (); i++)
1217  {
1218  ret = cryptodev_configure (vm, i);
1219  if (ret)
1220  return ret;
1221  }
1222 
1223  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);
1224 
1225  return 0;
1226 }
1227 
1228 static int
1229 cryptodev_get_session_sz (vlib_main_t *vm, uint32_t n_workers)
1230 {
1231  u32 sess_data_sz = 0, i;
1232  int ret;
1233 
1234  if (rte_cryptodev_count () == 0)
1235  {
1236  clib_warning ("No cryptodev device available, creating...");
1237  ret = cryptodev_create_device (vm, max_pow2 (n_workers));
1238  if (ret < 0)
1239  {
1240  clib_warning ("Failed");
1241  return ret;
1242  }
1243  }
1244 
1245  for (i = 0; i < rte_cryptodev_count (); i++)
1246  {
1247  u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);
1248 
1249  sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
1250  }
1251 
1252  return sess_data_sz;
1253 }
1254 
1255 static void
1256 dpdk_disable_cryptodev_engine (vlib_main_t *vm)
1257 {
1258  cryptodev_main_t *cmt = &cryptodev_main;
1259  cryptodev_numa_data_t *numa_data;
1260 
1261  vec_validate (cmt->per_numa_data, vm->numa_node);
1262  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
1263 
1264  if (numa_data->sess_pool)
1265  rte_mempool_free (numa_data->sess_pool);
1266  if (numa_data->sess_priv_pool)
1267  rte_mempool_free (numa_data->sess_priv_pool);
1268  if (numa_data->cop_pool)
1269  rte_mempool_free (numa_data->cop_pool);
1270 }
1271 
1272 static void
1273 crypto_op_init (struct rte_mempool *mempool,
1274  void *_arg __attribute__ ((unused)),
1275  void *_obj, unsigned i __attribute__ ((unused)))
1276 {
1277  struct rte_crypto_op *op = _obj;
1278 
1279  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
1280  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1281  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1282  op->phys_addr = rte_mempool_virt2iova (_obj);
1283  op->mempool = mempool;
1284 }
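
/* Mempool object constructor: invoked once per element when the op pool is
 * created (passed as obj_init to rte_mempool_create in
 * dpdk_cryptodev_init), so per-op constants (notably the cached phys_addr
 * used for the AAD IOVA in cryptodev_frame_gcm_enqueue()) are not
 * recomputed per packet. */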
1285 
1286 
1287 clib_error_t *
1288 dpdk_cryptodev_init (vlib_main_t * vm)
1289 {
1290  cryptodev_main_t *cmt = &cryptodev_main;
1291  vlib_thread_main_t *tm = vlib_get_thread_main ();
1292  cryptodev_engine_thread_t *ptd;
1293  cryptodev_numa_data_t *numa_data;
1294  struct rte_mempool *mp;
1295  u32 skip_master = vlib_num_workers () > 0;
1296  u32 n_workers = tm->n_vlib_mains - skip_master;
1297  u32 numa = vm->numa_node;
1298  i32 sess_sz;
1299  u64 n_cop_elts;
1300  u32 eidx;
1301  u32 i;
1302  u8 *name = 0;
1303  clib_error_t *error;
1304  struct rte_crypto_op_pool_private *priv;
1305 
1306  cmt->iova_mode = rte_eal_iova_mode ();
1307 
1308  sess_sz = cryptodev_get_session_sz(vm, n_workers);
1309  if (sess_sz < 0)
1310  {
1311  error = clib_error_return (0, "Not enough cryptodevs");
1312  return error;
1313  }
1314 
1315  /* A total of 4 times n_worker threads * frame size as crypto ops */
1316  n_cop_elts = max_pow2 ((u64)n_workers * CRYPTODEV_NB_CRYPTO_OPS);
1317 
1318  vec_validate (cmt->per_numa_data, vm->numa_node);
1319  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
1320 
1321  /* create session pool for the numa node */
1322  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
1323  mp = rte_cryptodev_sym_session_pool_create ((char *) name,
1324  CRYPTODEV_NB_SESSION,
1325  0, 0, 0, numa);
1326  if (!mp)
1327  {
1328  error = clib_error_return (0, "Not enough memory for mp %s", name);
1329  goto err_handling;
1330  }
1331  vec_free (name);
1332 
1333  numa_data->sess_pool = mp;
1334 
1335  /* create session private pool for the numa node */
1336  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
1337  mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
1338  0, NULL, NULL, NULL, NULL, numa, 0);
1339  if (!mp)
1340  {
1341  error = clib_error_return (0, "Not enough memory for mp %s", name);
1342  vec_free (name);
1343  goto err_handling;
1344  }
1345 
1346  vec_free (name);
1347 
1348  numa_data->sess_priv_pool = mp;
1349 
1350  /* create cryptodev op pool */
1351  name = format (0, "cryptodev_op_pool_%u%c", numa, 0);
1352 
1353  mp = rte_mempool_create ((char *) name, n_cop_elts,
1354  sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2,
1355  sizeof (struct rte_crypto_op_pool_private), NULL,
1356  NULL, crypto_op_init, NULL, numa, 0);
1357  if (!mp)
1358  {
1359  error = clib_error_return (0, "Not enough memory for mp %s", name);
1360  vec_free (name);
1361  goto err_handling;
1362  }
1363 
1364  priv = rte_mempool_get_priv (mp);
1365  priv->priv_size = sizeof (struct rte_crypto_op_pool_private);
1366  priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1367  vec_free (name);
1368  numa_data->cop_pool = mp;
1369 
1370  /* probe all cryptodev devices and get queue info */
1371  if (cryptodev_probe (vm, n_workers) < 0)
1372  {
1373  error = clib_error_return (0, "Failed to configure cryptodev");
1374  goto err_handling;
1375  }
1376 
1377  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
1378  clib_spinlock_init (&cmt->tlock);
1379 
1380  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
1381  CLIB_CACHE_LINE_BYTES);
1382  for (i = skip_master; i < tm->n_vlib_mains; i++)
1383  {
1384  ptd = cmt->per_thread_data + i;
1385  cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
1386  name = format (0, "frames_ring_%u%c", i, 0);
1387  ptd->ring = rte_ring_create((char *) name, CRYPTODEV_NB_CRYPTO_OPS,
1388  vm->numa_node, RING_F_SP_ENQ|RING_F_SC_DEQ);
1389  if (!ptd->ring)
1390  {
1391  error = clib_error_return (0, "Not enough memory for mp %s", name);
1392  vec_free (name);
1393  goto err_handling;
1394  }
1395  vec_validate (ptd->cops, VNET_CRYPTO_FRAME_SIZE - 1);
1396  vec_free(name);
1397  }
1398 
1399  /* register handler */
1400  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
1401  "DPDK Cryptodev Engine");
1402 
1403 #define _(a, b, c, d, e, f) \
1404  vnet_crypto_register_async_handler \
1405  (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC, \
1406  cryptodev_enqueue_gcm_aad_##f##_enc,\
1407  cryptodev_frame_dequeue); \
1408  vnet_crypto_register_async_handler \
1409  (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC, \
1410  cryptodev_enqueue_gcm_aad_##f##_dec, \
1411  cryptodev_frame_dequeue);
1412 
1413  foreach_vnet_aead_crypto_conversion
1414 #undef _
1415 
1416 #define _(a, b, c, d) \
1417  vnet_crypto_register_async_handler \
1418  (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC, \
1419  cryptodev_enqueue_linked_alg_enc, \
1420  cryptodev_frame_dequeue); \
1421  vnet_crypto_register_async_handler \
1422  (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC, \
1423  cryptodev_enqueue_linked_alg_dec, \
1424  cryptodev_frame_dequeue);
1425 
1426  foreach_cryptodev_link_async_alg
1427 #undef _
1428 
1429  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);
1430 
1431  return 0;
1432 
1433 err_handling:
1434  dpdk_disable_cryptodev_engine (vm);
1435 
1436  return error;
1437 }
1438 /* *INDENT-ON* */
1439 
1440 /*
1441  * fd.io coding-style-patch-verification: ON
1442  *
1443  * Local Variables:
1444  * eval: (c-set-style "gnu")
1445  * End:
1446  */