FD.io VPP v21.06
Vector Packet Processing
cryptodev_raw_data_path.c

/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
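
/*
 * Compute the rte_crypto_sym_ofs for a linked cipher+auth element. The raw
 * data path API takes one data vector covering the union of the cipher and
 * integrity regions plus per-op head/tail trims.
 *
 * Worked example (illustrative offsets only): crypto_start_offset = 16,
 * crypto_total_length = 100, integ_start_offset = 0, integ_length_adj = 12
 * give crypto_end = 116 and integ_end = 112, hence
 *   *min_ofs = 0, *max_end = 116
 *   cipher.head = 16, cipher.tail = 0
 *   auth.head = 0, auth.tail = 4
 * i.e. integrity starts 16 bytes before the cipher region and stops 4
 * bytes short of the vector's end.
 */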
static_always_inline u64
compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t *fe, i16 *min_ofs,
                        u32 *max_end)
{
  union rte_crypto_sym_ofs ofs;
  u32 crypto_end = fe->crypto_start_offset + fe->crypto_total_length;
  u32 integ_end =
    fe->integ_start_offset + fe->crypto_total_length + fe->integ_length_adj;

  *min_ofs = clib_min (fe->crypto_start_offset, fe->integ_start_offset);
  *max_end = clib_max (crypto_end, integ_end);

  ofs.ofs.cipher.head = fe->crypto_start_offset - *min_ofs;
  ofs.ofs.cipher.tail = *max_end - crypto_end;
  ofs.ofs.auth.head = fe->integ_start_offset - *min_ofs;
  ofs.ofs.auth.tail = *max_end - integ_end;

  return ofs.raw;
}
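
/*
 * Build the scatter-gather list for a chained vlib buffer: the caller fills
 * data_vec[0] with the head segment and passes the number of bytes still
 * needed in size; this walks the chain and appends one rte_crypto_vec per
 * tail segment, bumping *n_seg. Returns -1 when the chain cannot be reduced
 * to at most CRYPTODEV_MAX_N_SGL buffers, or when the chain ends before
 * size bytes are covered.
 */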
static_always_inline int
cryptodev_frame_build_sgl (vlib_main_t *vm, enum rte_iova_mode iova_mode,
                           struct rte_crypto_vec *data_vec, u16 *n_seg,
                           vlib_buffer_t *b, u32 size)
{
  struct rte_crypto_vec *vec = data_vec + 1;
  if (vlib_buffer_chain_linearize (vm, b) > CRYPTODEV_MAX_N_SGL)
    return -1;

  while ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && size)
    {
      u32 len;
      b = vlib_get_buffer (vm, b->next_buffer);
      len = clib_min (b->current_length, size);
      vec->base = (void *) vlib_buffer_get_current (b);
      if (iova_mode == RTE_IOVA_VA)
        vec->iova = pointer_to_uword (vec->base);
      else
        vec->iova = vlib_buffer_get_current_pa (vm, b);
      vec->len = len;
      size -= len;
      vec++;
      *n_seg += 1;
    }

  if (size)
    return -1;

  return 0;
}
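
/*
 * Reprogram the thread's raw data path context with a known-good session
 * (cet->reset_sess, set aside at first use) so the context is valid again
 * after a failed enqueue; called from the error paths below.
 */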
static_always_inline void
cryptodev_reset_ctx (cryptodev_engine_thread_t *cet)
{
  union rte_cryptodev_session_ctx sess_ctx;

  ASSERT (cet->reset_sess != 0);

  sess_ctx.crypto_sess = cet->reset_sess;

  rte_cryptodev_configure_raw_dp_ctx (cet->cryptodev_id, cet->cryptodev_q,
                                      cet->ctx, RTE_CRYPTO_OP_WITH_SESSION,
                                      sess_ctx, 0);
}
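
/*
 * Enqueue a frame of linked cipher+auth operations (e.g. AES-CBC plus
 * HMAC-SHA*) on the raw data path. Sessions are created lazily per NUMA
 * node and op type; the raw DP context is reprogrammed only when the key
 * changes between elements (is_update distinguishes the first programming
 * from updates). Chained buffers are expanded into a scatter-gather list.
 * On failure the whole frame is marked ENGINE_ERR and the context reset.
 */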
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
                                     vnet_crypto_async_frame_t *frame,
                                     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec;
  u32 n_elts;
  u32 last_key_index = ~0;
  i16 min_ofs;
  u32 max_end;
  u8 is_update = 0;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  b = cet->b;
  fe = frame->elts;

  while (n_elts)
    {
      union rte_crypto_sym_ofs cofs;
      u16 n_seg = 1;

      if (n_elts > 2)
        {
          CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
          vlib_prefetch_buffer_header (b[1], LOAD);
          vlib_prefetch_buffer_header (b[2], LOAD);
        }

      if (PREDICT_FALSE (last_key_index != fe->key_index))
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
          union rte_cryptodev_session_ctx sess_ctx;

          if (PREDICT_FALSE (key->keys[vm->numa_node][op_type] == 0))
            {
              status = cryptodev_session_create (vm, fe->key_index, 0);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          /* Borrow a created session to reset the session ctx, based on the
           * valid assumption that a session reset won't happen until the
           * first valid packet is processed */
          if (PREDICT_FALSE (cet->reset_sess == 0))
            cet->reset_sess = key->keys[vm->numa_node][op_type];

          sess_ctx.crypto_sess = key->keys[vm->numa_node][op_type];

          status = rte_cryptodev_configure_raw_dp_ctx (
            cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
            RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, is_update);
          if (PREDICT_FALSE (status < 0))
            goto error_exit;

          last_key_index = fe->key_index;
          is_update = 1;
        }

      cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);

      vec->len = max_end - min_ofs;
      if (cmt->iova_mode == RTE_IOVA_VA)
        {
          vec[0].base = (void *) (b[0]->data + min_ofs);
          vec[0].iova = pointer_to_uword (b[0]->data) + min_ofs;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = pointer_to_uword (fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = pointer_to_uword (fe->tag);
        }
      else
        {
          vec[0].base = (void *) (b[0]->data + min_ofs);
          vec[0].iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = vlib_physmem_get_pa (vm, fe->digest);
        }

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        {
          vec[0].len = b[0]->current_data + b[0]->current_length - min_ofs;
          if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg,
                                         b[0],
                                         max_end - min_ofs - vec->len) < 0)
            goto error_exit;
        }

      status = rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
                                          &digest_vec, 0, (void *) frame);
      if (PREDICT_FALSE (status < 0))
        goto error_exit;

      b++;
      fe++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    {
      cryptodev_reset_ctx (cet);
      return -1;
    }

  cet->inflight += frame->n_elts;
  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
                                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}
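
/*
 * Enqueue a frame of AEAD operations (e.g. AES-GCM). Each element's AAD is
 * staged into a per-thread buffer whose IOVA was resolved once at init
 * (cet->aad_phy_addr), avoiding per-packet address translation:
 *   aad_offset = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
 * i.e. 16-byte slots, with the slot index wrapping at CRYPTODEV_AAD_MASK + 1;
 * successive elements use offsets 0, 16, 32, ... The buffer is sized
 * CRYPTODEV_NB_CRYPTO_OPS * CRYPTODEV_MAX_AAD_SIZE bytes at init, which
 * presumably keeps a slot from being reused while its operation is still in
 * flight. A session is re-created if its recorded AAD length (opaque_data)
 * differs from this frame's aad_len.
 */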
static_always_inline int
cryptodev_raw_aead_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame,
                            cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  u32 n_elts;
  union rte_crypto_sym_ofs cofs;
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec;
  u32 last_key_index = ~0;
  u8 is_update = 0;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  fe = frame->elts;
  b = cet->b;
  cofs.raw = 0;

  while (n_elts)
    {
      u32 aad_offset = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
      u16 n_seg = 1;

      if (n_elts > 1)
        {
          CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
          vlib_prefetch_buffer_header (b[1], LOAD);
        }

      if (PREDICT_FALSE (last_key_index != fe->key_index))
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
          union rte_cryptodev_session_ctx sess_ctx;

          if (PREDICT_FALSE (key->keys[vm->numa_node][op_type] == 0))
            {
              status = cryptodev_session_create (vm, fe->key_index, aad_len);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          if (PREDICT_FALSE (
                (u8) key->keys[vm->numa_node][op_type]->opaque_data !=
                aad_len))
            {
              cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
                                      fe->key_index, aad_len);
              status = cryptodev_session_create (vm, fe->key_index, aad_len);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          /* Borrow a created session to reset the session ctx, based on the
           * valid assumption that a session reset won't happen until the
           * first valid packet is processed */
          if (PREDICT_FALSE (cet->reset_sess == 0))
            cet->reset_sess = key->keys[vm->numa_node][op_type];

          sess_ctx.crypto_sess = key->keys[vm->numa_node][op_type];

          status = rte_cryptodev_configure_raw_dp_ctx (
            cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
            RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, is_update);
          if (PREDICT_FALSE (status < 0))
            goto error_exit;

          last_key_index = fe->key_index;
          is_update = 1;
        }

      if (cmt->iova_mode == RTE_IOVA_VA)
        {
          vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
          vec[0].iova = pointer_to_uword (vec[0].base);
          vec[0].len = fe->crypto_total_length;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = pointer_to_uword (fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = pointer_to_uword (fe->tag);
          aad_vec.va = (void *) (cet->aad_buf + aad_offset);
          aad_vec.iova = cet->aad_phy_addr + aad_offset;
        }
      else
        {
          vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
          vec[0].iova =
            vlib_buffer_get_pa (vm, b[0]) + fe->crypto_start_offset;
          vec[0].len = fe->crypto_total_length;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
          aad_vec.va = (void *) (cet->aad_buf + aad_offset);
          aad_vec.iova = cet->aad_phy_addr + aad_offset;
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
        }

      if (aad_len == 8)
        *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
      else
        {
          /* aad_len == 12 */
          *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
          *(u32 *) (cet->aad_buf + aad_offset + 8) = *(u32 *) (fe->aad + 8);
        }

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        {
          vec[0].len = b[0]->current_data + b[0]->current_length -
                       fe->crypto_start_offset;
          status =
            cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
                                       fe->crypto_total_length - vec[0].len);
          if (status < 0)
            goto error_exit;
        }

      status =
        rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
                                   &digest_vec, &aad_vec, (void *) frame);
      if (PREDICT_FALSE (status < 0))
        goto error_exit;

      fe++;
      b++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    goto error_exit;

  cet->inflight += frame->n_elts;

  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
                                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}
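
/*
 * Dequeue-side helpers. cryptodev_get_frame_n_elts () and
 * cryptodev_post_dequeue () are the callbacks handed to
 * rte_cryptodev_raw_dequeue_burst (): the first tells the PMD how many
 * operations belong to the frame, the second records each operation's
 * status into the frame. GET_RING_OBJ peeks at the object at position pos
 * in an rte_ring without consuming it, relying on the ring storing its
 * objects directly after the header (at &r[1]).
 *
 * For partially dequeued frames parked in the cached-frame ring, f->state
 * packs bookkeeping into one byte: bits 0-6 hold the number of elements not
 * yet dequeued, bit 7 is a sticky error flag (see the "& 0x7f" and "& 0x80"
 * unpacking in cryptodev_raw_dequeue () below).
 */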
static_always_inline u32
cryptodev_get_frame_n_elts (void *frame)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;
  return f->n_elts;
}

static_always_inline void
cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;

  f->elts[index].status = is_op_success ? VNET_CRYPTO_OP_STATUS_COMPLETED :
                                          VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
}

#define GET_RING_OBJ(r, pos, f)                                               \
  do                                                                          \
    {                                                                         \
      vnet_crypto_async_frame_t **ring = (void *) &r[1];                      \
      f = ring[(r->cons.head + pos) & r->mask];                               \
    }                                                                         \
  while (0)
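
/*
 * Dequeue completions for this thread's queue pair. Frames that were only
 * partially dequeued earlier sit in cet->cached_frame and are drained
 * first, in order; only the oldest frame may be returned to the caller.
 * Fresh completions are then taken with rte_cryptodev_raw_dequeue_burst ();
 * a frame that is not fully completed (or that must queue behind cached
 * frames) is parked in the ring instead of being returned. Everything
 * dequeued in this call is finally acknowledged to the PMD via
 * rte_cryptodev_raw_dequeue_done ().
 */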
static_always_inline vnet_crypto_async_frame_t *
cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
                       u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *frame, *frame_ret = 0;
  u32 n_deq, n_success;
  u32 n_cached_frame = rte_ring_count (cet->cached_frame), n_room_left;
  u8 no_job_to_deq = 0;
  u16 inflight = cet->inflight;
  int dequeue_status;

  n_room_left = CRYPTODEV_DEQ_CACHE_SZ - n_cached_frame - 1;

  if (n_cached_frame)
    {
      u32 i;
      for (i = 0; i < n_cached_frame; i++)
        {
          vnet_crypto_async_frame_t *f;
          void *f_ret;
          enum rte_crypto_op_status op_status;
          u8 n_left, err, j;

          GET_RING_OBJ (cet->cached_frame, i, f);

          if (i < n_cached_frame - 2)
            {
              vnet_crypto_async_frame_t *f1, *f2;
              GET_RING_OBJ (cet->cached_frame, i + 1, f1);
              GET_RING_OBJ (cet->cached_frame, i + 2, f2);
              CLIB_PREFETCH (f1, CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (f2, CLIB_CACHE_LINE_BYTES, LOAD);
            }

          n_left = f->state & 0x7f;
          err = f->state & 0x80;

          for (j = f->n_elts - n_left; j < f->n_elts && inflight; j++)
            {
              int ret;
              f_ret = rte_cryptodev_raw_dequeue (cet->ctx, &ret, &op_status);

              if (!f_ret)
                break;

              switch (op_status)
                {
                case RTE_CRYPTO_OP_STATUS_SUCCESS:
                  f->elts[j].status = VNET_CRYPTO_OP_STATUS_COMPLETED;
                  break;
                default:
                  f->elts[j].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
                  err |= 1 << 7;
                }

              inflight--;
            }

          if (j == f->n_elts)
            {
              if (i == 0)
                {
                  frame_ret = f;
                  f->state = err ? VNET_CRYPTO_FRAME_STATE_ELT_ERROR :
                                   VNET_CRYPTO_FRAME_STATE_SUCCESS;
                }
              else
                {
                  f->state = f->n_elts - j;
                  f->state |= err;
                }
              if (inflight)
                continue;
            }

          /* from here f is not completely dequeued and no more jobs can be
           * dequeued
           */
          f->state = f->n_elts - j;
          f->state |= err;
          no_job_to_deq = 1;
          break;
        }

      if (frame_ret)
        {
          rte_ring_sc_dequeue (cet->cached_frame, (void **) &frame_ret);
          n_room_left++;
        }
    }

  /* no point in dequeuing further */
  if (!inflight || no_job_to_deq || !n_room_left)
    goto end_deq;

  n_deq = rte_cryptodev_raw_dequeue_burst (
    cet->ctx, cryptodev_get_frame_n_elts, 0, cryptodev_post_dequeue,
    (void **) &frame, 0, &n_success, &dequeue_status);
  if (!n_deq)
    goto end_deq;

  inflight -= n_deq;
  no_job_to_deq = n_deq < frame->n_elts;
  /* we have to cache the frame */
  if (frame_ret || n_cached_frame || no_job_to_deq)
    {
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }
  else
    {
      frame->state = n_success == frame->n_elts ?
                       VNET_CRYPTO_FRAME_STATE_SUCCESS :
                       VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
      frame_ret = frame;
    }

  /* see if we can dequeue more */
  while (inflight && n_room_left && !no_job_to_deq)
    {
      n_deq = rte_cryptodev_raw_dequeue_burst (
        cet->ctx, cryptodev_get_frame_n_elts, 0, cryptodev_post_dequeue,
        (void **) &frame, 0, &n_success, &dequeue_status);
      if (!n_deq)
        break;
      inflight -= n_deq;
      no_job_to_deq = n_deq < frame->n_elts;
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }

end_deq:
  if (inflight < cet->inflight)
    {
      int res =
        rte_cryptodev_raw_dequeue_done (cet->ctx, cet->inflight - inflight);
      ASSERT (res == 0);
      cet->inflight = inflight;
    }

  if (frame_ret)
    {
      *nb_elts_processed = frame_ret->n_elts;
      *enqueue_thread_idx = frame_ret->enqueue_thread_index;
    }

  return frame_ret;
}
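
/*
 * Thin wrappers binding a fixed (op_type, aad_len) pair to the handler
 * signature expected by vnet_crypto_register_async_handler (); they are
 * registered per algorithm in cryptodev_register_raw_hdl () below.
 */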
static_always_inline int
cryptodev_raw_enq_aead_aad_8_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT, 8);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_12_enc (vlib_main_t *vm,
                                   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
                                     12);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_8_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT, 8);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_12_dec (vlib_main_t *vm,
                                   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
                                     12);
}

static_always_inline int
cryptodev_raw_enq_linked_alg_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_raw_enq_linked_alg_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_DECRYPT);
}
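
/*
 * Probe all cryptodev instances for RTE_CRYPTODEV_FF_SYM_RAW_DP; if any
 * instance lacks the feature, fall back to the conventional crypto-op data
 * path (cryptodev_register_cop_hdl). Otherwise allocate per-thread
 * resources (cached-frame ring, AAD staging buffer, raw DP context) and
 * register enqueue/dequeue handlers for every supported algorithm.
 *
 * As an illustration (assuming foreach_vnet_aead_crypto_conversion contains
 * an AES-128-GCM entry with 16-byte tag and 8-byte AAD), the first macro
 * below expands to a capability check followed by:
 *   vnet_crypto_register_async_handler (vm, eidx,
 *     VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD8_ENC,
 *     cryptodev_raw_enq_aead_aad_8_enc, cryptodev_raw_dequeue);
 * and the matching _DEC registration.
 */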
clib_error_t *
cryptodev_register_raw_hdl (vlib_main_t *vm, u32 eidx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  cryptodev_inst_t *cinst;
  struct rte_cryptodev_info info;
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;
  u32 support_raw_api = 1, max_ctx_size = 0;
  clib_error_t *error = 0;

  vec_foreach (cinst, cmt->cryptodev_inst)
    {
      u32 ctx_size;
      rte_cryptodev_info_get (cinst->dev_id, &info);
      if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
        {
          support_raw_api = 0;
          break;
        }

      ctx_size = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);
      max_ctx_size = clib_max (ctx_size, max_ctx_size);
    }

  if (!support_raw_api)
    return cryptodev_register_cop_hdl (vm, eidx);

  vec_foreach (cet, cmt->per_thread_data)
    {
      u32 thread_id = cet - cmt->per_thread_data;
      u32 numa = vlib_get_main_by_index (thread_id)->numa_node;
      /* format () output is not NUL terminated; append one for DPDK */
      u8 *name = format (0, "cache_frame_ring_%u_%u%c", numa, thread_id, 0);

      cet->cached_frame =
        rte_ring_create ((char *) name, CRYPTODEV_DEQ_CACHE_SZ, numa,
                         RING_F_SC_DEQ | RING_F_SP_ENQ);

      cet->aad_buf = rte_zmalloc_socket (
        0, CRYPTODEV_NB_CRYPTO_OPS * CRYPTODEV_MAX_AAD_SIZE,
        CLIB_CACHE_LINE_BYTES, numa);
      if (cet->aad_buf == 0)
        {
          error = clib_error_return (0, "Failed to alloc aad buf");
          goto err_handling;
        }
      cet->aad_phy_addr = rte_malloc_virt2iova (cet->aad_buf);

      cet->ctx =
        rte_zmalloc_socket (0, max_ctx_size, CLIB_CACHE_LINE_BYTES, numa);
      if (!cet->ctx)
        {
          error = clib_error_return (0, "Failed to alloc raw dp ctx");
          goto err_handling;
        }

      if (cet->cached_frame == 0)
        {
          error = clib_error_return (0, "Failed to alloc frame ring %s", name);
          goto err_handling;
        }

      vec_free (name);
    }

/** INDENT-OFF **/
#define _(a, b, c, d, e, f, g)                                                \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
        cryptodev_raw_enq_aead_aad_##f##_enc, cryptodev_raw_dequeue);         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
        cryptodev_raw_enq_aead_aad_##f##_dec, cryptodev_raw_dequeue);         \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d, e)                                                      \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
        cryptodev_raw_enq_linked_alg_enc, cryptodev_raw_dequeue);             \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
        cryptodev_raw_enq_linked_alg_dec, cryptodev_raw_dequeue);             \
    }
  foreach_cryptodev_link_async_alg
#undef _

  cmt->is_raw_api = 1;

  return 0;

err_handling:
  vec_foreach (cet, cmt->per_thread_data)
    {
      if (cet->cached_frame)
        rte_ring_free (cet->cached_frame);
    }

  return error;
}