FD.io VPP  v17.01.1-3-gc6833f8
Vector Packet Processing
esp_decrypt.c
/*
 * esp_decrypt.c : IPSec ESP Decrypt node using DPDK Cryptodev
 *
 * Copyright (c) 2016 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
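/*
 * This file implements two VPP graph nodes:
 *
 *  - dpdk-esp-decrypt: validates inbound ESP packets (anti-replay window,
 *    supported algorithms, ciphertext length), builds one DPDK symmetric
 *    crypto operation per packet and enqueues it to the worker's cryptodev
 *    queue pair.  Packets only take a next arc here on error (error-drop).
 *
 *  - dpdk-esp-decrypt-post: runs once the cryptodev has decrypted and
 *    authenticated a packet; it advances the anti-replay window, strips the
 *    ESP header, IV, padding, footer and ICV, restores the inner or original
 *    IP header and dispatches to ip4-input / ip6-input.
 */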
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/devices/dpdk/ipsec/ipsec.h>
#include <vnet/devices/dpdk/ipsec/esp.h>

#define foreach_esp_decrypt_next \
_(DROP, "error-drop") \
_(IP4_INPUT, "ip4-input") \
_(IP6_INPUT, "ip6-input")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum {
  foreach_esp_decrypt_next
#undef _
  ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;
#define foreach_esp_decrypt_error \
 _(RX_PKTS, "ESP pkts received") \
 _(DECRYPTION_FAILED, "ESP decryption failed") \
 _(REPLAY, "SA replayed packet") \
 _(NOT_IP, "Not IP packet (dropped)") \
 _(ENQ_FAIL, "Enqueue failed (buffer full)") \
 _(NO_CRYPTODEV, "Cryptodev not configured") \
 _(BAD_LEN, "Invalid ciphertext length") \
 _(UNSUPPORTED, "Cipher/Auth not supported")

typedef enum {
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
  ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;

static char * esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};
vlib_node_registration_t dpdk_esp_decrypt_node;

typedef struct {
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;

/* packet trace format function */
static u8 * format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t * t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s, "esp: crypto %U integrity %U",
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);
  return s;
}
static uword
dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
                          vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next, next_index;
  ipsec_main_t *im = &ipsec_main;
  u32 cpu_index = os_get_cpu_number();
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  dpdk_esp_main_t *em = &dpdk_esp_main;
  u32 i;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  if (PREDICT_FALSE(!dcm->workers_main))
    {
      vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                   ESP_DECRYPT_ERROR_NO_CRYPTODEV, n_left_from);
      vlib_buffer_free(vm, from, n_left_from);
      return n_left_from;
    }

  crypto_worker_main_t *cwm = vec_elt_at_index(dcm->workers_main, cpu_index);
  u32 n_qps = vec_len(cwm->qp_data);
  struct rte_crypto_op ** cops_to_enq[n_qps];
  u32 n_cop_qp[n_qps], * bi_to_enq[n_qps];

  for (i = 0; i < n_qps; i++)
    {
      bi_to_enq[i] = cwm->qp_data[i].bi;
      cops_to_enq[i] = cwm->qp_data[i].cops;
    }

  memset(n_cop_qp, 0, n_qps * sizeof(u32));
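  /*
   * bi_to_enq / cops_to_enq are per-queue-pair write cursors into the
   * worker's qp_data staging arrays, and n_cop_qp counts the operations
   * staged per queue pair.  Everything staged in the loop below is flushed
   * with one rte_cryptodev_enqueue_burst() per queue pair after the loop.
   */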
  crypto_alloc_cops();

  next_index = ESP_DECRYPT_NEXT_DROP;
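  /*
   * Successfully staged packets are not forwarded by this node; they
   * re-enter the graph through dpdk-esp-decrypt-post once the cryptodev
   * completes.  next_index therefore defaults to error-drop and is only
   * used for packets that fail the checks below.
   */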
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, sa_index0 = ~0, seq, icv_size, iv_size;
          vlib_buffer_t * b0;
          esp_header_t * esp0;
          ipsec_sa_t * sa0;
          struct rte_mbuf * mb0 = 0;
          const int BLOCK_SIZE = 16;
          crypto_sa_session_t * sa_sess;
          void * sess;
          u16 qp_index;
          struct rte_crypto_op * cop = 0;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          esp0 = vlib_buffer_get_current (b0);

          sa_index0 = vnet_buffer(b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);

          seq = clib_host_to_net_u32(esp0->seq);

          /* anti-replay check */
          if (sa0->use_anti_replay)
            {
              int rv = 0;

              if (PREDICT_TRUE(sa0->use_esn))
                rv = esp_replay_check_esn(sa0, seq);
              else
                rv = esp_replay_check(sa0, seq);

              if (PREDICT_FALSE(rv))
                {
                  clib_warning ("anti-replay SPI %u seq %u", sa0->spi, seq);
                  vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                               ESP_DECRYPT_ERROR_REPLAY, 1);
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
            }

          if (PREDICT_FALSE(sa0->integ_alg == IPSEC_INTEG_ALG_NONE) ||
              PREDICT_FALSE(sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE))
            {
              clib_warning ("SPI %u : only cipher + auth supported", sa0->spi);
              vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                           ESP_DECRYPT_ERROR_UNSUPPORTED, 1);
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }

          sa_sess = pool_elt_at_index(cwm->sa_sess_d[0], sa_index0);

          if (PREDICT_FALSE(!sa_sess->sess))
            {
              int ret = create_sym_sess(sa0, sa_sess, 0);
              ASSERT(ret == 0);
            }
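          /*
           * Cryptodev sessions are created lazily, once per SA per worker,
           * the first time the SA is seen on this thread; sa_sess_d[0] and
           * the 0 passed to create_sym_sess select the inbound (decrypt)
           * direction.
           */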
          sess = sa_sess->sess;
          qp_index = sa_sess->qp_index;

          ASSERT (vec_len (vec_elt (cwm->qp_data, qp_index).free_cops) > 0);
          cop = vec_pop (vec_elt (cwm->qp_data, qp_index).free_cops);
          ASSERT (cop->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);

          cops_to_enq[qp_index][0] = cop;
          cops_to_enq[qp_index] += 1;
          n_cop_qp[qp_index] += 1;
          bi_to_enq[qp_index][0] = bi0;
          bi_to_enq[qp_index] += 1;

          rte_crypto_op_attach_sym_session(cop, sess);

          icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;

          /* Convert vlib buffer to mbuf */
          mb0 = rte_mbuf_from_vlib_buffer(b0);
          mb0->data_len = b0->current_length;
          mb0->pkt_len = b0->current_length;
          mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;
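          /*
           * The vlib buffer and the rte_mbuf describe the same packet memory
           * (buffers come from DPDK mempools), so only the mbuf metadata set
           * above needs to be brought in sync before handing the packet to
           * the cryptodev.
           */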
          /* Outer IP header has already been stripped */
          u16 payload_len = rte_pktmbuf_pkt_len(mb0) - sizeof (esp_header_t) -
            iv_size - icv_size;
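          /*
           * payload_len is the ciphertext only: total length minus the ESP
           * header, IV and ICV.  It must be a positive multiple of the
           * 16-byte cipher block size, otherwise the staged operation is
           * returned to the free list and the packet dropped as BAD_LEN.
           */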
          if ((payload_len & (BLOCK_SIZE - 1)) || (payload_len <= 0))
            {
              clib_warning ("payload %u not multiple of %d\n",
                            payload_len, BLOCK_SIZE);
              vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                           ESP_DECRYPT_ERROR_BAD_LEN, 1);
              vec_add (vec_elt (cwm->qp_data, qp_index).free_cops, &cop, 1);
              bi_to_enq[qp_index] -= 1;
              cops_to_enq[qp_index] -= 1;
              n_cop_qp[qp_index] -= 1;
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }

          struct rte_crypto_sym_op *sym_cop = (struct rte_crypto_sym_op *)(cop + 1);

          sym_cop->m_src = mb0;
          sym_cop->cipher.data.offset = sizeof (esp_header_t) + iv_size;
          sym_cop->cipher.data.length = payload_len;

          u8 *iv = rte_pktmbuf_mtod_offset(mb0, void*, sizeof (esp_header_t));
          dpdk_cop_priv_t * priv = (dpdk_cop_priv_t *)(sym_cop + 1);

          if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
            {
              dpdk_gcm_cnt_blk *icb = &priv->cb;
              icb->salt = sa0->salt;
              clib_memcpy(icb->iv, iv, 8);
              icb->cnt = clib_host_to_net_u32(1);
              sym_cop->cipher.iv.data = (u8 *)icb;
              sym_cop->cipher.iv.phys_addr = cop->phys_addr +
                (uintptr_t)icb - (uintptr_t)cop;
              sym_cop->cipher.iv.length = 16;
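              /*
               * The 16 bytes set up above form the AES-GCM counter block:
               * the 4-byte salt from the SA, the 8-byte IV carried in the
               * packet and a 32-bit block counter initialised to 1.
               */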
              u8 *aad = priv->aad;
              clib_memcpy(aad, iv - sizeof(esp_header_t), 8);
              sym_cop->auth.aad.data = aad;
              sym_cop->auth.aad.phys_addr = cop->phys_addr +
                (uintptr_t)aad - (uintptr_t)cop;
              if (sa0->use_esn)
                {
                  *((u32*)&aad[8]) = sa0->seq_hi;
                  sym_cop->auth.aad.length = 12;
                }
              else
                {
                  sym_cop->auth.aad.length = 8;
                }
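              /*
               * The AAD is the SPI and sequence number copied from the ESP
               * header (8 bytes); with extended sequence numbers the
               * high-order sequence bits are appended for a 12-byte AAD.
               */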
              sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
                rte_pktmbuf_pkt_len(mb0) - icv_size);
              sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
                rte_pktmbuf_pkt_len(mb0) - icv_size);
              sym_cop->auth.digest.length = icv_size;

            }
          else
            {
              sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(mb0, void*,
                sizeof (esp_header_t));
              sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
                sizeof (esp_header_t));
              sym_cop->cipher.iv.length = iv_size;
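              /*
               * Non-GCM ciphers with ESN: the received ICV is saved into the
               * op private area and the high-order sequence bits are written
               * in its place, so the HMAC is computed over ESP header + IV +
               * payload + seq_hi and verified against the saved digest.
               */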
              if (sa0->use_esn)
                {
                  dpdk_cop_priv_t* priv = (dpdk_cop_priv_t*) (sym_cop + 1);
                  u8* payload_end = rte_pktmbuf_mtod_offset(
                    mb0, u8*, sizeof(esp_header_t) + iv_size + payload_len);

                  clib_memcpy (priv->icv, payload_end, icv_size);
                  *((u32*) payload_end) = sa0->seq_hi;
                  sym_cop->auth.data.offset = 0;
                  sym_cop->auth.data.length = sizeof(esp_header_t) + iv_size
                    + payload_len + sizeof(sa0->seq_hi);
                  sym_cop->auth.digest.data = priv->icv;
                  sym_cop->auth.digest.phys_addr = cop->phys_addr
                    + (uintptr_t) priv->icv - (uintptr_t) cop;
                  sym_cop->auth.digest.length = icv_size;
                }
              else
                {
                  sym_cop->auth.data.offset = 0;
                  sym_cop->auth.data.length = sizeof(esp_header_t) +
                    iv_size + payload_len;

                  sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
                    rte_pktmbuf_pkt_len(mb0) - icv_size);
                  sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
                    rte_pktmbuf_pkt_len(mb0) - icv_size);
                  sym_cop->auth.digest.length = icv_size;
                }
            }

        trace:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_decrypt_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                               ESP_DECRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);
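  /*
   * Flush the staged operations: one enqueue burst per queue pair.
   * Operations that do not fit in the cryptodev queue are returned to the
   * free list and their buffers freed, counted as ENQ_FAIL.
   */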
  crypto_qp_data_t *qpd;
  /* *INDENT-OFF* */
  vec_foreach_index (i, cwm->qp_data)
    {
      u32 enq;

      qpd = vec_elt_at_index(cwm->qp_data, i);
      enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
                                        qpd->cops, n_cop_qp[i]);
      qpd->inflights += enq;

      if (PREDICT_FALSE(enq < n_cop_qp[i]))
        {
          crypto_free_cop (qpd, &qpd->cops[enq], n_cop_qp[i] - enq);
          vlib_buffer_free (vm, &qpd->bi[enq], n_cop_qp[i] - enq);

          vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                       ESP_DECRYPT_ERROR_ENQ_FAIL,
                                       n_cop_qp[i] - enq);
        }
    }
  /* *INDENT-ON* */

  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (dpdk_esp_decrypt_node) = {
  .function = dpdk_esp_decrypt_node_fn,
  .name = "dpdk-esp-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_decrypt_node, dpdk_esp_decrypt_node_fn)
/*
 * Decrypt Post Node
 */

#define foreach_esp_decrypt_post_error \
 _(PKTS, "ESP post pkts")

typedef enum {
#define _(sym,str) ESP_DECRYPT_POST_ERROR_##sym,
  foreach_esp_decrypt_post_error
#undef _
  ESP_DECRYPT_POST_N_ERROR,
} esp_decrypt_post_error_t;

static char * esp_decrypt_post_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_post_error
#undef _
};

vlib_node_registration_t dpdk_esp_decrypt_post_node;
static u8 * format_esp_decrypt_post_trace (u8 * s, va_list * args)
{
  return s;
}

static uword
dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next = 0, next_index;
  ipsec_sa_t * sa0;
  u32 sa_index0 = ~0;
  ipsec_main_t *im = &ipsec_main;
  dpdk_esp_main_t *em = &dpdk_esp_main;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          esp_footer_t * f0;
          u32 bi0, next0, icv_size, iv_size;
          vlib_buffer_t * b0 = 0;
          ip4_header_t *ih4 = 0, *oh4 = 0;
          ip6_header_t *ih6 = 0, *oh6 = 0;
          u8 tunnel_mode = 1;
          u8 transport_ip6 = 0;

          next0 = ESP_DECRYPT_NEXT_DROP;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          sa_index0 = vnet_buffer(b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);

          to_next[0] = bi0;
          to_next += 1;

          icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;

          if (sa0->use_anti_replay)
            {
              esp_header_t * esp0 = vlib_buffer_get_current (b0);
              u32 seq;
              seq = clib_host_to_net_u32(esp0->seq);
              if (PREDICT_TRUE(sa0->use_esn))
                esp_replay_advance_esn(sa0, seq);
              else
                esp_replay_advance(sa0, seq);
            }
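          /*
           * The anti-replay window is only advanced here, after the
           * cryptodev has decrypted and authenticated the packet; the first
           * pass (dpdk-esp-decrypt) merely checks the window.
           */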
          ih4 = (ip4_header_t *) (b0->data + sizeof(ethernet_header_t));
          vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size);

          b0->current_length -= (icv_size + 2);
          b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
          f0 = (esp_footer_t *) ((u8 *) vlib_buffer_get_current (b0) +
                                 b0->current_length);
          b0->current_length -= f0->pad_length;
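          /*
           * The ICV was already verified by the cryptodev, so it is simply
           * trimmed above together with the two-byte ESP trailer
           * (pad_length, next_header) and the padding it describes.
           */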
          /* transport mode */
          if (PREDICT_FALSE(!sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              tunnel_mode = 0;

              if (PREDICT_TRUE((ih4->ip_version_and_header_length & 0xF0) != 0x40))
                {
                  if (PREDICT_TRUE((ih4->ip_version_and_header_length & 0xF0) == 0x60))
                    transport_ip6 = 1;
                  else
                    {
                      clib_warning("next header: 0x%x", f0->next_header);
                      vlib_node_increment_counter (vm, dpdk_esp_decrypt_post_node.index,
                                                   ESP_DECRYPT_ERROR_NOT_IP, 1);
                      goto trace;
                    }
                }
            }
          if (PREDICT_TRUE (tunnel_mode))
            {
              if (PREDICT_TRUE(f0->next_header == IP_PROTOCOL_IP_IN_IP))
                next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
              else if (f0->next_header == IP_PROTOCOL_IPV6)
                next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
              else
                {
                  clib_warning("next header: 0x%x", f0->next_header);
                  vlib_node_increment_counter (vm, dpdk_esp_decrypt_post_node.index,
                                               ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
                                               1);
                  goto trace;
                }
            }
          /* transport mode */
          else
            {
              if (PREDICT_FALSE(transport_ip6))
                {
                  ih6 = (ip6_header_t *) (b0->data + sizeof(ethernet_header_t));
                  vlib_buffer_advance (b0, -sizeof(ip6_header_t));
                  oh6 = vlib_buffer_get_current (b0);
                  memmove(oh6, ih6, sizeof(ip6_header_t));

                  next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
                  oh6->protocol = f0->next_header;
                  oh6->payload_length =
                    clib_host_to_net_u16 (
                      vlib_buffer_length_in_chain (vm, b0) -
                      sizeof (ip6_header_t));
                }
              else
                {
                  vlib_buffer_advance (b0, -sizeof(ip4_header_t));
                  oh4 = vlib_buffer_get_current (b0);
                  memmove(oh4, ih4, sizeof(ip4_header_t));

                  next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
                  oh4->ip_version_and_header_length = 0x45;
                  oh4->fragment_id = 0;
                  oh4->flags_and_fragment_offset = 0;
                  oh4->protocol = f0->next_header;
                  oh4->length = clib_host_to_net_u16 (
                    vlib_buffer_length_in_chain (vm, b0));
                  oh4->checksum = ip4_header_checksum (oh4);
                }
            }
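          /*
           * In transport mode the original IP header, saved in ih4/ih6
           * before vlib_buffer_advance, is moved to sit directly in front of
           * the decrypted payload and its protocol, length and (for IPv4)
           * checksum fields are rewritten.
           */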
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32)~0;

        trace:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_decrypt_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, dpdk_esp_decrypt_post_node.index,
                               ESP_DECRYPT_POST_ERROR_PKTS,
                               from_frame->n_vectors);

  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (dpdk_esp_decrypt_post_node) = {
  .function = dpdk_esp_decrypt_post_node_fn,
  .name = "dpdk-esp-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_post_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings),
  .error_strings = esp_decrypt_post_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};