#include <dpdk/ipsec/ipsec.h>

#define foreach_esp_encrypt_next \
_(DROP, "error-drop") \
_(IP4_LOOKUP, "ip4-lookup") \
_(IP6_LOOKUP, "ip6-lookup") \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
/* ... */

#define foreach_esp_encrypt_error \
_(RX_PKTS, "ESP pkts received") \
_(SEQ_CYCLED, "Sequence number cycled") \
_(ENQ_FAIL, "Enqueue encrypt failed (queue full)") \
_(DISCARD, "Not enough crypto operations") \
_(SESSION, "Failed to get crypto session") \
_(NOSUP, "Cipher/Auth not supported")

#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
/* ... */
#define _(sym,string) string,
/* ... */

  ipsec_crypto_alg_t crypto_alg;
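/* Illustrative sketch, not quoted from this file: the foreach_* X-macros
 * above are normally expanded into the next-node enum and the error
 * enum/string table, roughly like this (ESP_ENCRYPT_N_NEXT and
 * esp_encrypt_error_strings are conventional names, assumed here):
 *
 *   typedef enum
 *   {
 *   #define _(v, s) ESP_ENCRYPT_NEXT_##v,
 *     foreach_esp_encrypt_next
 *   #undef _
 *       ESP_ENCRYPT_N_NEXT,
 *   } esp_encrypt_next_t;
 *
 *   static char *esp_encrypt_error_strings[] = {
 *   #define _(sym, string) string,
 *     foreach_esp_encrypt_error
 *   #undef _
 *   };
 */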
  s = format (s, "cipher %U auth %U\n",
	      /* ... */);
  struct rte_cryptodev_sym_session *session = 0;
  u32 ret, last_sa_index = ~0;
  u8 numa = rte_socket_id ();
  /* ... */
  struct rte_crypto_op **ops = cwm->ops;
  /* Orphaned continuation lines: the DISCARD error counter
     ("Not enough crypto operations") is apparently incremented here
     for the whole frame. */
  /* ... */
	  ESP_ENCRYPT_ERROR_DISCARD, n_left_from);
  /* ... */
	  ESP_ENCRYPT_ERROR_DISCARD, n_left_from);
  next_index = ESP_ENCRYPT_NEXT_DROP;

  while (n_left_from > 0)
    /* ... */
    while (n_left_from > 0 && n_left_to_next > 0)
      /* ... */
	  ip4_and_esp_header_t *ih0, *oh0 = 0;
	  ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
	  ip4_and_udp_and_esp_header_t *ouh0 = 0;
	  /* ... */
	  u16 udp_encap_adv = 0;
	  struct rte_mbuf *mb0;
	  struct rte_crypto_op *op;
	  /* ... */
	  ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);
	  /* ... */
	    sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]);
	  if (sa_index0 != last_sa_index)
	    /* ... */
	      is_aead = (cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD);
	      /* ... */
		auth_alg = cipher_alg;
	      /* ... */
		  ESP_ENCRYPT_ERROR_NOSUP, 1);
	      /* ... */
		  ESP_ENCRYPT_ERROR_NOSUP, 1);
	      /* ... */
		  ESP_ENCRYPT_ERROR_SESSION,
	      /* ... */
		  ESP_ENCRYPT_ERROR_SESSION,
	      /* ... */
	      last_sa_index = sa_index0;
	  /* ... */
	      ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
	  /* ... */
	      ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
	  iv_size = cipher_alg->iv_len;
	  trunc_size = auth_alg->trunc_size;

	  /* ... */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0) && !is_ip6)
	    /* ... */

	  if (ipsec_sa_is_set_IS_TUNNEL (sa0))
	      /* ... */
	      if (!ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
		  /* ... */
		  priv->next = DPDK_CRYPTO_INPUT_NEXT_IP4_LOOKUP;
		  /* ... */
		  next_hdr_type = (is_ip6 ?
				   IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
		  /* ... */
		  oh0->ip4.checksum_data_64[0] =
		    clib_host_to_net_u64 (0x45ULL << 56);
		  /* ... */
		  oh0->ip4.checksum_data_32[2] =
		    clib_host_to_net_u32 (0xfe320000);
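		  /* Added note, not in the original source: the two
		   * constants above appear to pre-build the outer IPv4
		   * header words directly.  0x45 in the most significant
		   * byte is version 4 + IHL 5 (byte 0 of the header), and
		   * 0xfe320000 packs TTL 254, protocol 0x32 (50 = ESP) and
		   * a zero checksum into header bytes 8-11. */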
		  oh0->ip4.src_address.as_u32 = /* ... */;
		  oh0->ip4.dst_address.as_u32 = /* ... */;

		  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
		    /* ... */
		    oh0->ip4.protocol = IP_PROTOCOL_UDP;
		  /* ... */
		  esp0->spi = clib_host_to_net_u32 (sa0->spi);
		  esp0->seq = clib_host_to_net_u32 (sa0->seq);
	      /* ... */
		  priv->next = DPDK_CRYPTO_INPUT_NEXT_IP6_LOOKUP;
		  /* ... */
		  ih6_0 = (ip6_and_esp_header_t *) ih0;
		  /* ... */
		  next_hdr_type = (is_ip6 ?
				   IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
		  /* ... */
		  oh6_0->ip6.ip_version_traffic_class_and_flow_label =
		    ih6_0->ip6.ip_version_traffic_class_and_flow_label;
		  /* ... */
		  oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
		  oh6_0->ip6.hop_limit = 254;
		  oh6_0->ip6.src_address.as_u64[0] = /* ... */;
		  oh6_0->ip6.src_address.as_u64[1] = /* ... */;
		  oh6_0->ip6.dst_address.as_u64[0] = /* ... */;
		  oh6_0->ip6.dst_address.as_u64[1] = /* ... */;
		  /* ... */
		  oh6_0->esp.spi = clib_host_to_net_u32 (sa0->spi);
		  oh6_0->esp.seq = clib_host_to_net_u32 (sa0->seq);
	      /* ... */
	      priv->next = DPDK_CRYPTO_INPUT_NEXT_MIDCHAIN;
	      /* ... */
	      priv->next = DPDK_CRYPTO_INPUT_NEXT_INTERFACE_OUTPUT;
	      /* ... */
	      rewrite_len = vnet_buffer (b0)->ip.save_rewrite_length;
	      /* ... */
	      u8 *src = ((u8 *) ih0) - rewrite_len;
	      /* ... */
	      ih6_0 = (ip6_and_esp_header_t *) ih0;
	      next_hdr_type = ih6_0->ip6.protocol;
	      memmove (dst, src, rewrite_len + sizeof (ip6_header_t));
	      oh6_0 = (ip6_and_esp_header_t *) oh0;
	      oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
	      /* ... */
	      next_hdr_type = ih0->ip4.protocol;
	      memmove (dst, src, rewrite_len + ip_size);
	      oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;

	      if (ipsec_sa_is_set_UDP_ENCAP (sa0))
		  /* ... */
		  oh0->ip4.protocol = IP_PROTOCOL_UDP;
		  /* ... */
		    (((u8 *) oh0) + ip_size + udp_encap_adv);
	      /* ... */
		  oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
	      /* ... */
	      esp0->spi = clib_host_to_net_u32 (sa0->spi);
	      esp0->seq = clib_host_to_net_u32 (sa0->seq);

	  if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0)
	      /* ... */
	      ouh0->udp.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
	      ouh0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
	      ouh0->udp.checksum = 0;
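	      /* Added note, not in the original source: for UDP
	       * encapsulation (NAT traversal, RFC 3948) both ports are set
	       * to UDP_DST_PORT_ipsec (4500 in VPP) and the UDP checksum is
	       * left at zero, which RFC 3948 permits for UDP-encapsulated
	       * ESP over IPv4. */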
	  u16 pad_payload_len = ((orig_sz + 2) + mask) & ~mask;
	  u8 pad_bytes = pad_payload_len - 2 - orig_sz;
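	  /* Worked example (added, assuming mask == block_size - 1 as set
	   * up in the elided code above): with a 16-byte cipher block and
	   * orig_sz = 60, pad_payload_len = ((60 + 2) + 15) & ~15 = 64 and
	   * pad_bytes = 64 - 2 - 60 = 2.  The "+ 2" reserves room for the
	   * ESP trailer's pad-length and next-header bytes. */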
	  /* ... */
	      oh6_0->ip6.payload_length =
		clib_host_to_net_u16 (len - rewrite_len);
	  /* ... */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0)
	      /* ... */
		clib_host_to_net_u16 (clib_net_to_host_u16
				      /* ... */
	  /* ... */
	  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  /* ... */
	  mb0->data_off = ((void *) esp0) - mb0->buf_addr;
	  /* ... */
	  u32 cipher_off, cipher_len, auth_len = 0;
	  /* ... */
	  u64 digest_paddr = mb0->buf_iova + digest - ((u8 *) mb0->buf_addr);
	  if (!is_aead && (cipher_alg->alg == RTE_CRYPTO_CIPHER_AES_CBC ||
			   cipher_alg->alg == RTE_CRYPTO_CIPHER_NULL))
	      /* ... */
	      cipher_len = iv_size + pad_payload_len;
	      /* ... */
	      u32 *esp_iv = (u32 *) (esp0 + 1);
	      esp_iv[0] = sa0->seq;
	      /* ... */
	      cipher_len = pad_payload_len;
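	  /* Added note, not in the original source: the two cipher_len
	   * assignments above belong to different branches.  For AES-CBC
	   * and NULL ciphers the encrypt operation is sized to cover the IV
	   * field as well (iv_size + pad_payload_len); otherwise the
	   * per-packet IV/nonce written at (esp0 + 1) and seeded from
	   * sa0->seq is handled separately and only pad_payload_len is
	   * ciphered. */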
	  /* ... */
	      aad[1] = clib_host_to_net_u32 (sa0->seq_hi);
	  /* ... */
	  if (ipsec_sa_is_set_USE_ESN (sa0))
	      /* ... */
	      u32 *_digest = (u32 *) digest;
	      _digest[0] = clib_host_to_net_u32 (sa0->seq_hi);
	  /* ... */
			   0, auth_len, (u8 *) aad, digest, digest_paddr);
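	  /* Added note, not in the original source: with extended sequence
	   * numbers (USE_ESN) the high 32 bits of the sequence counter must
	   * be covered by the integrity check but are not transmitted.  For
	   * AEAD ciphers they go into the AAD (aad[1] above); otherwise
	   * sa0->seq_hi is written at the location where the ICV will later
	   * be placed, so it is included in the authenticated data and then
	   * overwritten by the computed digest. */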
	  /* ... */
	  if (!ipsec_sa_is_set_IS_TUNNEL (sa0) && !is_tun)
	    /* ... */

  /* ... */
			       ESP_ENCRYPT_ERROR_RX_PKTS,
  /* ... */
		      ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1);
  /* ... */
			       ESP_ENCRYPT_ERROR_RX_PKTS,
  /* ... */
		      ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1);
/* ... */
  .name = "dpdk-esp4-encrypt",
  .vector_size = sizeof (u32),
  /* ... */
    [ESP_ENCRYPT_NEXT_DROP] = "error-drop",

/* ... */
  .name = "dpdk-esp6-encrypt",
  .vector_size = sizeof (u32),
  /* ... */
    [ESP_ENCRYPT_NEXT_DROP] = "error-drop",

/* ... */
  .name = "dpdk-esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  /* ... */
    [ESP_ENCRYPT_NEXT_DROP] = "error-drop",

/* ... */
  .name = "dpdk-esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  /* ... */
    [ESP_ENCRYPT_NEXT_DROP] = "error-drop",