#define foreach_gtpu_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char * gtpu_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_gtpu_encap_error
#undef _
};

typedef enum {
#define _(sym,str) GTPU_ENCAP_ERROR_##sym,
  foreach_gtpu_encap_error
#undef _
  GTPU_ENCAP_N_ERROR,
} gtpu_encap_error_t;

#define foreach_gtpu_encap_next        \
_(DROP, "error-drop")                  \
_(IP4_LOOKUP, "ip4-lookup")            \
_(IP6_LOOKUP, "ip6-lookup")

u8 * format_gtpu_encap_trace (u8 * s, va_list * args)
{
  /* ... */
  s = format (s, "GTPU encap to gtpu_tunnel%d teid %d",
              t->tunnel_index, t->teid);
  return s;
}
#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)

static uword
gtpu_encap_inline (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * from_frame,
                   u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0, old_l2 = 0, old_l3 = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = 0, sw_if_index1 = 0, sw_if_index2 = 0, sw_if_index3 = 0;
  u32 next0 = 0, next1 = 0, next2 = 0, next3 = 0;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          u32 bi0, bi1, bi2, bi3;
          vlib_buffer_t * b0, * b1, * b2, * b3;
          u32 flow_hash0, flow_hash1, flow_hash2, flow_hash3;
          u32 len0, len1, len2, len3;
          ip4_header_t * ip4_0, * ip4_1, * ip4_2, * ip4_3;
          ip6_header_t * ip6_0, * ip6_1, * ip6_2, * ip6_3;
          udp_header_t * udp0, * udp1, * udp2, * udp3;
          gtpu_header_t * gtpu0, * gtpu1, * gtpu2, * gtpu3;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u64 * copy_src2, * copy_dst2;
          u64 * copy_src3, * copy_dst3;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          u32 * copy_src_last2, * copy_dst_last2;
          u32 * copy_src_last3, * copy_dst_last3;
          u16 new_l0, new_l1, new_l2, new_l3;
          /* ... prefetch, load b0..b3, look up the gtpu tunnels t0..t3 ... */

          next0 = t0->next_dpo.dpoi_next_node;
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
          next1 = t1->next_dpo.dpoi_next_node;
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;
          next2 = t2->next_dpo.dpoi_next_node;
          vnet_buffer(b2)->ip.adj_index[VLIB_TX] = t2->next_dpo.dpoi_index;
          next3 = t3->next_dpo.dpoi_next_node;
          vnet_buffer(b3)->ip.adj_index[VLIB_TX] = t3->next_dpo.dpoi_index;
          /* is_ip4 path: ip4_0..ip4_3 point at the space opened for the
             new IP4/UDP/GTPU header stack; copy the precomputed rewrite */
          copy_dst0 = (u64 *) ip4_0;
          copy_src0 = (u64 *) t0->rewrite;
          copy_dst1 = (u64 *) ip4_1;
          copy_src1 = (u64 *) t1->rewrite;
          copy_dst2 = (u64 *) ip4_2;
          copy_src2 = (u64 *) t2->rewrite;
          copy_dst3 = (u64 *) ip4_3;
          copy_src3 = (u64 *) t3->rewrite;
          /* Copy first 32 octets 8 bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
          foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst2[offs] = copy_src2[offs];
          foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst3[offs] = copy_src3[offs];
          foreach_fixed_header4_offset;
#undef _
          /* Last 4 octets. Hopefully gcc will be our friend */
          copy_dst_last0 = (u32 *)(&copy_dst0[4]);
          copy_src_last0 = (u32 *)(&copy_src0[4]);
          copy_dst_last0[0] = copy_src_last0[0];
          copy_dst_last1 = (u32 *)(&copy_dst1[4]);
          copy_src_last1 = (u32 *)(&copy_src1[4]);
          copy_dst_last1[0] = copy_src_last1[0];
          copy_dst_last2 = (u32 *)(&copy_dst2[4]);
          copy_src_last2 = (u32 *)(&copy_src2[4]);
          copy_dst_last2[0] = copy_src_last2[0];
          copy_dst_last3 = (u32 *)(&copy_dst3[4]);
          copy_src_last3 = (u32 *)(&copy_src3[4]);
          copy_dst_last3[0] = copy_src_last3[0];
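The 36-octet IP4/UDP/GTPU rewrite (20 + 8 + 8) is copied as four u64 stores plus one u32 store instead of a byte-wise memcpy. A standalone sketch of the same trick (plain C; buffer contents and names are illustrative, and real code must guarantee 8-byte alignment of both pointers, as the vlib buffer layout does here):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define foreach_fixed_header4_offset _(0) _(1) _(2) _(3)

int main (void)
{
  /* 36-byte precomputed header stack, padded to u64 multiples so the
     unrolled stores stay in bounds. */
  uint64_t rewrite[5] = { 1, 2, 3, 4, 0x55667788aabbccddULL };
  uint64_t packet[5] = { 0 };

  uint64_t *copy_src = rewrite, *copy_dst = packet;

  /* First 32 octets, 8 bytes at a time, unrolled by the preprocessor. */
#define _(offs) copy_dst[offs] = copy_src[offs];
  foreach_fixed_header4_offset;
#undef _

  /* Last 4 octets as a single u32 store (byte offsets 32..35). */
  uint32_t *copy_src_last = (uint32_t *) &copy_src[4];
  uint32_t *copy_dst_last = (uint32_t *) &copy_dst[4];
  copy_dst_last[0] = copy_src_last[0];

  printf ("%d\n", memcmp (packet, rewrite, 36) == 0); /* prints 1 */
  return 0;
}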
          /* ... fix IP4 total lengths and checksums, UDP lengths ... */

          /* Fix GTPU length */
          new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                         - sizeof (*ip4_0) - sizeof (*udp0)
                                         - GTPU_V1_HDR_LEN);
          gtpu0->length = new_l0;
          new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                         - sizeof (*ip4_1) - sizeof (*udp1)
                                         - GTPU_V1_HDR_LEN);
          gtpu1->length = new_l1;
          new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2)
                                         - sizeof (*ip4_2) - sizeof (*udp2)
                                         - GTPU_V1_HDR_LEN);
          gtpu2->length = new_l2;
          new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3)
                                         - sizeof (*ip4_3) - sizeof (*udp3)
                                         - GTPU_V1_HDR_LEN);
          gtpu3->length = new_l3;
          /* !is_ip4 path: the 56-octet IP6/UDP/GTPU rewrite */
          copy_dst0 = (u64 *) ip6_0;
          copy_src0 = (u64 *) t0->rewrite;
          copy_dst1 = (u64 *) ip6_1;
          copy_src1 = (u64 *) t1->rewrite;
          copy_dst2 = (u64 *) ip6_2;
          copy_src2 = (u64 *) t2->rewrite;
          copy_dst3 = (u64 *) ip6_3;
          copy_src3 = (u64 *) t3->rewrite;
          /* Copy the whole rewrite 8 bytes at a time; 56 octets is a
             multiple of 8, so no u32 tail copy is needed here */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
          foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst2[offs] = copy_src2[offs];
          foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst3[offs] = copy_src3[offs];
          foreach_fixed_header6_offset;
#undef _

          /* ... fix IP6 payload lengths and UDP checksums ... */

          /* Fix GTPU length */
          new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                         - sizeof (*ip6_0) - sizeof (*udp0)
                                         - GTPU_V1_HDR_LEN);
          gtpu0->length = new_l0;
          new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                         - sizeof (*ip6_1) - sizeof (*udp1)
                                         - GTPU_V1_HDR_LEN);
          gtpu1->length = new_l1;
          new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2)
                                         - sizeof (*ip6_2) - sizeof (*udp2)
                                         - GTPU_V1_HDR_LEN);
          gtpu2->length = new_l2;
          new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3)
                                         - sizeof (*ip6_3) - sizeof (*udp3)
                                         - GTPU_V1_HDR_LEN);
          gtpu3->length = new_l3;
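In both address families the GTP-U length field counts only the octets that follow the mandatory 8-octet GTPU header, hence buffer length minus IP, UDP, and GTPU header sizes. A quick standalone check of that arithmetic (plain C; header sizes hard-coded for illustration):

#include <stdint.h>
#include <arpa/inet.h>
#include <assert.h>

enum { IP4_HDR = 20, IP6_HDR = 40, UDP_HDR = 8, GTPU_V1_HDR = 8 };

/* GTP-U length = octets following the mandatory 8-octet header,
   stored in network byte order. */
static uint16_t gtpu_len (uint16_t buf_len, int is_ip4)
{
  return htons (buf_len - (is_ip4 ? IP4_HDR : IP6_HDR)
                - UDP_HDR - GTPU_V1_HDR);
}

int main (void)
{
  /* A 100-byte inner packet encapsulated in IP4: 136 bytes on the wire. */
  assert (gtpu_len (136, 1) == htons (100));
  /* The same inner packet in IP6: 156 bytes on the wire. */
  assert (gtpu_len (156, 0) == htons (100));
  return 0;
}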
          pkts_encapsulated += 4;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          len2 = vlib_buffer_length_in_chain (vm, b2);
          len3 = vlib_buffer_length_in_chain (vm, b3);
          stats_n_packets += 4;
          stats_n_bytes += len0 + len1 + len2 + len3;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index) ||
                             (sw_if_index2 != stats_sw_if_index) ||
                             (sw_if_index3 != stats_sw_if_index)))
            {
              stats_n_packets -= 4;
              stats_n_bytes -= len0 + len1 + len2 + len3;
              if ((sw_if_index0 == sw_if_index1) &&
                  (sw_if_index1 == sw_if_index2) &&
                  (sw_if_index2 == sw_if_index3))
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                       thread_index, stats_sw_if_index,
                       stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 4;
                  stats_n_bytes = len0 + len1 + len2 + len3;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index1, 1, len1);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index2, 1, len2);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index3, 1, len3);
                }
            }

          /* ... per-packet tracing elided ... */

          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }
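The stats logic above accumulates packet and byte counts for as long as consecutive packets leave via the same sw_if_index, and only touches the shared combined counter when the interface changes or the frame ends. A standalone sketch of that batching pattern (plain C; the counter type and flush function are stand-ins, not the VPP API):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for vlib_increment_combined_counter(). */
static void flush_counter (uint32_t sw_if_index, uint32_t pkts, uint64_t bytes)
{
  if (pkts)
    printf ("if %u: +%u pkts, +%llu bytes\n",
            sw_if_index, pkts, (unsigned long long) bytes);
}

int main (void)
{
  /* One sw_if_index per packet; each run of equal values flushes once. */
  uint32_t pkt_if[] = { 5, 5, 5, 7, 7, 5 };
  uint16_t pkt_len[] = { 100, 100, 64, 1500, 1500, 200 };

  uint32_t stats_sw_if_index = pkt_if[0];
  uint32_t stats_n_packets = 0;
  uint64_t stats_n_bytes = 0;

  for (int i = 0; i < 6; i++)
    {
      if (pkt_if[i] != stats_sw_if_index)
        {
          /* Interface changed: flush the batch, start a new one. */
          flush_counter (stats_sw_if_index, stats_n_packets, stats_n_bytes);
          stats_sw_if_index = pkt_if[i];
          stats_n_packets = 0;
          stats_n_bytes = 0;
        }
      stats_n_packets += 1;
      stats_n_bytes += pkt_len[i];
    }
  /* Flush whatever remains, as gtpu_encap_inline does after its loop. */
  flush_counter (stats_sw_if_index, stats_n_packets, stats_n_bytes);
  return 0;
}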
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 flow_hash0, len0;
          u16 new_l0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          /* ... load b0, look up the gtpu tunnel t0, set next0 ... */

          /* is_ip4 path: apply the rewrite */
          copy_dst0 = (u64 *) ip4_0;
          copy_src0 = (u64 *) t0->rewrite;

          /* Copy first 32 octets 8 bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          foreach_fixed_header4_offset;
#undef _
          /* Last 4 octets. Hopefully gcc will be our friend */
          copy_dst_last0 = (u32 *)(&copy_dst0[4]);
          copy_src_last0 = (u32 *)(&copy_src0[4]);
          copy_dst_last0[0] = copy_src_last0[0];
          /* ... fix IP4 length/checksum and UDP length ... */

          /* Fix GTPU length */
          new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                         - sizeof (*ip4_0) - sizeof (*udp0)
                                         - GTPU_V1_HDR_LEN);
          gtpu0->length = new_l0;
          /* !is_ip4 path: apply the IP6 rewrite */
          copy_dst0 = (u64 *) ip6_0;
          copy_src0 = (u64 *) t0->rewrite;
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          foreach_fixed_header6_offset;
#undef _

          /* ... fix IP6 payload length and UDP checksum ... */

          /* Fix GTPU length */
          new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                         - sizeof (*ip6_0) - sizeof (*udp0)
                                         - GTPU_V1_HDR_LEN);
          gtpu0->length = new_l0;
          pkts_encapsulated ++;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          /* ... per-packet tracing elided ... */

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, node->node_index,
                               GTPU_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Flush any remaining batched stats and remember the interface
     for the next dispatch */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
static uword
gtpu4_encap (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return gtpu_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
gtpu6_encap (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return gtpu_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (gtpu4_encap_node) = {
  .function = gtpu4_encap,
  .name = "gtpu4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_gtpu_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (gtpu_encap_error_strings),
  .error_strings = gtpu_encap_error_strings,
  .n_next_nodes = GTPU_ENCAP_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_ENCAP_NEXT_##s] = n,
    foreach_gtpu_encap_next
#undef _
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (gtpu4_encap_node, gtpu4_encap)

VLIB_REGISTER_NODE (gtpu6_encap_node) = {
  .function = gtpu6_encap,
  .name = "gtpu6-encap",
  .vector_size = sizeof (u32),
  /* ... trace, error and next-node setup as for gtpu4-encap ... */
  .next_nodes = {
#define _(s,n) [GTPU_ENCAP_NEXT_##s] = n,
    foreach_gtpu_encap_next
#undef _
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (gtpu6_encap_node, gtpu6_encap)