FD.io VPP  v20.09-64-g4f7b92f0a
Vector Packet Processing
gtpu_decap.c
/*
 * decap.c: gtpu tunnel decap packet processing
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <gtpu/gtpu.h>

vlib_node_registration_t gtpu4_input_node;
vlib_node_registration_t gtpu6_input_node;
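/* Per-packet trace record: captures the decap decision (tunnel, TEID,
 * chosen next node, error) so packet-trace output can show why a packet
 * was accepted or dropped. */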
typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 teid;
} gtpu_rx_trace_t;

static u8 * format_gtpu_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
                  t->tunnel_index, t->teid, t->next_index, t->error);
    }
  else
    {
      s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
                  t->teid);
    }
  return s;
}

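/* A tunnel only matches if the packet arrived in the same FIB (VRF) the
 * tunnel's encap side was provisioned in. */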
always_inline u32 validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
{
  return t->encap_fib_index == vlib_buffer_get_ip_fib_index (b, is_ip4);
}

always_inline uword
gtpu_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * from_frame,
            u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  gtpu4_tunnel_key_t last_key4;
  gtpu6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    clib_memset (&last_key6, 0xff, sizeof (last_key6));
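  /* Remembering the last lookup key and tunnel index lets consecutive
   * packets on the same tunnel skip the hash lookup entirely. */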

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
          gtpu4_tunnel_key_t key4_0, key4_1;
          gtpu6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0, has_space1;
          u8 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);
          if (is_ip4)
            {
              ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
              ip4_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip4_header_t));
            }
          else
            {
              ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
              ip6_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip6_header_t));
            }

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
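          /* The optional 4-byte tail of the GTPU header (sequence number,
           * N-PDU number, next-extension-header) is present only when one
           * of the E, S or PN flags is set; otherwise the header is 4
           * bytes shorter. */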

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for this packet's SIP and TEID;
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for this packet's SIP and TEID;
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;
          }

        next0:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
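          /* The current packet was already added to the running batch above;
           * on a tunnel change it is backed out, the old batch is flushed,
           * and a new batch is started with this packet. */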
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.teid = gtpu1->teid;

            /* Make sure a GTPU tunnel exists for this packet's SIP and TEID;
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              {
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                last_key4.as_u64 = key4_1.as_u64;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
              {
                key4_1.src = ip4_1->dst_address.as_u32;
                key4_1.teid = gtpu1->teid;
                /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;

          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.teid = gtpu1->teid;

            /* Make sure a GTPU tunnel exists for this packet's SIP and TEID;
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              {
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);

                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }

                clib_memcpy_fast (&last_key6, &key6_1, sizeof(key6_1));
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
                                                    &t1->src.ip6)))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
              {
                key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
                key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
                key6_1.teid = gtpu1->teid;
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;
          }

        next1:
          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          uword * p0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0, * mt0 = NULL;
          gtpu4_tunnel_key_t key4_0;
          gtpu6_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;
          u8 has_space0;
          u8 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          if (is_ip4) {
            ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
          } else {
            ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
          }

          tunnel_index0 = ~0;
          error0 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for this packet's SIP and TEID;
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for this packet's SIP and TEID;
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;
          }

        next00:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, is_ip4?
                               gtpu4_input_node.index : gtpu6_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }
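  /* runtime_data[0] persists the current batch's sw_if_index across frames;
   * it is re-read at the top of this function so batching can resume. */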

  return from_frame->n_vectors;
}

VLIB_NODE_FN (gtpu4_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (gtpu6_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 0);
}

static char * gtpu_error_strings[] = {
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
#undef _
};

VLIB_REGISTER_NODE (gtpu4_input_node) = {
  .name = "gtpu4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

VLIB_REGISTER_NODE (gtpu6_input_node) = {
  .name = "gtpu6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

typedef enum {
  IP_GTPU_BYPASS_NEXT_DROP,
  IP_GTPU_BYPASS_NEXT_GTPU,
  IP_GTPU_BYPASS_N_NEXT,
} ip_vxan_bypass_next_t;

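/* The bypass node runs on the IP feature arc: it inspects UDP packets
 * destined to the GTPU port and, when the DIP matches a local VTEP,
 * verifies UDP length/checksum and hands the packet straight to
 * gtpu-input, skipping the normal ip-local path. */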
always_inline uword
ip_gtpu_bypass_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame,
                       u32 is_ip4)
{
  gtpu_main_t * gtm = &gtpu_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  vtep4_key_t last_vtep4;	/* last IPv4 address / fib index
                                   matching a local VTEP address */
  vtep6_key_t last_vtep6;	/* last IPv6 address / fib index
                                   matching a local VTEP address */
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
#ifdef CLIB_HAVE_VEC512
  vtep4_cache_t vtep4_u512;
  clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
#endif

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    vtep4_key_init (&last_vtep4);
  else
    vtep6_key_init (&last_vtep6);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip40, * ip41;
          ip6_header_t * ip60, * ip61;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = b[0];
          b1 = b[1];
          b += 2;
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);
          vnet_feature_next(&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit0; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit0; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit0; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit1; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&gtm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b1, ip41, &last_vtep4))
#endif
                goto exit1; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b1, ip61, &last_vtep6))
                goto exit1; /* no local VTEP for GTPU packet */
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          ip4_header_t * ip40;
          ip6_header_t * ip60;
          udp_header_t * udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = b[0];
          b++;
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_NODE_FN (ip4_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
  .name = "ip4-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};
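
/* This node sits on the ip4-unicast feature arc and is enabled per
 * interface by the plugin (e.g. via the "set interface ip gtpu-bypass"
 * CLI defined in gtpu.c). */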

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */

VLIB_NODE_FN (ip6_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
  .name = "ip6-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);

#define foreach_gtpu_flow_error                    \
  _(NONE, "no error")                              \
  _(PAYLOAD_ERROR, "Payload type errors")          \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors")    \
  _(IP_HEADER_ERROR, "Rx ip header errors")        \
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors")  \
  _(UDP_LENGTH_ERROR, "Rx udp length errors")

typedef enum
{
#define _(f,s) GTPU_FLOW_ERROR_##f,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) GTPU_FLOW_ERROR_##n,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
  GTPU_FLOW_N_ERROR,
} gtpu_flow_error_t;

static char *gtpu_flow_error_strings[] = {
#define _(n,s) s,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
#undef _

};

#define gtpu_local_need_csum_check(_b)                  \
  (!(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED     \
     || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))

#define gtpu_local_csum_is_valid(_b)                    \
  ((_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT       \
    || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) != 0)

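/* Checksum state is tracked in buffer flags: "need check" means neither
 * software nor hardware has verified the L4 checksum yet; "is valid" means
 * it was verified, or was produced locally by checksum offload. */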
static_always_inline u8
gtpu_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t *b)
{
  u32 flags = b->flags;
  enum { offset = sizeof(ip4_header_t) + sizeof(udp_header_t)};

  /* Verify UDP checksum */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      /* Rewind to the IP header, validate, then restore current_data */
      vlib_buffer_advance (b, -offset);
      flags = ip4_tcp_udp_validate_checksum (vm, b);
      vlib_buffer_advance (b, offset);
    }

  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}

static_always_inline u8
gtpu_check_ip (vlib_buffer_t *b, u16 payload_len)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
      sizeof(ip4_header_t) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 expected = payload_len + sizeof(ip4_header_t) + sizeof(udp_header_t);
  return ip_len > expected || ip4_hdr->ttl == 0 || ip4_hdr->ip_version_and_header_length != 0x45;
}

static_always_inline u8
gtpu_check_ip_udp_len (vlib_buffer_t *b)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
      sizeof(ip4_header_t) - sizeof(udp_header_t);
  udp_header_t * udp_hdr = vlib_buffer_get_current(b) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 udp_len = clib_net_to_host_u16 (udp_hdr->length);
  return udp_len > ip_len;
}

static_always_inline u8
gtpu_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = GTPU_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = GTPU_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = GTPU_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = GTPU_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}

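/* In the flow-offload path the NIC has already classified the packet:
 * b->flow_id carries flow_id_start + tunnel index, so no hash lookup is
 * needed; software only re-validates the IP/UDP headers and checksum. */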
static_always_inline uword
gtpu_flow_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0 = 0, has_space1 = 0;
          u8 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);

          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          ip_err1 = gtpu_check_ip (b1, len1);
          udp_err1 = gtpu_check_ip_udp_len (b1);

          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b1)))
            csum_err1 = !gtpu_validate_udp_csum (vm, b1);
          else
            csum_err1 = !gtpu_local_csum_is_valid (b1);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace0;
            }

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
          b0->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
              (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }
          next0 = t0->decap_next_index;

          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          if (ip_err1 || udp_err1 || csum_err1)
            {
              next1 = GTPU_INPUT_NEXT_DROP;
              error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1);
              goto trace1;
            }

          /* speculatively load gtp header version field */
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          ASSERT (b1->flow_id != 0);
          tunnel_index1 = b1->flow_id - gtm->flow_id_start;
          t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
          b1->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          /* assign the next node */
          if (PREDICT_FALSE (t1->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
              (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
            {
              error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }
          next1 = t1->decap_next_index;

          sw_if_index1 = t1->sw_if_index;

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          /* This won't happen in current implementation as only
             ipv4/udp/gtpu/IPV4 type packets can be matched */
          if (PREDICT_FALSE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          u32 error0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0;
          u32 sw_if_index0, len0;
          u8 has_space0 = 0;
          u8 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          len0 = vlib_buffer_length_in_chain (vm, b0);

          tunnel_index0 = ~0;
          error0 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace00;
            }

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
          b0->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
              (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }
          next0 = t0->decap_next_index;

          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, gtpu4_flow_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

VLIB_NODE_FN (gtpu4_flow_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return gtpu_flow_input(vm, node, from_frame);
}


/* *INDENT-OFF* */
#ifndef CLIB_MULTIARCH_VARIANT
VLIB_REGISTER_NODE (gtpu4_flow_input_node) = {
  .name = "gtpu4-flow-input",
  .type = VLIB_NODE_TYPE_INTERNAL,
  .vector_size = sizeof (u32),

  .format_trace = format_gtpu_rx_trace,

  .n_errors = GTPU_FLOW_N_ERROR,
  .error_strings = gtpu_flow_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _

  },
};
#endif
/* *INDENT-ON* */

#endif /* CLIB_MARCH_VARIANT */