FD.io VPP  v20.01-48-g3e0dafb74
Vector Packet Processing
gtpu_decap.c
/*
 * decap.c: gtpu tunnel decap packet processing
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <gtpu/gtpu.h>

vlib_node_registration_t gtpu4_input_node;
vlib_node_registration_t gtpu6_input_node;

typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 teid;
} gtpu_rx_trace_t;

static u8 * format_gtpu_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
                  t->tunnel_index, t->teid, t->next_index, t->error);
    }
  else
    {
      s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
                  t->teid);
    }
  return s;
}

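/* Decap sanity check: the FIB the packet belongs to (the per-buffer TX FIB
 * if one is set, otherwise the RX interface's FIB) must match the tunnel's
 * configured encap FIB, so a TEID is only accepted from the routing table
 * the tunnel was provisioned in. */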
always_inline u32 validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t,
                                     u32 is_ip4)
{
  u32 fib_index, sw_if_index;

  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];

  if (is_ip4)
    fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
      vec_elt (ip4_main.fib_index_by_sw_if_index, sw_if_index) :
      vnet_buffer (b)->sw_if_index[VLIB_TX];
  else
    fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
      vec_elt (ip6_main.fib_index_by_sw_if_index, sw_if_index) :
      vnet_buffer (b)->sw_if_index[VLIB_TX];

  return (fib_index == t->encap_fib_index);
}

always_inline uword
gtpu_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * from_frame,
            u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  gtpu4_tunnel_key_t last_key4;
  gtpu6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

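  /* last_key4/last_key6 cache the most recent successful lookup so that a
   * run of packets from the same tunnel resolves its tunnel index without
   * touching the hash table. All-ones is a sentinel no valid key matches,
   * forcing a real lookup on the first packet. */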
  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    clib_memset (&last_key6, 0xff, sizeof (last_key6));

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
          gtpu4_tunnel_key_t key4_0, key4_1;
          gtpu6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0, has_space1;
          u8 ver0, ver1;

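          /* Classic VPP quad-loop pattern: while packets n and n+1 are being
           * processed, the buffer headers and payload of packets n+2 and n+3
           * are prefetched, hiding memory latency on the next iteration. */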
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);
          if (is_ip4)
            {
              ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
              ip4_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip4_header_t));
            }
          else
            {
              ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
              ip6_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip6_header_t));
            }

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
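          /* GTP-U v1 (3GPP TS 29.281): the mandatory header is 8 bytes; when
           * any of the E, S or PN flags is set, a 4-byte optional field block
           * (sequence number, N-PDU number, next extension header type)
           * follows. sizeof(gtpu_header_t) covers the full 12 bytes, so
           * subtract the optional block when all three flag bits are clear. */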
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a tunnel
             * within that path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure the mcast GTPU tunnel exists for the packet's DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a tunnel
             * within that path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;
          }

        next0:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increments on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.teid = gtpu1->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a tunnel
             * within that path */
            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              {
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                last_key4.as_u64 = key4_1.as_u64;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
              {
                key4_1.src = ip4_1->dst_address.as_u32;
                key4_1.teid = gtpu1->teid;
                /* Make sure the mcast GTPU tunnel exists for the packet's DIP and TEID */
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;

          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.teid = gtpu1->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a tunnel
             * within that path */
            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              {
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);

                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }

                clib_memcpy_fast (&last_key6, &key6_1, sizeof(key6_1));
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
                                                    &t1->src.ip6)))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
              {
                key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
                key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
                key6_1.teid = gtpu1->teid;
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;
          }

        next1:
          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increments on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

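      /* Single-buffer loop: same logic as the dual-buffer loop above, one
       * packet at a time, draining whatever the quad-loop left over. */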
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          uword * p0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0, * mt0 = NULL;
          gtpu4_tunnel_key_t key4_0;
          gtpu6_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;
          u8 has_space0;
          u8 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          if (is_ip4) {
            ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
          } else {
            ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
          }

          tunnel_index0 = ~0;
          error0 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
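          /* As above: drop the optional 4-byte tail when E/S/PN are all clear. */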
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a tunnel
             * within that path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure the mcast GTPU tunnel exists for the packet's DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a tunnel
             * within that path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;
          }

        next00:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increments on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, is_ip4?
                               gtpu4_input_node.index:gtpu6_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

VLIB_NODE_FN (gtpu4_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (gtpu6_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 0);
}

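/* Expand the error-string table from gtpu_error.def: each
 * gtpu_error(SYMBOL, "string") entry contributes its string, in the same
 * order as the corresponding GTPU_ERROR_* enum values. */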
static char * gtpu_error_strings[] = {
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
#undef _
};

VLIB_REGISTER_NODE (gtpu4_input_node) = {
  .name = "gtpu4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

VLIB_REGISTER_NODE (gtpu6_input_node) = {
  .name = "gtpu6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

typedef enum {
  IP_GTPU_BYPASS_NEXT_DROP,
  IP_GTPU_BYPASS_NEXT_GTPU,
  IP_GTPU_BYPASS_N_NEXT,
} ip_gtpu_bypass_next_t;

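/* ip4-gtpu-bypass / ip6-gtpu-bypass are input-feature nodes: they peek at
 * received UDP packets destined to port 2152 (GTP-U) and, when the
 * destination address matches a local VTEP, validate UDP length/checksum
 * and hand the packet straight to gtpu4-input/gtpu6-input, skipping the
 * normal ip4/ip6 local path. */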
always_inline uword
ip_gtpu_bypass_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame,
                       u32 is_ip4)
{
  gtpu_main_t * gtm = &gtpu_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

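  /* addr4/addr6 cache the last destination address that matched a local
   * VTEP, saving a hash lookup for back-to-back packets to the same VTEP. */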
  if (is_ip4) addr4.data_u32 = ~0;
  else ip6_address_set_zero (&addr6);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip40, * ip41;
          ip6_header_t * ip60, * ip61;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);
          vnet_feature_next(&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit0; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (gtm->vtep4, ip40->dst_address.as_u32))
                    goto exit0; /* no local VTEP for GTPU packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (gtm->vtep6, &ip60->dst_address))
                    goto exit0; /* no local VTEP for GTPU packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current to be at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit1; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip41->dst_address.as_u32)
                {
                  if (!hash_get (gtm->vtep4, ip41->dst_address.as_u32))
                    goto exit1; /* no local VTEP for GTPU packet */
                  addr4 = ip41->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
                {
                  if (!hash_get_mem (gtm->vtep6, &ip61->dst_address))
                    goto exit1; /* no local VTEP for GTPU packet */
                  addr6 = ip61->dst_address;
                }
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* gtpu-input node expects current to be at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

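      /* Single-buffer loop: identical checks to the dual-buffer loop above,
       * one packet at a time. */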
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          ip4_header_t * ip40;
          ip6_header_t * ip60;
          udp_header_t * udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (gtm->vtep4, ip40->dst_address.as_u32))
                    goto exit; /* no local VTEP for GTPU packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (gtm->vtep6, &ip60->dst_address))
                    goto exit; /* no local VTEP for GTPU packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current to be at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_NODE_FN (ip4_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
  .name = "ip4-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */

VLIB_NODE_FN (ip6_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
  .name = "ip6-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */