FD.io VPP  v20.01-48-g3e0dafb74
Vector Packet Processing
gbp_vxlan.c
1 /*
2  * Copyright (c) 2018 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <plugins/gbp/gbp_vxlan.h>
17 #include <plugins/gbp/gbp_learn.h>
18 #include <plugins/gbp/gbp_bridge_domain.h>
19 #include <plugins/gbp/gbp_route_domain.h>
20 
21 #include <vnet/vxlan-gbp/vxlan_gbp.h>
22 #include <vlibmemory/api.h>
23 #include <vnet/fib/fib_table.h>
24 #include <vlib/punt.h>
25 
26 /**
27  * A reference to a VXLAN-GBP tunnel created as a child/dependent tunnel
28  * of the template GBP-VXLAN tunnel
29  */
30 typedef struct vxlan_tunnel_ref_t_
31 {
32   gbp_itf_hdl_t vxr_itf;
33   u32 vxr_sw_if_index;
34   index_t vxr_parent;
35   gbp_vxlan_tunnel_layer_t vxr_layer;
36 } vxlan_tunnel_ref_t;
37 
38 /**
39  * DB of added tunnels
40  */
41 uword *gv_db;
42 
43 /**
44  * Logger
45  */
46 static vlib_log_class_t gt_logger;
47 
48 /**
49  * Pool of template tunnels
50  */
51 static gbp_vxlan_tunnel_t *gbp_vxlan_tunnel_pool;
52 
53 /**
54  * Pool of child tunnels
55  */
56 static vxlan_tunnel_ref_t *vxlan_tunnel_ref_pool;
57 
58 /**
59  * DB of template interfaces by SW interface index
60  */
61 static index_t *gbp_vxlan_tunnel_db;
62 
63 /**
64  * DB of child interfaces by SW interface index
65  */
66 static index_t *vxlan_tunnel_ref_db;
67 
68 /**
69  * handle registered with the punt infra
70  */
71 static vlib_punt_hdl_t punt_hdl;
72 
73 static char *gbp_vxlan_tunnel_layer_strings[] = {
74 #define _(n,s) [GBP_VXLAN_TUN_##n] = s,
75   foreach_gbp_vxlan_tunnel_layer
76 #undef _
77 };
78 
79 #define GBP_VXLAN_TUN_DBG(...) \
80  vlib_log_debug (gt_logger, __VA_ARGS__);
81 
82 
83 gbp_vxlan_tunnel_t *
84 gbp_vxlan_tunnel_get (index_t gti)
85 {
86  return (pool_elt_at_index (gbp_vxlan_tunnel_pool, gti));
87 }
88 
89 static vxlan_tunnel_ref_t *
90 vxlan_tunnel_ref_get (index_t vxri)
91 {
92  return (pool_elt_at_index (vxlan_tunnel_ref_pool, vxri));
93 }
94 
95 static u8 *
96 format_vxlan_tunnel_ref (u8 * s, va_list * args)
97 {
98  index_t vxri = va_arg (*args, u32);
99  vxlan_tunnel_ref_t *vxr;
100 
101  vxr = vxlan_tunnel_ref_get (vxri);
102 
103  s = format (s, "[%U]", format_gbp_itf_hdl, vxr->vxr_itf);
104 
105  return (s);
106 }
107 
108 static void
109 gdb_vxlan_dep_del (u32 sw_if_index)
110 {
111  vxlan_tunnel_ref_t *vxr;
112  gbp_vxlan_tunnel_t *gt;
113  index_t vxri;
114  u32 pos;
115 
116  vxr = vxlan_tunnel_ref_get (vxlan_tunnel_ref_db[sw_if_index]);
117  vxri = vxr - vxlan_tunnel_ref_pool;
118  gt = gbp_vxlan_tunnel_get (vxr->vxr_parent);
119 
120  GBP_VXLAN_TUN_DBG ("del-dep:%U", format_vxlan_tunnel_ref, vxri);
121 
122  vxlan_tunnel_ref_db[vxr->vxr_sw_if_index] = INDEX_INVALID;
123  pos = vec_search (gt->gt_tuns, vxri);
124 
125  ASSERT (~0 != pos);
126  vec_del1 (gt->gt_tuns, pos);
127 
128  vnet_vxlan_gbp_tunnel_del (vxr->vxr_sw_if_index);
129 
130  pool_put (vxlan_tunnel_ref_pool, vxr);
131 }
132 
133 static gbp_itf_hdl_t
134 gdb_vxlan_dep_add (gbp_vxlan_tunnel_t * gt,
135                    const ip46_address_t * src, const ip46_address_t * dst)
136 {
137  vnet_vxlan_gbp_tunnel_add_del_args_t args = {
138  .is_add = 1,
139  .is_ip6 = !ip46_address_is_ip4 (src),
140  .vni = gt->gt_vni,
141  .src = *src,
142  .dst = *dst,
143  .instance = ~0,
144  .mode = (GBP_VXLAN_TUN_L2 == gt->gt_layer ?
145           VXLAN_GBP_TUNNEL_MODE_L2 : VXLAN_GBP_TUNNEL_MODE_L3),
146  };
147  vxlan_tunnel_ref_t *vxr;
148  u32 sw_if_index;
149  index_t vxri;
150  int rv;
151 
152  sw_if_index = ~0;
153  rv = vnet_vxlan_gbp_tunnel_add_del (&args, &sw_if_index);
154 
155  if (VNET_API_ERROR_TUNNEL_EXIST == rv)
156  {
157  vxri = vxlan_tunnel_ref_db[sw_if_index];
158 
159  vxr = vxlan_tunnel_ref_get (vxri);
160  gbp_itf_lock (vxr->vxr_itf);
161  }
162  else if (0 == rv)
163  {
164  ASSERT (~0 != sw_if_index);
165  GBP_VXLAN_TUN_DBG ("add-dep:%U %U %U %d", format_vnet_sw_if_index_name,
166  vnet_get_main (), sw_if_index,
167                     format_ip46_address, src, IP46_TYPE_ANY,
168                     format_ip46_address, dst, IP46_TYPE_ANY, gt->gt_vni);
169 
170  pool_get_zero (vxlan_tunnel_ref_pool, vxr);
171 
172  vxri = (vxr - vxlan_tunnel_ref_pool);
173  vxr->vxr_parent = gt - gbp_vxlan_tunnel_pool;
174  vxr->vxr_sw_if_index = sw_if_index;
175  vxr->vxr_layer = gt->gt_layer;
176 
177  /*
178  * store the child both on the parent's list and the global DB
179  */
180  vec_add1 (gt->gt_tuns, vxri);
181 
182  vec_validate_init_empty (vxlan_tunnel_ref_db,
183                           vxr->vxr_sw_if_index, INDEX_INVALID);
184  vxlan_tunnel_ref_db[vxr->vxr_sw_if_index] = vxri;
185 
186  if (GBP_VXLAN_TUN_L2 == vxr->vxr_layer)
187  {
188  l2output_feat_masks_t ofeat;
189  l2input_feat_masks_t ifeat;
190  gbp_bridge_domain_t *gbd;
191 
192  gbd = gbp_bridge_domain_get (gt->gt_gbd);
193  vxr->vxr_itf = gbp_itf_l2_add_and_lock_w_free
194    (vxr->vxr_sw_if_index, gt->gt_gbd, gdb_vxlan_dep_del);
195 
196  ofeat = L2OUTPUT_FEAT_GBP_POLICY_MAC;
197  ifeat = L2INPUT_FEAT_NONE;
198 
199  if (!(gbd->gb_flags & GBP_BD_FLAG_DO_NOT_LEARN))
200  ifeat |= L2INPUT_FEAT_GBP_LEARN;
201 
201 
202  gbp_itf_l2_set_output_feature (vxr->vxr_itf, ofeat);
203  gbp_itf_l2_set_input_feature (vxr->vxr_itf, ifeat);
204  }
205  else
206  {
207  vxr->vxr_itf = gbp_itf_l3_add_and_lock_w_free
208    (vxr->vxr_sw_if_index, gt->gt_grd, gdb_vxlan_dep_del);
209 
210  gbp_itf_l3_set_input_feature (vxr->vxr_itf, GBP_ITF_L3_FEAT_LEARN);
211  }
212  }
213  else
214  {
215  return (GBP_ITF_HDL_INVALID);
216  }
217 
218  return (vxr->vxr_itf);
219 }
220 
221 u32
222 vxlan_gbp_tunnel_get_parent (u32 sw_if_index)
223 {
224  ASSERT ((sw_if_index < vec_len (vxlan_tunnel_ref_db)) &&
225  (INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index]));
226 
227  gbp_vxlan_tunnel_t *gt;
228  vxlan_tunnel_ref_t *vxr;
229 
230  vxr = vxlan_tunnel_ref_get (vxlan_tunnel_ref_db[sw_if_index]);
231  gt = gbp_vxlan_tunnel_get (vxr->vxr_parent);
232 
233  return (gt->gt_sw_if_index);
234 }
235 
236 gbp_itf_hdl_t
237 vxlan_gbp_tunnel_lock_itf (u32 sw_if_index)
238 {
239  ASSERT ((sw_if_index < vec_len (vxlan_tunnel_ref_db)) &&
240  (INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index]));
241 
242  vxlan_tunnel_ref_t *vxr;
243 
244  vxr = vxlan_tunnel_ref_get (vxlan_tunnel_ref_db[sw_if_index]);
245 
246  gbp_itf_lock (vxr->vxr_itf);
247 
248  return (vxr->vxr_itf);
249 }
250 
251 
252 gbp_vxlan_tunnel_type_t
253 gbp_vxlan_tunnel_get_type (u32 sw_if_index)
254 {
255  if (sw_if_index < vec_len (vxlan_tunnel_ref_db) &&
256  INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index])
257  {
258  return (VXLAN_GBP_TUNNEL);
259  }
260  else if (sw_if_index < vec_len (gbp_vxlan_tunnel_db) &&
261  INDEX_INVALID != gbp_vxlan_tunnel_db[sw_if_index])
262  {
263  return (GBP_VXLAN_TEMPLATE_TUNNEL);
264  }
265 
266  ASSERT (0);
267  return (GBP_VXLAN_TEMPLATE_TUNNEL);
268 }
269 
270 gbp_itf_hdl_t
271 gbp_vxlan_tunnel_clone_and_lock (u32 sw_if_index,
272                                  const ip46_address_t * src,
273  const ip46_address_t * dst)
274 {
275  gbp_vxlan_tunnel_t *gt;
276  index_t gti;
277 
278  gti = gbp_vxlan_tunnel_db[sw_if_index];
279 
280  if (INDEX_INVALID == gti)
281  return (GBP_ITF_HDL_INVALID);
282 
283  gt = pool_elt_at_index (gbp_vxlan_tunnel_pool, gti);
284 
285  return (gdb_vxlan_dep_add (gt, src, dst));
286 }
287 
288 void
289 vxlan_gbp_tunnel_unlock (u32 sw_if_index)
290 {
291  /* vxlan_tunnel_ref_t *vxr; */
292  /* index_t vxri; */
293 
294  /* vxri = vxlan_tunnel_ref_db[sw_if_index]; */
295 
296  /* ASSERT (vxri != INDEX_INVALID); */
297 
298  /* vxr = vxlan_tunnel_ref_get (vxri); */
299 
300  /* gdb_vxlan_dep_del (vxri); */
301 }
302 
303 void
304 gbp_vxlan_walk (gbp_vxlan_cb_t cb, void *ctx)
305 {
306  gbp_vxlan_tunnel_t *gt;
307 
308  /* *INDENT-OFF* */
309  pool_foreach (gt, gbp_vxlan_tunnel_pool,
310  ({
311  if (WALK_CONTINUE != cb(gt, ctx))
312  break;
313  }));
314  /* *INDENT-ON* */
315 }
316 
317 static walk_rc_t
318 gbp_vxlan_tunnel_show_one (gbp_vxlan_tunnel_t * gt, void *ctx)
319 {
320  vlib_cli_output (ctx, "%U", format_gbp_vxlan_tunnel,
321                   gt - gbp_vxlan_tunnel_pool);
322 
323  return (WALK_CONTINUE);
324 }
325 
326 static u8 *
327 format_gbp_vxlan_tunnel_name (u8 * s, va_list * args)
328 {
329  u32 dev_instance = va_arg (*args, u32);
330 
331  return format (s, "gbp-vxlan-%d", dev_instance);
332 }
333 
334 u8 *
335 format_gbp_vxlan_tunnel_layer (u8 * s, va_list * args)
336 {
337  gbp_vxlan_tunnel_layer_t gl = va_arg (*args, gbp_vxlan_tunnel_layer_t);
338  s = format (s, "%s", gbp_vxlan_tunnel_layer_strings[gl]);
339 
340  return (s);
341 }
342 
343 u8 *
344 format_gbp_vxlan_tunnel (u8 * s, va_list * args)
345 {
346  u32 dev_instance = va_arg (*args, u32);
347  CLIB_UNUSED (int verbose) = va_arg (*args, int);
348  gbp_vxlan_tunnel_t *gt = gbp_vxlan_tunnel_get (dev_instance);
349  index_t *vxri;
350 
351  s = format (s, " [%d] gbp-vxlan-tunnel: hw:%d sw:%d vni:%d %U",
352  dev_instance, gt->gt_hw_if_index,
353  gt->gt_sw_if_index, gt->gt_vni,
354              format_gbp_vxlan_tunnel_layer, gt->gt_layer);
355  if (GBP_VXLAN_TUN_L2 == gt->gt_layer)
356  s = format (s, " BD:%d gbd-index:%d", gt->gt_bd_rd_id, gt->gt_gbd);
357  else
358  s = format (s, " RD:%d grd-index:%d", gt->gt_bd_rd_id, gt->gt_grd);
359 
360  s = format (s, " dependents:");
361  vec_foreach (vxri, gt->gt_tuns)
362  {
363  s = format (s, "\n %U, ", format_vxlan_tunnel_ref, *vxri);
364  }
365 
366  return s;
367 }
368 
369 typedef struct gbp_vxlan_tx_trace_t_
370 {
371   u32 vni;
372 } gbp_vxlan_tx_trace_t;
373 
374 u8 *
375 format_gbp_vxlan_tx_trace (u8 * s, va_list * args)
376 {
377  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
378  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
379  gbp_vxlan_tx_trace_t *t = va_arg (*args, gbp_vxlan_tx_trace_t *);
380 
381  s = format (s, "GBP-VXLAN: vni:%d", t->vni);
382 
383  return (s);
384 }
385 
386 clib_error_t *
387 gbp_vxlan_interface_admin_up_down (vnet_main_t * vnm,
388                                    u32 hw_if_index, u32 flags)
389 {
390  vnet_hw_interface_t *hi;
391  u32 ti;
392 
393  hi = vnet_get_hw_interface (vnm, hw_if_index);
394 
395  if (NULL == gbp_vxlan_tunnel_db ||
396      hi->sw_if_index >= vec_len (gbp_vxlan_tunnel_db))
397    return (NULL);
398 
399  ti = gbp_vxlan_tunnel_db[hi->sw_if_index];
400 
401  if (~0 == ti)
402  /* not one of ours */
403  return (NULL);
404 
405  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
406    vnet_hw_interface_set_flags (vnm, hw_if_index,
407                                 VNET_HW_INTERFACE_FLAG_LINK_UP);
408  else
409  vnet_hw_interface_set_flags (vnm, hw_if_index, 0);
410 
411  return (NULL);
412 }
413 
414 static uword
415 gbp_vxlan_interface_tx (vlib_main_t * vm,
416                         vlib_node_runtime_t * node, vlib_frame_t * frame)
417 {
418  clib_warning ("you shouldn't be here, leaking buffers...");
419  return frame->n_vectors;
420 }
421 
422 /* *INDENT-OFF* */
423 VNET_DEVICE_CLASS (gbp_vxlan_device_class) = {
424  .name = "GBP VXLAN tunnel-template",
425  .format_device_name = format_gbp_vxlan_tunnel_name,
426  .format_device = format_gbp_vxlan_tunnel,
427  .format_tx_trace = format_gbp_vxlan_tx_trace,
428  .admin_up_down_function = gbp_vxlan_interface_admin_up_down,
429  .tx_function = gbp_vxlan_interface_tx,
430 };
431 
432 VNET_HW_INTERFACE_CLASS (gbp_vxlan_hw_interface_class) = {
433  .name = "GBP-VXLAN",
434  .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
435 };
436 /* *INDENT-ON* */
437 
438 int
439 gbp_vxlan_tunnel_add (u32 vni, gbp_vxlan_tunnel_layer_t layer,
440                       u32 bd_rd_id,
441  const ip4_address_t * src, u32 * sw_if_indexp)
442 {
443  gbp_vxlan_tunnel_t *gt;
444  index_t gti;
445  uword *p;
446  int rv;
447 
448  rv = 0;
449  p = hash_get (gv_db, vni);
450 
451  GBP_VXLAN_TUN_DBG ("add: %d %d %d", vni, layer, bd_rd_id);
452 
453  if (NULL == p)
454  {
455  vnet_sw_interface_t *si;
456  vnet_hw_interface_t *hi;
457  index_t gbi, grdi;
458  vnet_main_t *vnm;
459 
460  gbi = grdi = INDEX_INVALID;
461 
462  if (layer == GBP_VXLAN_TUN_L2)
463  {
464  gbi = gbp_bridge_domain_find_and_lock (bd_rd_id);
465 
466  if (INDEX_INVALID == gbi)
467  {
468  return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
469  }
470  }
471  else
472  {
473  grdi = gbp_route_domain_find_and_lock (bd_rd_id);
474 
475  if (INDEX_INVALID == grdi)
476  {
477  return (VNET_API_ERROR_NO_SUCH_FIB);
478  }
479  }
480 
481  vnm = vnet_get_main ();
482  pool_get (gbp_vxlan_tunnel_pool, gt);
483  gti = gt - gbp_vxlan_tunnel_pool;
484 
485  gt->gt_vni = vni;
486  gt->gt_layer = layer;
487  gt->gt_bd_rd_id = bd_rd_id;
488  gt->gt_src.ip4.as_u32 = src->as_u32;
489  gt->gt_hw_if_index = vnet_register_interface (vnm,
490                                                gbp_vxlan_device_class.index,
491  gti,
492  gbp_vxlan_hw_interface_class.index,
493  gti);
494 
495  hi = vnet_get_hw_interface (vnm, gt->gt_hw_if_index);
496 
497  gt->gt_sw_if_index = hi->sw_if_index;
498 
499  /* don't flood packets in a BD to these interfaces */
500  si = vnet_get_sw_interface (vnm, gt->gt_sw_if_index);
501  si->flood_class = VNET_FLOOD_CLASS_NO_FLOOD;
502 
503  if (layer == GBP_VXLAN_TUN_L2)
504  {
505  gbp_bridge_domain_t *gb;
506 
507  gb = gbp_bridge_domain_get (gbi);
508 
509  gt->gt_gbd = gbi;
510  gb->gb_vni = gti;
511  /* set it up as a GBP interface */
512  gt->gt_itf = gbp_itf_l2_add_and_lock (gt->gt_sw_if_index,
513                                        gt->gt_gbd);
514  gbp_itf_l2_set_input_feature (gt->gt_itf, L2INPUT_FEAT_GBP_LEARN);
515  }
516  else
517  {
518  gt->gt_grd = grdi;
519  gt->gt_itf = gbp_itf_l3_add_and_lock (gt->gt_sw_if_index,
520                                        gt->gt_grd);
521  gbp_itf_l3_set_input_feature (gt->gt_itf, GBP_ITF_L3_FEAT_LEARN);
522  }
523 
524  /*
525  * save the tunnel by VNI and by sw_if_index
526  */
527  hash_set (gv_db, vni, gti);
528 
529  vec_validate_init_empty (gbp_vxlan_tunnel_db,
530                           gt->gt_sw_if_index, INDEX_INVALID);
531  gbp_vxlan_tunnel_db[gt->gt_sw_if_index] = gti;
532 
533  if (sw_if_indexp)
534  *sw_if_indexp = gt->gt_sw_if_index;
535 
536  vxlan_gbp_register_udp_ports ();
537  }
538  else
539  {
540  gti = p[0];
541  rv = VNET_API_ERROR_IF_ALREADY_EXISTS;
542  }
543 
544  GBP_VXLAN_TUN_DBG ("add: %U", format_gbp_vxlan_tunnel, gti);
545 
546  return (rv);
547 }
548 
549 int
550 gbp_vxlan_tunnel_del (u32 vni)
551 {
552  gbp_vxlan_tunnel_t *gt;
553  uword *p;
554 
555  p = hash_get (gv_db, vni);
556 
557  if (NULL != p)
558  {
559  vnet_main_t *vnm;
560 
561  vnm = vnet_get_main ();
562  gt = gbp_vxlan_tunnel_get (p[0]);
563 
564  vxlan_gbp_unregister_udp_ports ();
565 
566  GBP_VXLAN_TUN_DBG ("del: %U", format_gbp_vxlan_tunnel,
567                     gt - gbp_vxlan_tunnel_pool);
568 
569  gbp_endpoint_flush (GBP_ENDPOINT_SRC_DP, gt->gt_sw_if_index);
570  ASSERT (0 == vec_len (gt->gt_tuns));
571  vec_free (gt->gt_tuns);
572 
573  gbp_itf_unlock (&gt->gt_itf);
574 
575  if (GBP_VXLAN_TUN_L2 == gt->gt_layer)
576  {
577  gbp_bridge_domain_unlock (gt->gt_gbd);
578  }
579  else
580  {
581  gbp_route_domain_unlock (gt->gt_grd);
582  }
583 
584  vnet_sw_interface_set_flags (vnm, gt->gt_sw_if_index, 0);
585  vnet_delete_hw_interface (vnm, gt->gt_hw_if_index);
586 
587  hash_unset (gv_db, vni);
588  gbp_vxlan_tunnel_db[gt->gt_sw_if_index] = INDEX_INVALID;
589 
590  pool_put (gbp_vxlan_tunnel_pool, gt);
591  }
592  else
593  return VNET_API_ERROR_NO_SUCH_ENTRY;
594 
595  return (0);
596 }
597 
598 static clib_error_t *
599 gbp_vxlan_show (vlib_main_t * vm,
600                 unformat_input_t * input, vlib_cli_command_t * cmd)
601 {
602 
603  vlib_cli_output (vm, "GBP-VXLAN Interfaces:");
604 
605  gbp_vxlan_walk (gbp_vxlan_tunnel_show_one, vm);
606 
607  return (NULL);
608 }
609 
610 /*?
611  * Show Group Based Policy VXLAN tunnels
612  *
613  * @cliexpar
614  * @cliexstart{show gbp vxlan}
615  * @cliexend
616  ?*/
617 /* *INDENT-OFF* */
618 VLIB_CLI_COMMAND (gbp_vxlan_show_node, static) = {
619  .path = "show gbp vxlan",
620  .short_help = "show gbp vxlan\n",
621  .function = gbp_vxlan_show,
622 };
623 /* *INDENT-ON* */
624 
625 static clib_error_t *
626 gbp_vxlan_init (vlib_main_t * vm)
627 {
628  vxlan_gbp_main_t *vgm = &vxlan_gbp_main;
629 
630  gt_logger = vlib_log_register_class ("gbp", "tun");
631 
632  punt_hdl = vlib_punt_client_register ("gbp-vxlan");
633 
636  "gbp-vxlan4");
637 
638  return (0);
639 }
640 
641 /* *INDENT-OFF* */
642 VLIB_INIT_FUNCTION (gbp_vxlan_init) =
643 {
644  .runs_after = VLIB_INITS("punt_init", "vxlan_gbp_init"),
645 };
646 /* *INDENT-ON* */
647 
648 /*
649  * fd.io coding-style-patch-verification: ON
650  *
651  * Local Variables:
652  * eval: (c-set-style "gnu")
653  * End:
654  */
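A minimal usage sketch, not part of gbp_vxlan.c: it shows how a control-plane caller could exercise the public API defined above (gbp_vxlan_tunnel_add, gbp_vxlan_tunnel_clone_and_lock, gbp_itf_unlock, gbp_vxlan_tunnel_del). The helper name example_gbp_vxlan_usage and the VNI, bridge-domain id and VTEP addresses are invented for illustration; only the called functions and their signatures come from the file itself.

/*
 * Illustrative sketch only -- NOT part of gbp_vxlan.c.
 * Create an L2 template tunnel, clone a child vxlan-gbp tunnel towards a
 * remote VTEP, release the child, then delete the template.  The helper
 * name, VNI, bridge-domain id and addresses are invented.
 */
#include <plugins/gbp/gbp_vxlan.h>

static int
example_gbp_vxlan_usage (void)
{
  ip4_address_t src4, dst4;
  ip46_address_t src, dst;
  gbp_itf_hdl_t gh;
  u32 tmpl_sw_if_index = ~0;
  int rv;

  src4.as_u32 = clib_host_to_net_u32 (0x0a000001);	/* 10.0.0.1: local VTEP */
  dst4.as_u32 = clib_host_to_net_u32 (0x0a000002);	/* 10.0.0.2: remote VTEP */

  /* template (GBP_VXLAN_TEMPLATE_TUNNEL) interface: L2, VNI 99, BD id 10 */
  rv = gbp_vxlan_tunnel_add (99, GBP_VXLAN_TUN_L2, 10, &src4,
			     &tmpl_sw_if_index);
  if (rv)
    return (rv);

  /* a learnt remote VTEP triggers a child/dependent vxlan-gbp tunnel */
  ip46_address_set_ip4 (&src, &src4);
  ip46_address_set_ip4 (&dst, &dst4);
  gh = gbp_vxlan_tunnel_clone_and_lock (tmpl_sw_if_index, &src, &dst);

  /* release the dependent again: gbp_vxlan_tunnel_del() asserts that no
   * children remain on the template before it is removed by VNI */
  gbp_itf_unlock (&gh);

  return (gbp_vxlan_tunnel_del (99));
}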