FD.io VPP v21.06 (Vector Packet Processing)
node.c - load-balancer (lb) plugin data-plane nodes
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <lb/lb.h>
17 #include <vnet/fib/ip4_fib.h>
18 
19 #include <vnet/gre/packet.h>
20 #include <lb/lbhash.h>
21 
22 #define foreach_lb_error \
23  _(NONE, "no error") \
24  _(PROTO_NOT_SUPPORTED, "protocol not supported")
25 
26 typedef enum
27 {
28 #define _(sym,str) LB_ERROR_##sym,
29  foreach_lb_error
30 #undef _
31  LB_N_ERROR,
32 } lb_error_t;
33 
34 static char *lb_error_strings[] =
35  {
36 #define _(sym,string) string,
37  foreach_lb_error
38 #undef _
39  };
40 
41 typedef struct
42 {
43  u32 vip_index;
44  u32 as_index;
45 } lb_trace_t;
46 
47 typedef struct
48 {
49  u32 vip_index;
50 
51  u32 node_port;
52 } lb_nodeport_trace_t;
53 
54 typedef struct
55 {
56  u32 vip_index;
57  u32 as_index;
58  u32 rx_sw_if_index;
59  u32 next_index;
60 } lb_nat_trace_t;
61 
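/*
 * Trace decoders below: they format the VIP / AS pool indices captured at
 * forwarding time. The referenced pool entries may have been freed since the
 * packet was traced, hence the pool_is_free_index() checks.
 */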
62 u8 *
63 format_lb_trace (u8 * s, va_list * args)
64 {
65  lb_main_t *lbm = &lb_main;
66  CLIB_UNUSED(vlib_main_t * vm)
67 = va_arg (*args, vlib_main_t *);
68  CLIB_UNUSED(vlib_node_t * node)
69  = va_arg (*args, vlib_node_t *);
70  lb_trace_t *t = va_arg (*args, lb_trace_t *);
71  if (pool_is_free_index(lbm->vips, t->vip_index))
72  {
73  s = format (s, "lb vip[%d]: This VIP was freed since capture\n", t->vip_index);
74  }
75  else
76  {
77  s = format (s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip,
78  &lbm->vips[t->vip_index]);
79  }
80  if (pool_is_free_index(lbm->ass, t->as_index))
81  {
82  s = format (s, "lb as[%d]: This AS was freed since capture\n", t->as_index);
83  }
84  else
85  {
86  s = format (s, "lb as[%d]: %U\n", t->as_index, format_lb_as,
87  &lbm->ass[t->as_index]);
88  }
89  return s;
90 }
91 
92 u8 *
93 format_lb_nat_trace (u8 * s, va_list * args)
94 {
95  lb_main_t *lbm = &lb_main;
96  CLIB_UNUSED(vlib_main_t * vm)
97 = va_arg (*args, vlib_main_t *);
98  CLIB_UNUSED(vlib_node_t * node)
99  = va_arg (*args, vlib_node_t *);
100  lb_nat_trace_t *t = va_arg (*args, lb_nat_trace_t *);
101 
102  if (pool_is_free_index(lbm->vips, t->vip_index))
103  {
104  s = format (s, "lb vip[%d]: This VIP was freed since capture\n", t->vip_index);
105  }
106  else
107  {
108  s = format (s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip,
109  &lbm->vips[t->vip_index]);
110  }
111  if (pool_is_free_index(lbm->ass, t->as_index))
112  {
113  s = format (s, "lb as[%d]: This AS was freed since capture\n", t->as_index);
114  }
115  else
116  {
117  s = format (s, "lb as[%d]: %U\n", t->as_index, format_lb_as,
118  &lbm->ass[t->as_index]);
119  }
120  s = format (s, "lb nat: rx_sw_if_index = %d, next_index = %d",
121  t->rx_sw_if_index, t->next_index);
122 
123  return s;
124 }
125 
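/*
 * Return this worker's sticky (flow affinity) hash table. If the configured
 * bucket count changed, the old table is drained (AS refcounts released) and
 * freed; a new table is allocated on demand and its flow timeout refreshed
 * on every call.
 */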
126 lb_hash_t *
127 lb_get_sticky_table (u32 thread_index)
128 {
129  lb_main_t *lbm = &lb_main;
130  lb_hash_t *sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
131  //Check if size changed
132  if (PREDICT_FALSE(
133  sticky_ht && (lbm->per_cpu_sticky_buckets != lb_hash_nbuckets(sticky_ht))))
134  {
135  //Dereference everything in there
136  lb_hash_bucket_t *b;
137  u32 i;
138  lb_hash_foreach_entry(sticky_ht, b, i)
139  {
140  vlib_refcount_add (&lbm->as_refcount, thread_index, b->value[i], -1);
141  vlib_refcount_add (&lbm->as_refcount, thread_index, 0, 1);
142  }
143 
144  lb_hash_free (sticky_ht);
145  sticky_ht = NULL;
146  }
147 
148  //Create if necessary
149  if (PREDICT_FALSE(sticky_ht == NULL))
150  {
151  lbm->per_cpu[thread_index].sticky_ht = lb_hash_alloc (
152  lbm->per_cpu_sticky_buckets, lbm->flow_timeout);
153  sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
154  clib_warning("Regenerated sticky table %p", sticky_ht);
155  }
156 
157  ASSERT(sticky_ht);
158 
159  //Update timeout
160  sticky_ht->timeout = lbm->flow_timeout;
161  return sticky_ht;
162 }
163 
164 u64
165 lb_node_get_other_ports4 (ip4_header_t *ip40)
166 {
167  return 0;
168 }
169 
170 u64
171 lb_node_get_other_ports6 (ip6_header_t *ip60)
172 {
173  return 0;
174 }
175 
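/*
 * Compute the per-flow hash and resolve the VIP index for one buffer.
 * The plain VIP case reuses the index stored by ip lookup in the TX
 * adjacency; the per-port-VIP case additionally searches the
 * vip_index_per_port bihash with (prefix index, protocol, port),
 * falling back to VIP index 0 on a miss.
 */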
176 static_always_inline void
177 lb_node_get_hash (lb_main_t *lbm, vlib_buffer_t *p, u8 is_input_v4,
178  u32 *hash, u32 *vip_idx, u8 per_port_vip)
179 {
180  vip_port_key_t key;
181  clib_bihash_kv_8_8_t kv, value;
182 
183  /* For vip case, retrieve vip index for ip lookup */
184  *vip_idx = vnet_buffer (p)->ip.adj_index[VLIB_TX];
185 
186  if (per_port_vip)
187  {
188  /* For per-port-vip case, ip lookup stores placeholder index */
189  key.vip_prefix_index = *vip_idx;
190  }
191 
192  if (is_input_v4)
193  {
194  ip4_header_t *ip40;
195  u64 ports;
196 
197  ip40 = vlib_buffer_get_current (p);
198  if (PREDICT_TRUE(
199  ip40->protocol == IP_PROTOCOL_TCP
200  || ip40->protocol == IP_PROTOCOL_UDP))
201  ports = ((u64) ((udp_header_t *) (ip40 + 1))->src_port << 16)
202  | ((u64) ((udp_header_t *) (ip40 + 1))->dst_port);
203  else
204  ports = lb_node_get_other_ports4 (ip40);
205 
206  *hash = lb_hash_hash (*((u64 *) &ip40->address_pair), ports, 0, 0, 0);
207 
208  if (per_port_vip)
209  {
210  key.protocol = ip40->protocol;
211  key.port = (u16)(ports & 0xFFFF);
212  }
213  }
214  else
215  {
216  ip6_header_t *ip60;
217  ip60 = vlib_buffer_get_current (p);
218  u64 ports;
219 
220  if (PREDICT_TRUE(
221  ip60->protocol == IP_PROTOCOL_TCP
222  || ip60->protocol == IP_PROTOCOL_UDP))
223  ports = ((u64) ((udp_header_t *) (ip60 + 1))->src_port << 16)
224  | ((u64) ((udp_header_t *) (ip60 + 1))->dst_port);
225  else
226  ports = lb_node_get_other_ports6 (ip60);
227 
228  *hash = lb_hash_hash (ip60->src_address.as_u64[0],
229  ip60->src_address.as_u64[1],
230  ip60->dst_address.as_u64[0],
231  ip60->dst_address.as_u64[1], ports);
232 
233  if (per_port_vip)
234  {
235  key.protocol = ip60->protocol;
236  key.port = (u16)(ports & 0xFFFF);
237  }
238  }
239 
240  /* For per-port-vip case, retrieve vip index for vip_port_filter table */
241  if (per_port_vip)
242  {
243  kv.key = key.as_u64;
244  if (clib_bihash_search_8_8(&lbm->vip_index_per_port, &kv, &value) < 0)
245  {
246  /* return default vip */
247  *vip_idx = 0;
248  return;
249  }
250  *vip_idx = value.value;
251  }
252 }
253 
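/*
 * Main load-balancing loop, specialized at compile time by address family,
 * encapsulation type (GRE4/GRE6/L3DSR/NAT4/NAT6) and per-port-VIP mode.
 * For each packet: look up the sticky table, pick an AS from the VIP's
 * new-flow table when no entry exists yet, maintain AS refcounts and VIP
 * counters, apply the encapsulation or rewrite, then enqueue to the AS DPO.
 */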
254 static_always_inline uword
255 lb_node_fn (vlib_main_t * vm,
256  vlib_node_runtime_t * node,
257  vlib_frame_t * frame,
258  u8 is_input_v4, //Compile-time parameter stating whether input is v4 (or v6)
259  lb_encap_type_t encap_type, //Compile-time parameter selecting GRE4/GRE6/L3DSR/NAT4/NAT6 encap
260  u8 per_port_vip) //Compile-time parameter stating whether this is a per-port VIP
261 {
262  lb_main_t *lbm = &lb_main;
263  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
264  u32 thread_index = vm->thread_index;
265  u32 lb_time = lb_hash_time_now (vm);
266 
267  lb_hash_t *sticky_ht = lb_get_sticky_table (thread_index);
268  from = vlib_frame_vector_args (frame);
269  n_left_from = frame->n_vectors;
270  next_index = node->cached_next_index;
271 
272  u32 nexthash0 = 0;
273  u32 next_vip_idx0 = ~0;
274  if (PREDICT_TRUE(n_left_from > 0))
275  {
276  vlib_buffer_t *p0 = vlib_get_buffer (vm, from[0]);
277  lb_node_get_hash (lbm, p0, is_input_v4, &nexthash0,
278  &next_vip_idx0, per_port_vip);
279  }
280 
281  while (n_left_from > 0)
282  {
283  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
284  while (n_left_from > 0 && n_left_to_next > 0)
285  {
286  u32 pi0;
287  vlib_buffer_t *p0;
288  lb_vip_t *vip0;
289  u32 asindex0 = 0;
290  u16 len0;
291  u32 available_index0;
292  u8 counter = 0;
293  u32 hash0 = nexthash0;
294  u32 vip_index0 = next_vip_idx0;
295  u32 next0;
296 
297  if (PREDICT_TRUE(n_left_from > 1))
298  {
299  vlib_buffer_t *p1 = vlib_get_buffer (vm, from[1]);
300  //Compute next hash and prefetch bucket
301  lb_node_get_hash (lbm, p1, is_input_v4,
302  &nexthash0, &next_vip_idx0,
303  per_port_vip);
304  lb_hash_prefetch_bucket (sticky_ht, nexthash0);
305  //Prefetch for encap, next
306  CLIB_PREFETCH(vlib_buffer_get_current (p1) - 64, 64, STORE);
307  }
308 
309  if (PREDICT_TRUE(n_left_from > 2))
310  {
311  vlib_buffer_t *p2;
312  p2 = vlib_get_buffer (vm, from[2]);
313  /* prefetch packet header and data */
314  vlib_prefetch_buffer_header(p2, STORE);
315  CLIB_PREFETCH(vlib_buffer_get_current (p2), 64, STORE);
316  }
317 
318  pi0 = to_next[0] = from[0];
319  from += 1;
320  n_left_from -= 1;
321  to_next += 1;
322  n_left_to_next -= 1;
323 
324  p0 = vlib_get_buffer (vm, pi0);
325 
326  vip0 = pool_elt_at_index(lbm->vips, vip_index0);
327 
328  if (is_input_v4)
329  {
330  ip4_header_t *ip40;
331  ip40 = vlib_buffer_get_current (p0);
332  len0 = clib_net_to_host_u16 (ip40->length);
333  }
334  else
335  {
336  ip6_header_t *ip60;
337  ip60 = vlib_buffer_get_current (p0);
338  len0 = clib_net_to_host_u16 (ip60->payload_length)
339  + sizeof(ip6_header_t);
340  }
341 
342  lb_hash_get (sticky_ht, hash0,
343  vip_index0, lb_time,
344  &available_index0, &asindex0);
345 
346  if (PREDICT_TRUE(asindex0 != 0))
347  {
348  //Found an existing entry
349  counter = LB_VIP_COUNTER_NEXT_PACKET;
350  }
351  else if (PREDICT_TRUE(available_index0 != ~0))
352  {
353  //There is an available slot for a new flow
354  asindex0 =
355  vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].as_index;
356  counter = LB_VIP_COUNTER_FIRST_PACKET;
357  counter = (asindex0 == 0) ? LB_VIP_COUNTER_NO_SERVER : counter;
358 
359  //TODO: There are race conditions with as0 and vip0 manipulation.
360  //Configuration may be changed, vectors resized, etc...
361 
362  //Dereference previously used
363  vlib_refcount_add (
364  &lbm->as_refcount, thread_index,
365  lb_hash_available_value (sticky_ht, hash0, available_index0),
366  -1);
367  vlib_refcount_add (&lbm->as_refcount, thread_index, asindex0, 1);
368 
369  //Add sticky entry
370  //Note that when there is no AS configured, an entry is added anyway.
371  //But having no configured AS is not something that should happen.
372  lb_hash_put (sticky_ht, hash0, asindex0,
373  vip_index0,
374  available_index0, lb_time);
375  }
376  else
377  {
378  //Could not store new entry in the table
379  asindex0 =
380  vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].as_index;
381  counter = LB_VIP_COUNTER_UNTRACKED_PACKET;
382  }
383 
384  vlib_increment_simple_counter (
385  &lbm->vip_counters[counter], thread_index,
386  vip_index0,
387  1);
388 
389  //Now let's encap
390  if ((encap_type == LB_ENCAP_TYPE_GRE4)
391  || (encap_type == LB_ENCAP_TYPE_GRE6))
392  {
393  gre_header_t *gre0;
394  if (encap_type == LB_ENCAP_TYPE_GRE4) /* encap GRE4*/
395  {
396  ip4_header_t *ip40;
397  vlib_buffer_advance (
398  p0, -sizeof(ip4_header_t) - sizeof(gre_header_t));
399  ip40 = vlib_buffer_get_current (p0);
400  gre0 = (gre_header_t *) (ip40 + 1);
401  ip40->src_address = lbm->ip4_src_address;
402  ip40->dst_address = lbm->ass[asindex0].address.ip4;
403  ip40->ip_version_and_header_length = 0x45;
404  ip40->ttl = 128;
405  ip40->fragment_id = 0;
406  ip40->flags_and_fragment_offset = 0;
407  ip40->length = clib_host_to_net_u16 (
408  len0 + sizeof(gre_header_t) + sizeof(ip4_header_t));
409  ip40->protocol = IP_PROTOCOL_GRE;
410  ip40->checksum = ip4_header_checksum (ip40);
411  }
412  else /* encap GRE6*/
413  {
414  ip6_header_t *ip60;
415  vlib_buffer_advance (
416  p0, -sizeof(ip6_header_t) - sizeof(gre_header_t));
417  ip60 = vlib_buffer_get_current (p0);
418  gre0 = (gre_header_t *) (ip60 + 1);
419  ip60->dst_address = lbm->ass[asindex0].address.ip6;
420  ip60->src_address = lbm->ip6_src_address;
421  ip60->hop_limit = 128;
422  ip60->ip_version_traffic_class_and_flow_label =
423  clib_host_to_net_u32 (0x6 << 28);
424  ip60->payload_length = clib_host_to_net_u16 (
425  len0 + sizeof(gre_header_t));
426  ip60->protocol = IP_PROTOCOL_GRE;
427  }
428 
429  gre0->flags_and_version = 0;
430  gre0->protocol =
431  (is_input_v4) ?
432  clib_host_to_net_u16 (0x0800) :
433  clib_host_to_net_u16 (0x86DD);
434  }
435  else if (encap_type == LB_ENCAP_TYPE_L3DSR) /* encap L3DSR*/
436  {
437  ip4_header_t *ip40;
438  ip_csum_t csum;
439  u32 old_dst, new_dst;
440  u8 old_tos, new_tos;
441 
442  ip40 = vlib_buffer_get_current (p0);
443  old_dst = ip40->dst_address.as_u32;
444  new_dst = lbm->ass[asindex0].address.ip4.as_u32;
445  ip40->dst_address.as_u32 = lbm->ass[asindex0].address.ip4.as_u32;
446  /* Get and rewrite DSCP bit */
447  old_tos = ip40->tos;
448  new_tos = (u8) ((vip0->encap_args.dscp & 0x3F) << 2);
449  ip40->tos = (u8) ((vip0->encap_args.dscp & 0x3F) << 2);
450 
451  csum = ip40->checksum;
452  csum = ip_csum_update (csum, old_tos, new_tos,
453  ip4_header_t,
454  tos /* changed member */);
455  csum = ip_csum_update (csum, old_dst, new_dst,
456  ip4_header_t,
457  dst_address /* changed member */);
458  ip40->checksum = ip_csum_fold (csum);
459 
460  /* Recomputing L4 checksum after dst-IP modifying */
461  if (ip40->protocol == IP_PROTOCOL_TCP)
462  {
463  tcp_header_t *th0;
464  th0 = ip4_next_header (ip40);
465  th0->checksum = 0;
466  th0->checksum = ip4_tcp_udp_compute_checksum (vm, p0, ip40);
467  }
468  else if (ip40->protocol == IP_PROTOCOL_UDP)
469  {
470  udp_header_t *uh0;
471  uh0 = ip4_next_header (ip40);
472  uh0->checksum = 0;
473  uh0->checksum = ip4_tcp_udp_compute_checksum (vm, p0, ip40);
474  }
475  }
476  else if ((encap_type == LB_ENCAP_TYPE_NAT4)
477  || (encap_type == LB_ENCAP_TYPE_NAT6))
478  {
479  ip_csum_t csum;
480  udp_header_t *uh;
481 
482  /* do NAT */
483  if ((is_input_v4 == 1) && (encap_type == LB_ENCAP_TYPE_NAT4))
484  {
485  /* NAT44 */
486  ip4_header_t *ip40;
487  u32 old_dst;
488  ip40 = vlib_buffer_get_current (p0);
489  uh = (udp_header_t *) (ip40 + 1);
490  old_dst = ip40->dst_address.as_u32;
491  ip40->dst_address = lbm->ass[asindex0].address.ip4;
492 
493  csum = ip40->checksum;
494  csum = ip_csum_sub_even (csum, old_dst);
495  csum = ip_csum_add_even (
496  csum, lbm->ass[asindex0].address.ip4.as_u32);
497  ip40->checksum = ip_csum_fold (csum);
498 
499  if (ip40->protocol == IP_PROTOCOL_UDP)
500  {
501  uh->dst_port = vip0->encap_args.target_port;
502  csum = uh->checksum;
503  csum = ip_csum_sub_even (csum, old_dst);
504  csum = ip_csum_add_even (
505  csum, lbm->ass[asindex0].address.ip4.as_u32);
506  uh->checksum = ip_csum_fold (csum);
507  }
508  else
509  {
510  asindex0 = 0;
511  }
512  }
513  else if ((is_input_v4 == 0) && (encap_type == LB_ENCAP_TYPE_NAT6))
514  {
515  /* NAT66 */
516  ip6_header_t *ip60;
517  ip6_address_t old_dst;
518 
519  ip60 = vlib_buffer_get_current (p0);
520  uh = (udp_header_t *) (ip60 + 1);
521 
522  old_dst.as_u64[0] = ip60->dst_address.as_u64[0];
523  old_dst.as_u64[1] = ip60->dst_address.as_u64[1];
524  ip60->dst_address.as_u64[0] =
525  lbm->ass[asindex0].address.ip6.as_u64[0];
526  ip60->dst_address.as_u64[1] =
527  lbm->ass[asindex0].address.ip6.as_u64[1];
528 
529  if (PREDICT_TRUE(ip60->protocol == IP_PROTOCOL_UDP))
530  {
531  uh->dst_port = vip0->encap_args.target_port;
532  csum = uh->checksum;
533  csum = ip_csum_sub_even (csum, old_dst.as_u64[0]);
534  csum = ip_csum_sub_even (csum, old_dst.as_u64[1]);
535  csum = ip_csum_add_even (
536  csum, lbm->ass[asindex0].address.ip6.as_u64[0]);
537  csum = ip_csum_add_even (
538  csum, lbm->ass[asindex0].address.ip6.as_u64[1]);
539  uh->checksum = ip_csum_fold (csum);
540  }
541  else
542  {
543  asindex0 = 0;
544  }
545  }
546  }
547  next0 = lbm->ass[asindex0].dpo.dpoi_next_node;
548  //Note that this is going to error if asindex0 == 0
549  vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
550  lbm->ass[asindex0].dpo.dpoi_index;
551 
552  if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
553  {
554  lb_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof(*tr));
555  tr->as_index = asindex0;
556  tr->vip_index = vip_index0;
557  }
558 
559  //Enqueue to next
560  vlib_validate_buffer_enqueue_x1(
561  vm, node, next_index, to_next, n_left_to_next, pi0, next0);
562  }
563  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
564  }
565 
566  return frame->n_vectors;
567 }
568 
569 u8 *
570 format_nodeport_lb_trace (u8 * s, va_list * args)
571 {
572  lb_main_t *lbm = &lb_main;
573  CLIB_UNUSED(vlib_main_t * vm)
574 = va_arg (*args, vlib_main_t *);
575  CLIB_UNUSED(vlib_node_t * node)
576  = va_arg (*args, vlib_node_t *);
577  lb_nodeport_trace_t *t = va_arg (*args, lb_nodeport_trace_t *);
578  if (pool_is_free_index(lbm->vips, t->vip_index))
579  {
580  s = format (s, "lb vip[%d]: This VIP was freed since capture\n", t->vip_index);
581  }
582  else
583  {
584  s = format (s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip,
585  &lbm->vips[t->vip_index]);
586  }
587 
588  s = format (s, " lb node_port: %d", t->node_port);
589 
590  return s;
591 }
592 
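/*
 * NodePort dispatch node: rewind the buffer to the IP header, look up the
 * VIP by UDP destination port in vip_index_by_nodeport, store the result
 * (or ADJ_INDEX_INVALID) in the TX adjacency, and forward to the per-port
 * NAT node for the matching address family.
 */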
593 static uword
594 lb_nodeport_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
595  vlib_frame_t * frame, u8 is_input_v4)
596 {
597  lb_main_t *lbm = &lb_main;
598  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
599 
600  from = vlib_frame_vector_args (frame);
601  n_left_from = frame->n_vectors;
602  next_index = node->cached_next_index;
603 
604  while (n_left_from > 0)
605  {
606  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
607 
608  while (n_left_from > 0 && n_left_to_next > 0)
609  {
610  u32 pi0;
611  vlib_buffer_t *p0;
612  udp_header_t * udp_0;
613  uword * entry0;
614 
615  if (PREDICT_TRUE(n_left_from > 1))
616  {
617  vlib_buffer_t *p1 = vlib_get_buffer (vm, from[1]);
618  //Prefetch for encap, next
619  CLIB_PREFETCH(vlib_buffer_get_current (p1) - 64, 64, STORE);
620  }
621 
622  if (PREDICT_TRUE(n_left_from > 2))
623  {
624  vlib_buffer_t *p2;
625  p2 = vlib_get_buffer (vm, from[2]);
626  /* prefetch packet header and data */
627  vlib_prefetch_buffer_header(p2, STORE);
628  CLIB_PREFETCH(vlib_buffer_get_current (p2), 64, STORE);
629  }
630 
631  pi0 = to_next[0] = from[0];
632  from += 1;
633  n_left_from -= 1;
634  to_next += 1;
635  n_left_to_next -= 1;
636 
637  p0 = vlib_get_buffer (vm, pi0);
638 
639  if (is_input_v4)
640  {
641  ip4_header_t *ip40;
642  vlib_buffer_advance (
643  p0, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
644  ip40 = vlib_buffer_get_current (p0);
645  udp_0 = (udp_header_t *) (ip40 + 1);
646  }
647  else
648  {
649  ip6_header_t *ip60;
650  vlib_buffer_advance (
651  p0, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
652  ip60 = vlib_buffer_get_current (p0);
653  udp_0 = (udp_header_t *) (ip60 + 1);
654  }
655 
656  entry0 = hash_get_mem(lbm->vip_index_by_nodeport, &(udp_0->dst_port));
657 
658  //Enqueue to next
659  vnet_buffer(p0)->ip.adj_index[VLIB_TX] = entry0 ? entry0[0]
660  : ADJ_INDEX_INVALID;
661 
662  if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
663  {
664  lb_nodeport_trace_t *tr = vlib_add_trace (vm, node, p0,
665  sizeof(*tr));
666  tr->vip_index = entry0 ? entry0[0] : ADJ_INDEX_INVALID;
667  tr->node_port = (u32) clib_net_to_host_u16 (udp_0->dst_port);
668  }
669 
670  vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
671  n_left_to_next, pi0,
672  is_input_v4 ?
673  LB4_NODEPORT_NEXT_IP4_NAT4 : LB6_NODEPORT_NEXT_IP6_NAT6);
674  }
675  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
676  }
677 
678  return frame->n_vectors;
679 
680 }
681 
682 /**
683  * @brief Match NAT44 static mapping.
684  *
685  * @param lbm   lb plugin main structure.
686  * @param match Address and port to match.
687  * @param index Index of the matched static mapping in the pool.
688  *
689  * @returns 0 if match found, otherwise 1.
690  */
691 int
692 lb_nat44_mapping_match (lb_main_t *lbm, lb_snat4_key_t * match, u32 *index)
693 {
694  clib_bihash_kv_8_8_t kv4, value;
695  clib_bihash_8_8_t *mapping_hash = &lbm->mapping_by_as4;
696 
697  kv4.key = match->as_u64;
698  kv4.value = 0;
699  if (clib_bihash_search_8_8 (mapping_hash, &kv4, &value))
700  {
701  return 1;
702  }
703 
704  *index = value.value;
705  return 0;
706 }
707 
708 /**
709  * @brief Match NAT66 static mapping.
710  *
711  * @param lbm   lb plugin main structure.
712  * @param match Address and port to match.
713  * @param index Index of the matched static mapping in the pool.
714  *
715  * @returns 0 if match found, otherwise 1.
716  */
717 int
718 lb_nat66_mapping_match (lb_main_t *lbm, lb_snat6_key_t * match, u32 *index)
719 {
720  clib_bihash_kv_24_8_t kv6, value;
721  lb_snat6_key_t m_key6;
722  clib_bihash_24_8_t *mapping_hash = &lbm->mapping_by_as6;
723 
724  m_key6.addr.as_u64[0] = match->addr.as_u64[0];
725  m_key6.addr.as_u64[1] = match->addr.as_u64[1];
726  m_key6.port = match->port;
727  m_key6.protocol = 0;
728  m_key6.fib_index = 0;
729 
730  kv6.key[0] = m_key6.as_u64[0];
731  kv6.key[1] = m_key6.as_u64[1];
732  kv6.key[2] = m_key6.as_u64[2];
733  kv6.value = 0;
734  if (clib_bihash_search_24_8 (mapping_hash, &kv6, &value))
735  {
736  return 1;
737  }
738 
739  *index = value.value;
740  return 0;
741 }
742 
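/*
 * in2out SNAT path for traffic returning from application servers: match the
 * packet's source address/port (and rx fib index) against the static mapping
 * bihash and rewrite the source address and port, updating the IP/TCP/UDP
 * checksums incrementally. Packets with no matching mapping are dropped.
 */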
743 static uword
744 lb_nat_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
745  vlib_frame_t * frame, u32 is_nat4)
746 {
747  u32 n_left_from, *from, *to_next;
748  u32 next_index;
749  u32 pkts_processed = 0;
750  lb_main_t *lbm = &lb_main;
751  u32 stats_node_index;
752 
753  stats_node_index =
754  is_nat4 ? lb_nat4_in2out_node.index : lb_nat6_in2out_node.index;
755 
756  from = vlib_frame_vector_args (frame);
757  n_left_from = frame->n_vectors;
758  next_index = node->cached_next_index;
759 
760  while (n_left_from > 0)
761  {
762  u32 n_left_to_next;
763 
764  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
765 
766  while (n_left_from > 0 && n_left_to_next > 0)
767  {
768  u32 bi0;
769  vlib_buffer_t * b0;
770  u32 next0;
771  u32 sw_if_index0;
772  ip_csum_t csum;
773  u16 old_port0, new_port0;
774  udp_header_t * udp0;
775  tcp_header_t * tcp0;
776 
777  u32 proto0;
778  u32 rx_fib_index0;
779 
780  /* speculatively enqueue b0 to the current next frame */
781  bi0 = from[0];
782  to_next[0] = bi0;
783  from += 1;
784  to_next += 1;
785  n_left_from -= 1;
786  n_left_to_next -= 1;
787 
788  b0 = vlib_get_buffer (vm, bi0);
789 
790  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
791  rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (
792  sw_if_index0);
793 
794  if (is_nat4)
795  {
796  ip4_header_t * ip40;
797  u32 old_addr0, new_addr0;
798  lb_snat4_key_t key40;
799  lb_snat_mapping_t *sm40;
800  u32 index40;
801 
802  ip40 = vlib_buffer_get_current (b0);
803  udp0 = ip4_next_header (ip40);
804  tcp0 = (tcp_header_t *) udp0;
805  proto0 = lb_ip_proto_to_nat_proto (ip40->protocol);
806 
807  key40.addr = ip40->src_address;
808  key40.protocol = proto0;
809  key40.port = udp0->src_port;
810  key40.fib_index = rx_fib_index0;
811 
812  if (lb_nat44_mapping_match (lbm, &key40, &index40))
813  {
814  next0 = LB_NAT4_IN2OUT_NEXT_DROP;
815  goto trace0;
816  }
817 
818  sm40 = pool_elt_at_index(lbm->snat_mappings, index40);
819  new_addr0 = sm40->src_ip.ip4.as_u32;
820  new_port0 = sm40->src_port;
821  vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm40->fib_index;
822  old_addr0 = ip40->src_address.as_u32;
823  ip40->src_address.as_u32 = new_addr0;
824 
825  csum = ip40->checksum;
826  csum = ip_csum_sub_even (csum, old_addr0);
827  csum = ip_csum_add_even (csum, new_addr0);
828  ip40->checksum = ip_csum_fold (csum);
829 
830  if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_TCP))
831  {
832  old_port0 = tcp0->src_port;
833  tcp0->src_port = new_port0;
834 
835  csum = tcp0->checksum;
836  csum = ip_csum_sub_even (csum, old_addr0);
837  csum = ip_csum_sub_even (csum, old_port0);
838  csum = ip_csum_add_even (csum, new_addr0);
839  csum = ip_csum_add_even (csum, new_port0);
840  tcp0->checksum = ip_csum_fold (csum);
841  }
842  else if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_UDP))
843  {
844  old_port0 = udp0->src_port;
845  udp0->src_port = new_port0;
846 
847  csum = udp0->checksum;
848  csum = ip_csum_sub_even (csum, old_addr0);
849  csum = ip_csum_sub_even (csum, old_port0);
850  csum = ip_csum_add_even (csum, new_addr0);
851  csum = ip_csum_add_even (csum, new_port0);
852  udp0->checksum = ip_csum_fold (csum);
853  }
854 
855  pkts_processed += next0 != LB_NAT4_IN2OUT_NEXT_DROP;
856  }
857  else
858  {
859  ip6_header_t * ip60;
860  ip6_address_t old_addr0, new_addr0;
861  lb_snat6_key_t key60;
862  lb_snat_mapping_t *sm60;
863  u32 index60;
864 
865  ip60 = vlib_buffer_get_current (b0);
866  udp0 = ip6_next_header (ip60);
867  tcp0 = (tcp_header_t *) udp0;
868  proto0 = lb_ip_proto_to_nat_proto (ip60->protocol);
869 
870  key60.addr.as_u64[0] = ip60->src_address.as_u64[0];
871  key60.addr.as_u64[1] = ip60->src_address.as_u64[1];
872  key60.protocol = proto0;
873  key60.port = udp0->src_port;
874  key60.fib_index = rx_fib_index0;
875 
876  if (lb_nat66_mapping_match (lbm, &key60, &index60))
877  {
878  next0 = LB_NAT6_IN2OUT_NEXT_DROP;
879  goto trace0;
880  }
881 
882  sm60 = pool_elt_at_index(lbm->snat_mappings, index60);
883  new_addr0.as_u64[0] = sm60->src_ip.as_u64[0];
884  new_addr0.as_u64[1] = sm60->src_ip.as_u64[1];
885  new_port0 = sm60->src_port;
886  vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm60->fib_index;
887  old_addr0.as_u64[0] = ip60->src_address.as_u64[0];
888  old_addr0.as_u64[1] = ip60->src_address.as_u64[1];
889  ip60->src_address.as_u64[0] = new_addr0.as_u64[0];
890  ip60->src_address.as_u64[1] = new_addr0.as_u64[1];
891 
892  if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_TCP))
893  {
894  old_port0 = tcp0->src_port;
895  tcp0->src_port = new_port0;
896 
897  csum = tcp0->checksum;
898  csum = ip_csum_sub_even (csum, old_addr0.as_u64[0]);
899  csum = ip_csum_sub_even (csum, old_addr0.as_u64[1]);
900  csum = ip_csum_add_even (csum, new_addr0.as_u64[0]);
901  csum = ip_csum_add_even (csum, new_addr0.as_u64[1]);
902  csum = ip_csum_sub_even (csum, old_port0);
903  csum = ip_csum_add_even (csum, new_port0);
904  tcp0->checksum = ip_csum_fold (csum);
905  }
906  else if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_UDP))
907  {
908  old_port0 = udp0->src_port;
909  udp0->src_port = new_port0;
910 
911  csum = udp0->checksum;
912  csum = ip_csum_sub_even (csum, old_addr0.as_u64[0]);
913  csum = ip_csum_sub_even (csum, old_addr0.as_u64[1]);
914  csum = ip_csum_add_even (csum, new_addr0.as_u64[0]);
915  csum = ip_csum_add_even (csum, new_addr0.as_u64[1]);
916  csum = ip_csum_sub_even (csum, old_port0);
917  csum = ip_csum_add_even (csum, new_port0);
918  udp0->checksum = ip_csum_fold (csum);
919  }
920 
921  pkts_processed += next0 != LB_NAT4_IN2OUT_NEXT_DROP;
922  }
923 
924  trace0: if (PREDICT_FALSE(
925  (node->flags & VLIB_NODE_FLAG_TRACE) && (b0->flags & VLIB_BUFFER_IS_TRACED)))
926  {
927  lb_nat_trace_t *t = vlib_add_trace (vm, node, b0, sizeof(*t));
928  t->rx_sw_if_index = sw_if_index0;
929  t->next_index = next0;
930  }
931 
932  /* verify speculative enqueue, maybe switch current next frame */
933  vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
934  n_left_to_next, bi0, next0);
935  }
936 
937  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
938  }
939 
940  vlib_node_increment_counter (vm, stats_node_index,
941  LB_NAT_IN2OUT_ERROR_IN2OUT_PACKETS,
942  pkts_processed);
943  return frame->n_vectors;
944 }
945 
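/*
 * Thin per-graph-node wrappers: each binds the compile-time parameters
 * (address family, encap type, per-port-VIP flag) and calls lb_node_fn,
 * or selects the NAT4/NAT6 variant of lb_nat_in2out_node_fn.
 */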
946 static uword
947 lb6_gre6_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
948  vlib_frame_t * frame)
949 {
950  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE6, 0);
951 }
952 
953 static uword
954 lb6_gre4_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
955  vlib_frame_t * frame)
956 {
957  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE4, 0);
958 }
959 
960 static uword
961 lb4_gre6_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
962  vlib_frame_t * frame)
963 {
964  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE6, 0);
965 }
966 
967 static uword
968 lb4_gre4_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
969  vlib_frame_t * frame)
970 {
971  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE4, 0);
972 }
973 
974 static uword
975 lb6_gre6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
976  vlib_frame_t * frame)
977 {
978  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE6, 1);
979 }
980 
981 static uword
982 lb6_gre4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
983  vlib_frame_t * frame)
984 {
985  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE4, 1);
986 }
987 
988 static uword
989 lb4_gre6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
990  vlib_frame_t * frame)
991 {
992  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE6, 1);
993 }
994 
995 static uword
996 lb4_gre4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
997  vlib_frame_t * frame)
998 {
999  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE4, 1);
1000 }
1001 
1002 static uword
1003 lb4_l3dsr_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1004  vlib_frame_t * frame)
1005 {
1006  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_L3DSR, 0);
1007 }
1008 
1009 static uword
1010 lb4_l3dsr_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1011  vlib_frame_t * frame)
1012 {
1013  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_L3DSR, 1);
1014 }
1015 
1016 static uword
1017 lb6_nat6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1018  vlib_frame_t * frame)
1019 {
1020  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_NAT6, 1);
1021 }
1022 
1023 static uword
1024 lb4_nat4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1025  vlib_frame_t * frame)
1026 {
1027  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_NAT4, 1);
1028 }
1029 
1030 static uword
1031 lb_nat4_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1032  vlib_frame_t * frame)
1033 {
1034  return lb_nat_in2out_node_fn (vm, node, frame, 1);
1035 }
1036 
1037 static uword
1038 lb_nat6_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1039  vlib_frame_t * frame)
1040 {
1041  return lb_nat_in2out_node_fn (vm, node, frame, 0);
1042 }
1043 
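/*
 * Graph node and feature registrations: one lb node per (address family,
 * encap, per-port) variant, the NodePort dispatch nodes, and the NAT in2out
 * nodes, which are also hooked into the ip4-unicast / ip6-unicast feature
 * arcs so they run before ip4-lookup / ip6-lookup.
 */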
1044 VLIB_REGISTER_NODE (lb6_gre6_node) =
1045  {
1046  .function = lb6_gre6_node_fn,
1047  .name = "lb6-gre6",
1048  .vector_size = sizeof(u32),
1049  .format_trace = format_lb_trace,
1050  .n_errors = LB_N_ERROR,
1051  .error_strings = lb_error_strings,
1052  .n_next_nodes = LB_N_NEXT,
1053  .next_nodes =
1054  { [LB_NEXT_DROP] = "error-drop" },
1055  };
1056 
1057 VLIB_REGISTER_NODE (lb6_gre4_node) =
1058  {
1059  .function = lb6_gre4_node_fn,
1060  .name = "lb6-gre4",
1061  .vector_size = sizeof(u32),
1062  .format_trace = format_lb_trace,
1063  .n_errors = LB_N_ERROR,
1064  .error_strings = lb_error_strings,
1065  .n_next_nodes = LB_N_NEXT,
1066  .next_nodes =
1067  { [LB_NEXT_DROP] = "error-drop" },
1068  };
1069 
1070 VLIB_REGISTER_NODE (lb4_gre6_node) =
1071  {
1072  .function = lb4_gre6_node_fn,
1073  .name = "lb4-gre6",
1074  .vector_size = sizeof(u32),
1075  .format_trace = format_lb_trace,
1076  .n_errors = LB_N_ERROR,
1077  .error_strings = lb_error_strings,
1078  .n_next_nodes = LB_N_NEXT,
1079  .next_nodes =
1080  { [LB_NEXT_DROP] = "error-drop" },
1081  };
1082 
1083 VLIB_REGISTER_NODE (lb4_gre4_node) =
1084  {
1085  .function = lb4_gre4_node_fn,
1086  .name = "lb4-gre4",
1087  .vector_size = sizeof(u32),
1088  .format_trace = format_lb_trace,
1089  .n_errors = LB_N_ERROR,
1090  .error_strings = lb_error_strings,
1091  .n_next_nodes = LB_N_NEXT,
1092  .next_nodes =
1093  { [LB_NEXT_DROP] = "error-drop" },
1094  };
1095 
1096 VLIB_REGISTER_NODE (lb6_gre6_port_node) =
1097  {
1098  .function = lb6_gre6_port_node_fn,
1099  .name = "lb6-gre6-port",
1100  .vector_size = sizeof(u32),
1101  .format_trace = format_lb_trace,
1102  .n_errors = LB_N_ERROR,
1103  .error_strings = lb_error_strings,
1104  .n_next_nodes = LB_N_NEXT,
1105  .next_nodes =
1106  { [LB_NEXT_DROP] = "error-drop" },
1107  };
1108 
1109 VLIB_REGISTER_NODE (lb6_gre4_port_node) =
1110  {
1111  .function = lb6_gre4_port_node_fn,
1112  .name = "lb6-gre4-port",
1113  .vector_size = sizeof(u32),
1114  .format_trace = format_lb_trace,
1115  .n_errors = LB_N_ERROR,
1116  .error_strings = lb_error_strings,
1117  .n_next_nodes = LB_N_NEXT,
1118  .next_nodes =
1119  { [LB_NEXT_DROP] = "error-drop" },
1120  };
1121 
1122 VLIB_REGISTER_NODE (lb4_gre6_port_node) =
1123  {
1124  .function = lb4_gre6_port_node_fn,
1125  .name = "lb4-gre6-port",
1126  .vector_size = sizeof(u32),
1127  .format_trace = format_lb_trace,
1128  .n_errors = LB_N_ERROR,
1129  .error_strings = lb_error_strings,
1130  .n_next_nodes = LB_N_NEXT,
1131  .next_nodes =
1132  { [LB_NEXT_DROP] = "error-drop" },
1133  };
1134 
1135 VLIB_REGISTER_NODE (lb4_gre4_port_node) =
1136  {
1137  .function = lb4_gre4_port_node_fn,
1138  .name = "lb4-gre4-port",
1139  .vector_size = sizeof(u32),
1140  .format_trace = format_lb_trace,
1141  .n_errors = LB_N_ERROR,
1142  .error_strings = lb_error_strings,
1143  .n_next_nodes = LB_N_NEXT,
1144  .next_nodes =
1145  { [LB_NEXT_DROP] = "error-drop" },
1146  };
1147 
1148 VLIB_REGISTER_NODE (lb4_l3dsr_port_node) =
1149  {
1150  .function = lb4_l3dsr_port_node_fn,
1151  .name = "lb4-l3dsr-port",
1152  .vector_size = sizeof(u32),
1153  .format_trace = format_lb_trace,
1154  .n_errors = LB_N_ERROR,
1155  .error_strings = lb_error_strings,
1156  .n_next_nodes = LB_N_NEXT,
1157  .next_nodes =
1158  { [LB_NEXT_DROP] = "error-drop" },
1159  };
1160 
1161 VLIB_REGISTER_NODE (lb4_l3dsr_node) =
1162  {
1163  .function = lb4_l3dsr_node_fn,
1164  .name = "lb4-l3dsr",
1165  .vector_size = sizeof(u32),
1166  .format_trace = format_lb_trace,
1167  .n_errors = LB_N_ERROR,
1168  .error_strings = lb_error_strings,
1169  .n_next_nodes = LB_N_NEXT,
1170  .next_nodes =
1171  { [LB_NEXT_DROP] = "error-drop" },
1172  };
1173 
1174 VLIB_REGISTER_NODE (lb6_nat6_port_node) =
1175  {
1176  .function = lb6_nat6_port_node_fn,
1177  .name = "lb6-nat6-port",
1178  .vector_size = sizeof(u32),
1179  .format_trace = format_lb_trace,
1180  .n_errors = LB_N_ERROR,
1181  .error_strings = lb_error_strings,
1182  .n_next_nodes = LB_N_NEXT,
1183  .next_nodes =
1184  { [LB_NEXT_DROP] = "error-drop" },
1185  };
1186 
1187 VLIB_REGISTER_NODE (lb4_nat4_port_node) =
1188  {
1189  .function = lb4_nat4_port_node_fn,
1190  .name = "lb4-nat4-port",
1191  .vector_size = sizeof(u32),
1192  .format_trace = format_lb_trace,
1193  .n_errors = LB_N_ERROR,
1194  .error_strings = lb_error_strings,
1195  .n_next_nodes = LB_N_NEXT,
1196  .next_nodes =
1197  { [LB_NEXT_DROP] = "error-drop" },
1198  };
1199 
1200 static uword
1201 lb4_nodeport_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1202  vlib_frame_t * frame)
1203 {
1204  return lb_nodeport_node_fn (vm, node, frame, 1);
1205 }
1206 
1207 static uword
1208 lb6_nodeport_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1209  vlib_frame_t * frame)
1210 {
1211  return lb_nodeport_node_fn (vm, node, frame, 0);
1212 }
1213 
1214 VLIB_REGISTER_NODE (lb4_nodeport_node) =
1215  {
1216  .function = lb4_nodeport_node_fn,
1217  .name = "lb4-nodeport",
1218  .vector_size = sizeof(u32),
1219  .format_trace = format_nodeport_lb_trace,
1220  .n_errors = LB_N_ERROR,
1221  .error_strings = lb_error_strings,
1222  .n_next_nodes = LB4_NODEPORT_N_NEXT,
1223  .next_nodes =
1224  {
1225  [LB4_NODEPORT_NEXT_IP4_NAT4] = "lb4-nat4-port",
1226  [LB4_NODEPORT_NEXT_DROP] = "error-drop",
1227  },
1228  };
1229 
1230 VLIB_REGISTER_NODE (lb6_nodeport_node) =
1231  {
1232  .function = lb6_nodeport_node_fn,
1233  .name = "lb6-nodeport",
1234  .vector_size = sizeof(u32),
1235  .format_trace = format_nodeport_lb_trace,
1236  .n_errors = LB_N_ERROR,
1237  .error_strings = lb_error_strings,
1238  .n_next_nodes = LB6_NODEPORT_N_NEXT,
1239  .next_nodes =
1240  {
1241  [LB6_NODEPORT_NEXT_IP6_NAT6] = "lb6-nat6-port",
1242  [LB6_NODEPORT_NEXT_DROP] = "error-drop",
1243  },
1244  };
1245 
1246 VNET_FEATURE_INIT (lb_nat4_in2out_node_fn, static) =
1247  {
1248  .arc_name = "ip4-unicast",
1249  .node_name = "lb-nat4-in2out",
1250  .runs_before = VNET_FEATURES("ip4-lookup"),
1251  };
1252 
1253 VLIB_REGISTER_NODE (lb_nat4_in2out_node) =
1254  {
1255  .function = lb_nat4_in2out_node_fn,
1256  .name = "lb-nat4-in2out",
1257  .vector_size = sizeof(u32),
1258  .format_trace = format_lb_nat_trace,
1259  .n_errors = LB_N_ERROR,
1260  .error_strings = lb_error_strings,
1261  .n_next_nodes = LB_NAT4_IN2OUT_N_NEXT,
1262  .next_nodes =
1263  {
1264  [LB_NAT4_IN2OUT_NEXT_DROP] = "error-drop",
1265  [LB_NAT4_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
1266  },
1267  };
1268 
1269 VNET_FEATURE_INIT (lb_nat6_in2out_node_fn, static) =
1270  {
1271  .arc_name = "ip6-unicast",
1272  .node_name = "lb-nat6-in2out",
1273  .runs_before = VNET_FEATURES("ip6-lookup"),
1274  };
1275 
1276 VLIB_REGISTER_NODE (lb_nat6_in2out_node) =
1277  {
1278  .function = lb_nat6_in2out_node_fn,
1279  .name = "lb-nat6-in2out",
1280  .vector_size = sizeof(u32),
1281  .format_trace = format_lb_nat_trace,
1282  .n_errors = LB_N_ERROR,
1283  .error_strings = lb_error_strings,
1284  .n_next_nodes = LB_NAT6_IN2OUT_N_NEXT,
1285  .next_nodes =
1286  {
1287  [LB_NAT6_IN2OUT_NEXT_DROP] = "error-drop",
1288  [LB_NAT6_IN2OUT_NEXT_LOOKUP] = "ip6-lookup",
1289  },
1290  };