FD.io VPP  v18.01-8-g0eacf49
Vector Packet Processing
kp_node.c
1 /*
2  * Copyright (c) 2016 Intel and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 
17 #include <vnet/fib/ip4_fib.h>
18 
19 #include <kubeproxy/kp.h>
20 #include <kubeproxy/kphash.h>
21 
22 #define foreach_kp_error \
23  _(NONE, "no error") \
24  _(PROTO_NOT_SUPPORTED, "protocol not supported")
25 
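 /* foreach_kp_error is expanded twice through the _() X-macro: once into the
    kp_error_t enum and once into the matching kp_error_strings table below,
    keeping the two in sync. */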
26 typedef enum {
27 #define _(sym,str) KP_ERROR_##sym,
28   foreach_kp_error
29 #undef _
30   KP_N_ERROR,
31 } kp_error_t;
32 
33 static char *kp_error_strings[] = {
34 #define _(sym,string) string,
35   foreach_kp_error
36 #undef _
37 };
38 
39 typedef struct {
40  u32 vip_index;
41  u32 pod_index;
42 } kp_trace_t;
43 
44 typedef struct {
45  u32 vip_index;
46  u32 node_port;
47 } kp_nodeport_trace_t;
48 
49 typedef struct {
50  u32 rx_sw_if_index;
51  u32 next_index;
52 } kp_nat_trace_t;
53 
54 u8 *
55 format_kp_trace (u8 * s, va_list * args)
56 {
57  kp_main_t *kpm = &kp_main;
58  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
60  kp_trace_t *t = va_arg (*args, kp_trace_t *);
61  if (pool_is_free_index(kpm->vips, t->vip_index)) {
62  s = format(s, "kp vip[%d]: This VIP was freed since capture\n", t->vip_index);
63  } else {
64  s = format(s, "kp vip[%d]: %U\n", t->vip_index, format_kp_vip, &kpm->vips[t->vip_index]);
65  }
66  if (pool_is_free_index(kpm->pods, t->pod_index)) {
67  s = format(s, " kp pod[%d]: This POD was freed since capture", t->pod_index);
68  } else {
69  s = format(s, " kp pod[%d]: %U", t->pod_index, format_kp_pod, &kpm->pods[t->pod_index]);
70  }
71  return s;
72 }
73 
74 u8 *
75 format_kp_nat_trace (u8 * s, va_list * args)
76 {
77  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
78  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
79  kp_nat_trace_t *t = va_arg (*args, kp_nat_trace_t *);
80 
81  s = format(s, "kp nat: rx_sw_if_index = %d, next_index = %d",
82  t->rx_sw_if_index, t->next_index);
83 
84  return s;
85 }
86 
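 /* Return the per-worker sticky (flow affinity) hash table. If the configured
    bucket count changed, the old table is drained (POD refcounts released) and
    freed; the table is then (re)allocated on demand and its flow timeout
    refreshed from kp_main. */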
87 kp_hash_t *kp_get_sticky_table(u32 thread_index)
88 {
89  kp_main_t *kpm = &kp_main;
90  kp_hash_t *sticky_ht = kpm->per_cpu[thread_index].sticky_ht;
91  //Check if size changed
92  if (PREDICT_FALSE(sticky_ht && (kpm->per_cpu_sticky_buckets != kp_hash_nbuckets(sticky_ht))))
93  {
94  //Dereference everything in there
95  kp_hash_bucket_t *b;
96  u32 i;
97  kp_hash_foreach_entry(sticky_ht, b, i) {
98  vlib_refcount_add(&kpm->pod_refcount, thread_index, b->value[i], -1);
99  vlib_refcount_add(&kpm->pod_refcount, thread_index, 0, 1);
100  }
101 
102  kp_hash_free(sticky_ht);
103  sticky_ht = NULL;
104  }
105 
106  //Create if necessary
107  if (PREDICT_FALSE(sticky_ht == NULL)) {
108  kpm->per_cpu[thread_index].sticky_ht = kp_hash_alloc(kpm->per_cpu_sticky_buckets, kpm->flow_timeout);
109  sticky_ht = kpm->per_cpu[thread_index].sticky_ht;
110  clib_warning("Regenerated sticky table %p", sticky_ht);
111  }
112 
113  ASSERT(sticky_ht);
114 
115  //Update timeout
116  sticky_ht->timeout = kpm->flow_timeout;
117  return sticky_ht;
118 }
119 
120 u64
121 kp_node_get_other_ports4(ip4_header_t *ip40)
122 {
123  return 0;
124 }
125 
126 u64
127 kp_node_get_other_ports6(ip6_header_t *ip60)
128 {
129  return 0;
130 }
131 
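 /* Flow hash over the IPv4/IPv6 address pair and the L4 ports. For protocols
    other than TCP/UDP the kp_node_get_other_ports* stubs above currently
    return 0, so those flows hash on addresses only. */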
132 static_always_inline u32
133 kp_node_get_hash (vlib_buffer_t *p, u8 is_input_v4)
134 {
135  u32 hash;
136  if (is_input_v4)
137  {
138  ip4_header_t *ip40;
139  u64 ports;
140  ip40 = vlib_buffer_get_current (p);
141  if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP ||
142  ip40->protocol == IP_PROTOCOL_UDP))
143  ports = ((u64)((udp_header_t *)(ip40 + 1))->src_port << 16) |
144  ((u64)((udp_header_t *)(ip40 + 1))->dst_port);
145  else
146  ports = kp_node_get_other_ports4(ip40);
147 
148  hash = kp_hash_hash(*((u64 *)&ip40->address_pair), ports,
149  0, 0, 0);
150  }
151  else
152  {
153  ip6_header_t *ip60;
154  ip60 = vlib_buffer_get_current (p);
155  u64 ports;
156  if (PREDICT_TRUE (ip60->protocol == IP_PROTOCOL_TCP ||
157  ip60->protocol == IP_PROTOCOL_UDP))
158  ports = ((u64)((udp_header_t *)(ip60 + 1))->src_port << 16) |
159  ((u64)((udp_header_t *)(ip60 + 1))->dst_port);
160  else
161  ports = kp_node_get_other_ports6(ip60);
162 
163  hash = kp_hash_hash(ip60->src_address.as_u64[0],
164  ip60->src_address.as_u64[1],
165  ip60->dst_address.as_u64[0],
166  ip60->dst_address.as_u64[1],
167  ports);
168  }
169  return hash;
170 }
171 
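 /* Main load-balancing path, specialized at compile time for the four
    (input, NAT) address-family combinations: compute the flow hash, look the
    flow up in the per-worker sticky table (new flows pick a POD from the VIP's
    new-flow table), rewrite the packet towards the selected POD and enqueue it
    on the POD's DPO. */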
172 static_always_inline uword
173 kp_node_fn (vlib_main_t * vm,
174  vlib_node_runtime_t * node, vlib_frame_t * frame,
175  u8 is_input_v4, //Compile-time parameter stating whether the input is IPv4 (or IPv6)
176  u8 is_nat_v4) //Compile-time parameter stating whether the NAT rewrite is IPv4 (or IPv6)
177 {
178  kp_main_t *kpm = &kp_main;
179  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
180  u32 thread_index = vlib_get_thread_index();
181  u32 kp_time = kp_hash_time_now(vm);
182 
183  kp_hash_t *sticky_ht = kp_get_sticky_table(thread_index);
184  from = vlib_frame_vector_args (frame);
185  n_left_from = frame->n_vectors;
186  next_index = node->cached_next_index;
187 
188  u32 nexthash0 = 0;
189  if (PREDICT_TRUE(n_left_from > 0))
190  nexthash0 = kp_node_get_hash(vlib_get_buffer (vm, from[0]), is_input_v4);
191 
192  while (n_left_from > 0)
193  {
194  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
195  while (n_left_from > 0 && n_left_to_next > 0)
196  {
197  u32 pi0;
198  vlib_buffer_t *p0;
199  kp_vip_t *vip0;
200  u32 podindex0;
201  u32 available_index0;
202  u8 counter = 0;
203  u32 hash0 = nexthash0;
204 
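 /* One-packet lookahead: the next buffer's flow hash is computed and its
    sticky-table bucket prefetched while the current packet is processed,
    hiding the hash-table memory latency. */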
205  if (PREDICT_TRUE(n_left_from > 1))
206  {
207  vlib_buffer_t *p1 = vlib_get_buffer (vm, from[1]);
208  //Compute next hash and prefetch bucket
209  nexthash0 = kp_node_get_hash(p1, is_input_v4);
210  kp_hash_prefetch_bucket(sticky_ht, nexthash0);
211  //Prefetch for encap, next
212  CLIB_PREFETCH (vlib_buffer_get_current(p1) - 64, 64, STORE);
213  }
214 
215  if (PREDICT_TRUE(n_left_from > 2))
216  {
217  vlib_buffer_t *p2;
218  p2 = vlib_get_buffer(vm, from[2]);
219  /* prefetch packet header and data */
220  vlib_prefetch_buffer_header(p2, STORE);
221  CLIB_PREFETCH (vlib_buffer_get_current(p2), 64, STORE);
222  }
223 
224  pi0 = to_next[0] = from[0];
225  from += 1;
226  n_left_from -= 1;
227  to_next += 1;
228  n_left_to_next -= 1;
229 
230  p0 = vlib_get_buffer (vm, pi0);
231  vip0 = pool_elt_at_index (kpm->vips,
232  vnet_buffer (p0)->ip.adj_index[VLIB_TX]);
233 
234  kp_hash_get(sticky_ht, hash0, vnet_buffer (p0)->ip.adj_index[VLIB_TX],
235  kp_time, &available_index0, &podindex0);
236 
237  if (PREDICT_TRUE(podindex0 != ~0))
238  {
239  //Found an existing entry
240  counter = KP_VIP_COUNTER_NEXT_PACKET;
241  }
242  else if (PREDICT_TRUE(available_index0 != ~0))
243  {
244  //There is an available slot for a new flow
245  podindex0 = vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].pod_index;
246  counter = KP_VIP_COUNTER_FIRST_PACKET;
247  counter = (podindex0 == 0)?KP_VIP_COUNTER_NO_SERVER:counter;
248 
249  //Dereference previously used
250  vlib_refcount_add(&kpm->pod_refcount, thread_index,
251  kp_hash_available_value(sticky_ht, hash0, available_index0), -1);
252  vlib_refcount_add(&kpm->pod_refcount, thread_index,
253  podindex0, 1);
254 
255  //Add sticky entry
256  //Note that when no POD is configured, an entry is still added.
257  //Having no POD configured should not happen anyway.
258  kp_hash_put(sticky_ht, hash0, podindex0,
259  vnet_buffer (p0)->ip.adj_index[VLIB_TX],
260  available_index0, kp_time);
261  }
262  else
263  {
264  //Could not store new entry in the table
265  podindex0 = vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].pod_index;
266  counter = KP_VIP_COUNTER_UNTRACKED_PACKET;
267  }
268 
269  vlib_increment_simple_counter(&kpm->vip_counters[counter],
270  thread_index,
271  vnet_buffer (p0)->ip.adj_index[VLIB_TX],
272  1);
273  //Now let's do NAT
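 //The destination address is rewritten to the selected POD and the destination
 //port to the VIP's target port. Four (input, NAT) address-family combinations
 //are handled: NAT44, NAT46, NAT66 and NAT64; the 4-to-6 and 6-to-4 rewrites
 //are still marked TBD in this version.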
274  {
275  udp_header_t *port0;
276 
277  if ( (is_input_v4==1) && (is_nat_v4==1) ) /* NAT44 */
278  {
279  ip4_header_t *ip40;
280  ip40 = vlib_buffer_get_current(p0);
281  port0 = (udp_header_t *)(ip40 + 1);
282  ip40->dst_address = kpm->pods[podindex0].address.ip4;
283  ip40->checksum = ip4_header_checksum (ip40);
284  }
285  else if ( (is_input_v4==1) && (is_nat_v4==0) ) /* NAT46 */
286  {
287  /* TBD */
288  u16 len0 = 0;
289  ip4_header_t *ip40;
290  ip40 = vlib_buffer_get_current(p0);
291  len0 = clib_net_to_host_u16(ip40->length);
292 
293  vlib_buffer_advance(p0, (-sizeof(ip6_header_t)+sizeof(ip4_header_t)) );
294  ip6_header_t *ip60;
295  ip60 = vlib_buffer_get_current(p0);
296  port0 = (udp_header_t *)(ip60 + 1);
297  ip60->payload_length = len0 - sizeof(ip4_header_t);
298  ip60->dst_address = kpm->pods[podindex0].address.ip6;
299  }
300  else if ( (is_input_v4==0) && (is_nat_v4==0) ) /* NAT66 */
301  {
302  ip6_header_t *ip60;
303  ip60 = vlib_buffer_get_current(p0);
304  port0 = (udp_header_t *)(ip60 + 1);
305  ip60->dst_address = kpm->pods[podindex0].address.ip6;
306  }
307  else /* NAT64 */
308  {
309  /* TBD */
310  u16 len0 = 0;
311  ip6_header_t *ip60;
312  ip60 = vlib_buffer_get_current(p0);
313  len0 = clib_net_to_host_u16(ip60->payload_length);
314 
315  vlib_buffer_advance(p0, (sizeof(ip6_header_t)-sizeof(ip4_header_t)) );
316  ip4_header_t *ip40;
317  ip40 = vlib_buffer_get_current(p0);
318  port0 = (udp_header_t *)(ip40 + 1);
319  ip40->length = len0 + sizeof(ip4_header_t);
320  ip40->dst_address = kpm->pods[podindex0].address.ip4;
321  ip40->checksum = ip4_header_checksum (ip40);
322  }
323 
324  port0->dst_port = vip0->target_port;
325  }
326 
327  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
328  {
329  kp_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
330  tr->pod_index = podindex0;
331  tr->vip_index = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
332  }
333 
334  //Enqueue to next
335  //Note that this will end up as an error if podindex0 == 0
336  vnet_buffer (p0)->ip.adj_index[VLIB_TX] = kpm->pods[podindex0].dpo.dpoi_index;
337  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
338  n_left_to_next, pi0,
339  kpm->pods[podindex0].dpo.dpoi_next_node);
340  }
341  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
342  }
343 
344  return frame->n_vectors;
345 }
346 
347 u8 *
348 format_nodeport_kp_trace (u8 * s, va_list * args)
349 {
350  kp_main_t *kpm = &kp_main;
351  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
352  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
353  kp_nodeport_trace_t *t = va_arg (*args, kp_nodeport_trace_t *);
354  if (pool_is_free_index(kpm->vips, t->vip_index)) {
355  s = format(s, "kp vip[%d]: This VIP was freed since capture\n", t->vip_index);
356  } else {
357  s = format(s, "kp vip[%d]: %U\n", t->vip_index, format_kp_vip, &kpm->vips[t->vip_index]);
358  }
359 
360  s = format(s, " kp node_port: %d", t->node_port);
361 
362  return s;
363 }
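 /* NodePort handling: the current-data pointer is rewound past the IP and UDP
    headers, the VIP index is looked up by destination (node) port in
    nodeport_by_key, and the packet is dispatched to the matching kpX-natY
    node. */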
364 static uword
365 kp_nodeport_node_fn (vlib_main_t * vm,
366  vlib_node_runtime_t * node,
367  vlib_frame_t * frame,
368  u8 is_input_v4)
369 {
370  kp_main_t *kpm = &kp_main;
371  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
372 
373  from = vlib_frame_vector_args (frame);
374  n_left_from = frame->n_vectors;
375  next_index = node->cached_next_index;
376 
377 
378  while (n_left_from > 0)
379  {
380  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
381 
382  while (n_left_from > 0 && n_left_to_next > 0)
383  {
384  u32 pi0;
385  vlib_buffer_t *p0;
386  udp_header_t * udp_0;
387  uword * entry0;
388  u32 next0 = KP_NODEPORT_NEXT_DROP;
389 
390 
391  if (PREDICT_TRUE(n_left_from > 1))
392  {
393  vlib_buffer_t *p1 = vlib_get_buffer (vm, from[1]);
394  //Prefetch for encap, next
395  CLIB_PREFETCH (vlib_buffer_get_current(p1) - 64, 64, STORE);
396  }
397 
398  if (PREDICT_TRUE(n_left_from > 2))
399  {
400  vlib_buffer_t *p2;
401  p2 = vlib_get_buffer(vm, from[2]);
402  /* prefetch packet header and data */
403  vlib_prefetch_buffer_header(p2, STORE);
404  CLIB_PREFETCH (vlib_buffer_get_current(p2), 64, STORE);
405  }
406 
407  pi0 = to_next[0] = from[0];
408  from += 1;
409  n_left_from -= 1;
410  to_next += 1;
411  n_left_to_next -= 1;
412 
413  p0 = vlib_get_buffer (vm, pi0);
414 
415  if (is_input_v4==1)
416  {
417  ip4_header_t *ip40;
418  vlib_buffer_advance
419  (p0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
420  ip40 = vlib_buffer_get_current(p0);
421  udp_0 = (udp_header_t *)(ip40 + 1);
422  }
423  else
424  {
425  ip6_header_t *ip60;
426  vlib_buffer_advance
427  (p0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
428  ip60 = vlib_buffer_get_current(p0);
429  udp_0 = (udp_header_t *)(ip60 + 1);
430  }
431 
432  entry0 = hash_get_mem(kpm->nodeport_by_key, &(udp_0->dst_port));
433 
434 
435  if (is_input_v4==1)
436  {
437  next0 = KP_NODEPORT_NEXT_IP4_NAT4;
438  }
439  else
440  {
441  next0 = KP_NODEPORT_NEXT_IP6_NAT6;
442  }
443 
444  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
445  {
446  kp_nodeport_trace_t *tr = vlib_add_trace (vm, node,
447  p0, sizeof (*tr));
448  tr->vip_index = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
449  tr->node_port = (u32)clib_net_to_host_u16(udp_0->dst_port);
450  }
451 
452  //Enqueue to next
453  vnet_buffer(p0)->ip.adj_index[VLIB_TX] = entry0[0];
454  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
455  n_left_to_next, pi0, next0);
456  }
457  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
458  }
459 
460  return frame->n_vectors;
461 
462 }
463 
464 /**
465  * @brief Match NAT4 static mapping.
466  *
467  * @param sm NAT main.
468  * @param match Address and port to match.
469  * @param mapping External or local address and port of the matched mapping.
470  *
471  * @returns 0 if match found otherwise 1.
472  */
473 int kp_nat4_mapping_match(kp_main_t *kpm,
474  kp_snat4_key_t match,
475  kp_snat4_key_t * mapping)
476 {
477  clib_bihash_kv_8_8_t kv, value;
478  kp_snat_mapping_t *m;
479  kp_snat4_key_t m_key;
480  clib_bihash_8_8_t *mapping_hash = &kpm->mapping_by_pod;
481 
482  m_key.addr = match.addr;
483  m_key.port = match.port;
484  m_key.protocol = match.protocol;
485  m_key.fib_index = match.fib_index;
486 
487  kv.key = m_key.as_u64;
488 
489  if (clib_bihash_search_8_8 (mapping_hash, &kv, &value))
490  {
491  return 1;
492  }
493 
494  m = pool_elt_at_index (kpm->snat_mappings, value.value);
495 
496  if (m->svr_type == KP_SVR_TYPE_VIP_PORT)
497  {
498  mapping->addr = m->vip.ip4;
499  mapping->port = clib_host_to_net_u16 (m->port);
500  mapping->fib_index = m->fib_index;
501  mapping->protocol = match.protocol;
502  }
503  else if (m->svr_type == KP_SVR_TYPE_NODEIP_PORT)
504  {
505  mapping->addr = m->node_ip.ip4;
506  mapping->port = clib_host_to_net_u16 (m->node_port);
507  mapping->fib_index = m->fib_index;
508  mapping->protocol = match.protocol;
509  }
510 
511  return 0;
512 }
513 
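 /* Inside-to-outside source NAT: the (src address, src port, protocol, fib)
    key is matched against the static mapping table; on a hit the source
    address and port are rewritten to the mapped VIP or node address and the
    IP/TCP checksums are patched incrementally. */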
514 static uword
515 kp_nat4_in2out_node_fn (vlib_main_t * vm,
516  vlib_node_runtime_t * node,
517  vlib_frame_t * frame)
518 {
519  u32 n_left_from, * from, * to_next;
520  kp_nat4_in2out_next_t next_index;
521  u32 pkts_processed = 0;
522  kp_main_t *kpm = &kp_main;
523  u32 stats_node_index;
524 
525  stats_node_index = kp_nat4_in2out_node.index;
526 
527  from = vlib_frame_vector_args (frame);
528  n_left_from = frame->n_vectors;
529  next_index = node->cached_next_index;
530 
531  while (n_left_from > 0)
532  {
533  u32 n_left_to_next;
534 
535  vlib_get_next_frame (vm, node, next_index,
536  to_next, n_left_to_next);
537 
538  while (n_left_from > 0 && n_left_to_next > 0)
539  {
540  u32 bi0;
541  vlib_buffer_t * b0;
542  u32 next0;
543  u32 sw_if_index0;
544  ip4_header_t * ip0;
545  ip_csum_t sum0;
546  u32 new_addr0, old_addr0;
547  u16 old_port0, new_port0;
548  udp_header_t * udp0;
549  tcp_header_t * tcp0;
550  kp_snat4_key_t key0, sm0;
551  u32 proto0;
552  u32 rx_fib_index0;
553 
554  /* speculatively enqueue b0 to the current next frame */
555  bi0 = from[0];
556  to_next[0] = bi0;
557  from += 1;
558  to_next += 1;
559  n_left_from -= 1;
560  n_left_to_next -= 1;
561 
562  b0 = vlib_get_buffer (vm, bi0);
563  next0 = KP_NAT4_IN2OUT_NEXT_LOOKUP;
564 
565  ip0 = vlib_buffer_get_current (b0);
566  udp0 = ip4_next_header (ip0);
567  tcp0 = (tcp_header_t *) udp0;
568 
569  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
570  rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index(sw_if_index0);
571 
572  proto0 = kp_ip_proto_to_nat_proto (ip0->protocol);
573 
574  if (PREDICT_FALSE (proto0 == ~0))
575  goto trace0;
576 
577  key0.addr = ip0->src_address;
578  key0.protocol = proto0;
579  key0.port = udp0->src_port;
580  key0.fib_index = rx_fib_index0;
581 
582  if (kp_nat4_mapping_match (kpm, key0, &sm0))
583  {
584  next0 = KP_NAT4_IN2OUT_NEXT_DROP;
585  goto trace0;
586  }
587 
588  new_addr0 = sm0.addr.as_u32;
589  new_port0 = sm0.port;
590  vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
591  old_addr0 = ip0->src_address.as_u32;
592  ip0->src_address.as_u32 = new_addr0;
593 
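 /* The IPv4 and TCP checksums are updated incrementally (RFC 1624) from the
    old/new address and port values rather than recomputed over the whole
    header; for UDP the checksum is simply cleared. */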
594  sum0 = ip0->checksum;
595  sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
596  ip4_header_t,
597  src_address /* changed member */);
598  ip0->checksum = ip_csum_fold (sum0);
599 
600  if (PREDICT_FALSE(new_port0 != udp0->dst_port))
601  {
602  if (PREDICT_TRUE(proto0 == KP_NAT_PROTOCOL_TCP))
603  {
604  old_port0 = tcp0->src_port;
605  tcp0->src_port = new_port0;
606 
607  sum0 = tcp0->checksum;
608  sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
609  ip4_header_t,
610  dst_address /* changed member */);
611  sum0 = ip_csum_update (sum0, old_port0, new_port0,
612  ip4_header_t /* cheat */,
613  length /* changed member */);
614  tcp0->checksum = ip_csum_fold(sum0);
615  }
616  else
617  {
618  old_port0 = udp0->src_port;
619  udp0->src_port = new_port0;
620  udp0->checksum = 0;
621  }
622  }
623  else
624  {
625  if (PREDICT_TRUE(proto0 == KP_NAT_PROTOCOL_TCP))
626  {
627  sum0 = tcp0->checksum;
628  sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
629  ip4_header_t,
630  dst_address /* changed member */);
631  tcp0->checksum = ip_csum_fold(sum0);
632  }
633  }
634 
635  trace0:
636  if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
637  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
638  {
639  kp_nat_trace_t *t =
640  vlib_add_trace (vm, node, b0, sizeof (*t));
641  t->rx_sw_if_index = sw_if_index0;
642  t->next_index = next0;
643  }
644 
645  pkts_processed += next0 != KP_NAT4_IN2OUT_NEXT_DROP;
646 
647  /* verify speculative enqueue, maybe switch current next frame */
648  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
649  to_next, n_left_to_next,
650  bi0, next0);
651  }
652 
653  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
654  }
655 
656  vlib_node_increment_counter (vm, stats_node_index,
657  KP_NAT_IN2OUT_ERROR_IN2OUT_PACKETS,
658  pkts_processed);
659  return frame->n_vectors;
660 }
661 
662 static uword
663 kp6_nat6_node_fn (vlib_main_t * vm,
664  vlib_node_runtime_t * node, vlib_frame_t * frame)
665 {
666  return kp_node_fn(vm, node, frame, 0, 0);
667 }
668 
669 static uword
670 kp6_nat4_node_fn (vlib_main_t * vm,
671  vlib_node_runtime_t * node, vlib_frame_t * frame)
672 {
673  return kp_node_fn(vm, node, frame, 0, 1);
674 }
675 
676 static uword
677 kp4_nat6_node_fn (vlib_main_t * vm,
678  vlib_node_runtime_t * node, vlib_frame_t * frame)
679 {
680  return kp_node_fn(vm, node, frame, 1, 0);
681 }
682 
683 static uword
684 kp4_nat4_node_fn (vlib_main_t * vm,
685  vlib_node_runtime_t * node, vlib_frame_t * frame)
686 {
687  return kp_node_fn(vm, node, frame, 1, 1);
688 }
689 
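 /* Graph node registrations: one kpX-natY node per (input address family, NAT
    address family) combination, all wrapping kp_node_fn specialized at compile
    time through the is_input_v4/is_nat_v4 flags. */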
690 VLIB_REGISTER_NODE (kp6_nat6_node) =
691 {
692  .function = kp6_nat6_node_fn,
693  .name = "kp6-nat6",
694  .vector_size = sizeof (u32),
695  .format_trace = format_kp_trace,
696 
697  .n_errors = KP_N_ERROR,
698  .error_strings = kp_error_strings,
699 
700  .n_next_nodes = KP_N_NEXT,
701  .next_nodes =
702  {
703  [KP_NEXT_DROP] = "error-drop"
704  },
705 };
706 
707 VLIB_REGISTER_NODE (kp6_nat4_node) =
708 {
709  .function = kp6_nat4_node_fn,
710  .name = "kp6-nat4",
711  .vector_size = sizeof (u32),
712  .format_trace = format_kp_trace,
713 
714  .n_errors = KP_N_ERROR,
715  .error_strings = kp_error_strings,
716 
717  .n_next_nodes = KP_N_NEXT,
718  .next_nodes =
719  {
720  [KP_NEXT_DROP] = "error-drop"
721  },
722 };
723 
724 VLIB_REGISTER_NODE (kp4_nat6_node) =
725 {
726  .function = kp4_nat6_node_fn,
727  .name = "kp4-nat6",
728  .vector_size = sizeof (u32),
729  .format_trace = format_kp_trace,
730 
731  .n_errors = KP_N_ERROR,
732  .error_strings = kp_error_strings,
733 
734  .n_next_nodes = KP_N_NEXT,
735  .next_nodes =
736  {
737  [KP_NEXT_DROP] = "error-drop"
738  },
739 };
740 
741 VLIB_REGISTER_NODE (kp4_nat4_node) =
742 {
743  .function = kp4_nat4_node_fn,
744  .name = "kp4-nat4",
745  .vector_size = sizeof (u32),
746  .format_trace = format_kp_trace,
747 
748  .n_errors = KP_N_ERROR,
749  .error_strings = kp_error_strings,
750 
751  .n_next_nodes = KP_N_NEXT,
752  .next_nodes =
753  {
754  [KP_NEXT_DROP] = "error-drop"
755  },
756 };
757 
758 static uword
759 kp4_nodeport_node_fn (vlib_main_t * vm,
760  vlib_node_runtime_t * node,
761  vlib_frame_t * frame)
762 {
763  return kp_nodeport_node_fn(vm, node, frame, 1);
764 }
765 
766 static uword
767 kp6_nodeport_node_fn (vlib_main_t * vm,
768  vlib_node_runtime_t * node,
769  vlib_frame_t * frame)
770 {
771  return kp_nodeport_node_fn(vm, node, frame, 0);
772 }
773 
774 VLIB_REGISTER_NODE (kp4_nodeport_node) =
775 {
776  .function = kp4_nodeport_node_fn,
777  .name = "kp4-nodeport",
778  .vector_size = sizeof (u32),
779  .format_trace = format_nodeport_kp_trace,
780 
781  .n_errors = KP_N_ERROR,
782  .error_strings = kp_error_strings,
783 
784  .n_next_nodes = KP_NODEPORT_N_NEXT,
785  .next_nodes =
786  {
787  [KP_NODEPORT_NEXT_IP4_NAT4] = "kp4-nat4",
788  [KP_NODEPORT_NEXT_IP4_NAT6] = "kp4-nat6",
789  [KP_NODEPORT_NEXT_IP6_NAT4] = "kp6-nat4",
790  [KP_NODEPORT_NEXT_IP6_NAT6] = "kp6-nat6",
791  [KP_NODEPORT_NEXT_DROP] = "error-drop",
792  },
793 };
794 
795 VLIB_REGISTER_NODE (kp6_nodeport_node) =
796 {
797  .function = kp6_nodeport_node_fn,
798  .name = "kp6-nodeport",
799  .vector_size = sizeof (u32),
800  .format_trace = format_nodeport_kp_trace,
801 
802  .n_errors = KP_N_ERROR,
803  .error_strings = kp_error_strings,
804 
805  .n_next_nodes = KP_NODEPORT_N_NEXT,
806  .next_nodes =
807  {
808  [KP_NODEPORT_NEXT_IP4_NAT4] = "kp4-nat4",
809  [KP_NODEPORT_NEXT_IP4_NAT6] = "kp4-nat6",
810  [KP_NODEPORT_NEXT_IP6_NAT4] = "kp6-nat4",
811  [KP_NODEPORT_NEXT_IP6_NAT6] = "kp6-nat6",
812  [KP_NODEPORT_NEXT_DROP] = "error-drop",
813  },
814 };
815 
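 /* The kp-nat4-in2out node is attached to the ip4-unicast feature arc and runs
    before ip4-lookup, so inside-to-outside traffic is source-NATed before the
    regular FIB lookup. */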
816 VNET_FEATURE_INIT (kp_nat4_in2out_node_fn, static) =
817 {
818  .arc_name = "ip4-unicast",
819  .node_name = "kp-nat4-in2out",
820  .runs_before = VNET_FEATURES ("ip4-lookup"),
821 };
822 
823 VLIB_REGISTER_NODE (kp_nat4_in2out_node) =
824 {
825  .function = kp_nat4_in2out_node_fn,
826  .name = "kp-nat4-in2out",
827  .vector_size = sizeof (u32),
828  .format_trace = format_kp_nat_trace,
829 
830  .n_errors = KP_N_ERROR,
831  .error_strings = kp_error_strings,
832 
833  .n_next_nodes = KP_NAT4_IN2OUT_N_NEXT,
834  .next_nodes =
835  {
836  [KP_NAT4_IN2OUT_NEXT_DROP] = "error-drop",
837  [KP_NAT4_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
838  },
839 };