/*
 * nsh_node.c - nsh nodes
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <nsh/nsh.h>

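/*
 * Build the MD type-2 options of a freshly pushed NSH header: walk the TLVs
 * configured on the nsh_entry, let the registered per-option callback write
 * each recognized option into the new header, and account for the (possibly
 * changed) option sizes in nsh_entry->rewrite_size and the base header
 * length field.
 */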
always_inline void
nsh_md2_encap (vlib_buffer_t * b, nsh_base_header_t * hdr,
               nsh_entry_t * nsh_entry)
{
  nsh_main_t *nm = &nsh_main;
  nsh_base_header_t *nsh_base;
  nsh_tlv_header_t *opt0;
  nsh_tlv_header_t *limit0;
  nsh_tlv_header_t *nsh_md2;
  nsh_option_map_t *nsh_option;
  u8 old_option_size = 0;
  u8 new_option_size = 0;

  /* Populate the NSH Header */
  opt0 = (nsh_tlv_header_t *) (nsh_entry->tlvs_data);
  limit0 = (nsh_tlv_header_t *) (nsh_entry->tlvs_data + nsh_entry->tlvs_len);

  nsh_md2 = (nsh_tlv_header_t *) ((u8 *) hdr /*nsh_entry->rewrite */ +
                                  sizeof (nsh_base_header_t));
  nsh_entry->rewrite_size = sizeof (nsh_base_header_t);

  /* Scan the set of variable metadata, process ones that we understand */
  while (opt0 < limit0)
    {
      old_option_size = sizeof (nsh_tlv_header_t) + opt0->length;
      /* round to 4-byte */
      old_option_size = ((old_option_size + 3) >> 2) << 2;

      nsh_option = nsh_md2_lookup_option (opt0->class, opt0->type);
      if (nsh_option == NULL)
        {
          goto next_tlv_md2;
        }

      if (nm->options[nsh_option->option_id])
        {
          if ((*nm->options[nsh_option->option_id]) (b, nsh_md2))
            {
              goto next_tlv_md2;
            }

          /* option length may vary */
          new_option_size = sizeof (nsh_tlv_header_t) + nsh_md2->length;
          /* round to 4-byte */
          new_option_size = ((new_option_size + 3) >> 2) << 2;
          nsh_entry->rewrite_size += new_option_size;

          nsh_md2 = (nsh_tlv_header_t *) (((u8 *) nsh_md2) + new_option_size);
          opt0 = (nsh_tlv_header_t *) (((u8 *) opt0) + old_option_size);
        }
      else
        {
        next_tlv_md2:
          opt0 = (nsh_tlv_header_t *) (((u8 *) opt0) + old_option_size);
        }
    }

  /* update the NSH header's length */
  nsh_base = (nsh_base_header_t *) nsh_entry->rewrite;
  nsh_base->length = (nsh_base->length & NSH_TTL_L2_MASK) |
    ((nsh_entry->rewrite_size >> 2) & NSH_LEN_MASK);
  return;
}

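/*
 * Rebuild the MD type-2 options while swapping NSH headers: scan the TLVs of
 * the incoming header and, for each recognized option, invoke the registered
 * swap callback to emit the corresponding option into the outgoing
 * nsh_entry's rewrite, updating rewrite_size and the header length.
 */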
always_inline void
nsh_md2_swap (vlib_buffer_t * b,
              nsh_base_header_t * hdr,
              u32 header_len,
              nsh_entry_t * nsh_entry, u32 * next, u32 drop_node_val)
{
  nsh_main_t *nm = &nsh_main;
  nsh_base_header_t *nsh_base;
  nsh_tlv_header_t *opt0;
  nsh_tlv_header_t *limit0;
  nsh_tlv_header_t *nsh_md2;
  nsh_option_map_t *nsh_option;
  u8 old_option_size = 0;
  u8 new_option_size = 0;

  /* Populate the NSH Header */
  opt0 = (nsh_md2_data_t *) (hdr + 1);
  limit0 = (nsh_md2_data_t *) ((u8 *) hdr + header_len);

  nsh_md2 =
    (nsh_tlv_header_t *) (nsh_entry->rewrite + sizeof (nsh_base_header_t));
  nsh_entry->rewrite_size = sizeof (nsh_base_header_t);

  /* Scan the set of variable metadata, process ones that we understand */
  while (opt0 < limit0)
    {
      old_option_size = sizeof (nsh_tlv_header_t) + opt0->length;
      /* round to 4-byte */
      old_option_size = ((old_option_size + 3) >> 2) << 2;

      nsh_option = nsh_md2_lookup_option (opt0->class, opt0->type);
      if (nsh_option == NULL)
        {
          goto next_tlv_md2;
        }

      if (nm->swap_options[nsh_option->option_id])
        {
          if ((*nm->swap_options[nsh_option->option_id]) (b, opt0, nsh_md2))
            {
              goto next_tlv_md2;
            }

          /* option length may vary */
          new_option_size = sizeof (nsh_tlv_header_t) + nsh_md2->length;
          /* round to 4-byte */
          new_option_size = ((new_option_size + 3) >> 2) << 2;
          nsh_entry->rewrite_size += new_option_size;
          nsh_md2 = (nsh_tlv_header_t *) (((u8 *) nsh_md2) + new_option_size);

          opt0 = (nsh_tlv_header_t *) (((u8 *) opt0) + old_option_size);
        }
      else
        {
        next_tlv_md2:
          opt0 = (nsh_tlv_header_t *) (((u8 *) opt0) + old_option_size);
        }
    }

  /* update the NSH header's length */
  nsh_base = (nsh_base_header_t *) nsh_entry->rewrite;
  nsh_base->length = (nsh_base->length & NSH_TTL_L2_MASK) |
    ((nsh_entry->rewrite_size >> 2) & NSH_LEN_MASK);
  return;
}

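/*
 * Process MD type-2 options on decap: scan the TLVs of the header being
 * popped and hand each recognized option to its registered pop callback.
 * An unknown option or a callback failure redirects the packet to the drop
 * next index; a configured decap_v4_next_override replaces the next node
 * and zeroes header_len so the caller does not advance past the header.
 */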
always_inline void
nsh_md2_decap (vlib_buffer_t * b,
               nsh_base_header_t * hdr,
               u32 * header_len, u32 * next, u32 drop_node_val)
{
  nsh_main_t *nm = &nsh_main;
  nsh_md2_data_t *opt0;
  nsh_md2_data_t *limit0;
  nsh_option_map_t *nsh_option;
  u8 option_len = 0;

  /* Walk the TLV options that follow the base header */
  opt0 = (nsh_md2_data_t *) (hdr + 1);
  limit0 = (nsh_md2_data_t *) ((u8 *) hdr + *header_len);

  /* Scan the set of variable metadata, process ones that we understand */
  while (opt0 < limit0)
    {
      nsh_option = nsh_md2_lookup_option (opt0->class, opt0->type);
      if (nsh_option == NULL)
        {
          *next = drop_node_val;
          return;
        }

      if (nm->pop_options[nsh_option->option_id])
        {
          if ((*nm->pop_options[nsh_option->option_id]) (b, opt0))
            {
              *next = drop_node_val;
              return;
            }
        }
      /* round to 4-byte */
      option_len = ((opt0->length + 3) >> 2) << 2;
      opt0 =
        (nsh_md2_data_t *) (((u8 *) opt0) + sizeof (nsh_md2_data_t) +
                            option_len);
      *next =
        (nm->decap_v4_next_override) ? (nm->decap_v4_next_override) : (*next);
      *header_len = (nm->decap_v4_next_override) ? 0 : (*header_len);
    }

  return;
}

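/*
 * Common worker for the nsh-input, nsh-proxy, nsh-classifier and
 * nsh-aware-vnf-proxy graph nodes.  node_type selects how the NSP/NSI is
 * obtained from the packet or its metadata; the packet is then matched
 * against the configured mappings and the POP/SWAP/PUSH action of the
 * matching map entry is applied.
 */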
static uword
nsh_input_map (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_frame_t * from_frame, u32 node_type)
{
  u32 n_left_from, next_index, *from, *to_next;
  nsh_main_t *nm = &nsh_main;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

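      /* Dual loop: process two packets per iteration, prefetching the
       * headers of the following two buffers. */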
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u32 next0 = NSH_NODE_NEXT_DROP, next1 = NSH_NODE_NEXT_DROP;
          uword *entry0, *entry1;
          nsh_base_header_t *hdr0 = 0, *hdr1 = 0;
          u32 header_len0 = 0, header_len1 = 0;
          u32 nsp_nsi0, nsp_nsi1;
          u32 ttl0, ttl1;
          u32 error0, error1;
          nsh_map_t *map0 = 0, *map1 = 0;
          nsh_entry_t *nsh_entry0 = 0, *nsh_entry1 = 0;
          nsh_base_header_t *encap_hdr0 = 0, *encap_hdr1 = 0;
          u32 encap_hdr_len0 = 0, encap_hdr_len1 = 0;
          nsh_proxy_session_by_key_t key0, key1;
          uword *p0, *p1;
          nsh_proxy_session_t *proxy0, *proxy1;
          u32 sw_if_index0 = 0, sw_if_index1 = 0;
          ethernet_header_t dummy_eth0, dummy_eth1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          error0 = 0;
          error1 = 0;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          hdr0 = vlib_buffer_get_current (b0);
          hdr1 = vlib_buffer_get_current (b1);

          /* Process packet 0 */
          if (node_type == NSH_INPUT_TYPE)
            {
              nsp_nsi0 = hdr0->nsp_nsi;
              header_len0 = (hdr0->length & NSH_LEN_MASK) * 4;
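              /* The 6-bit TTL is split across the first two bytes: its high
               * four bits sit in the low nibble of ver_o_c, its low two bits
               * in the top of the length byte. */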
              ttl0 = (hdr0->ver_o_c & NSH_TTL_H4_MASK) << 2 |
                (hdr0->length & NSH_TTL_L2_MASK) >> 6;
              ttl0 = ttl0 - 1;
              if (PREDICT_FALSE (ttl0 == 0))
                {
                  error0 = NSH_NODE_ERROR_INVALID_TTL;
                  goto trace0;
                }
            }
          else if (node_type == NSH_CLASSIFIER_TYPE)
            {
              nsp_nsi0 =
                clib_host_to_net_u32 (vnet_buffer (b0)->
                                      l2_classify.opaque_index);
            }
          else if (node_type == NSH_AWARE_VNF_PROXY_TYPE)
            {
              /* Push dummy Eth header */
              char dummy_dst_address[6] =
                { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
              char dummy_src_address[6] =
                { 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc };
              clib_memcpy_fast (dummy_eth0.dst_address, dummy_dst_address, 6);
              clib_memcpy_fast (dummy_eth0.src_address, dummy_src_address, 6);
              dummy_eth0.type = 0x0800;
              vlib_buffer_advance (b0, -(word) sizeof (ethernet_header_t));
              hdr0 = vlib_buffer_get_current (b0);
              clib_memcpy_fast (hdr0, &dummy_eth0,
                                (word) sizeof (ethernet_header_t));

              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              nsp_nsi0 = nm->tunnel_index_by_sw_if_index[sw_if_index0];
            }
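          /* Remaining case is nsh-proxy: look up the proxy session keyed on
           * the receiving transport interface to obtain the NSP/NSI. */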
          else
            {
              clib_memset (&key0, 0, sizeof (key0));
              key0.transport_type = NSH_NODE_NEXT_ENCAP_VXLAN4;
              key0.transport_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];

              p0 = hash_get_mem (nm->nsh_proxy_session_by_key, &key0);
              if (PREDICT_FALSE (p0 == 0))
                {
                  error0 = NSH_NODE_ERROR_NO_PROXY;
                  goto trace0;
                }

              proxy0 = pool_elt_at_index (nm->nsh_proxy_sessions, p0[0]);
              if (PREDICT_FALSE (proxy0 == 0))
                {
                  error0 = NSH_NODE_ERROR_NO_PROXY;
                  goto trace0;
                }
              nsp_nsi0 = proxy0->nsp_nsi;
            }

          entry0 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi0);
          if (PREDICT_FALSE (entry0 == 0))
            {
              error0 = NSH_NODE_ERROR_NO_MAPPING;
              goto trace0;
            }

          /* Entry should point to a mapping ... */
          map0 = pool_elt_at_index (nm->nsh_mappings, entry0[0]);
          if (PREDICT_FALSE (map0 == 0))
            {
              error0 = NSH_NODE_ERROR_NO_MAPPING;
              goto trace0;
            }

          /* Set up things for the next node to transmit, i.e. which node
           * handles the packet next and on which interface/adjacency. */
          next0 = map0->next_node;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = map0->sw_if_index;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = map0->adj_index;

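          /* Apply the map's NSH action: POP strips the header, SWAP replaces
           * it with the mapped entry's rewrite, PUSH prepends a new one. */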
          if (PREDICT_FALSE (map0->nsh_action == NSH_ACTION_POP))
            {
              /* Manipulate MD2 */
              if (PREDICT_FALSE (hdr0->md_type == 2))
                {
                  nsh_md2_decap (b0, hdr0, &header_len0, &next0,
                                 NSH_NODE_NEXT_DROP);
                  if (PREDICT_FALSE (next0 == NSH_NODE_NEXT_DROP))
                    {
                      error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
                      goto trace0;
                    }
                  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
                    map0->rx_sw_if_index;
                }

              /* Pop NSH header */
              vlib_buffer_advance (b0, (word) header_len0);
              goto trace0;
            }

          entry0 = hash_get_mem (nm->nsh_entry_by_key, &map0->mapped_nsp_nsi);
          if (PREDICT_FALSE (entry0 == 0))
            {
              error0 = NSH_NODE_ERROR_NO_ENTRY;
              goto trace0;
            }

          nsh_entry0 =
            (nsh_entry_t *) pool_elt_at_index (nm->nsh_entries, entry0[0]);
          encap_hdr0 = (nsh_base_header_t *) (nsh_entry0->rewrite);
          /* rewrite_size should equal (encap_hdr0->length * 4) */
          encap_hdr_len0 = nsh_entry0->rewrite_size;

          if (PREDICT_TRUE (map0->nsh_action == NSH_ACTION_SWAP))
            {
              /* Manipulate MD2 */
              if (PREDICT_FALSE (hdr0->md_type == 2))
                {
                  nsh_md2_swap (b0, hdr0, header_len0, nsh_entry0,
                                &next0, NSH_NODE_NEXT_DROP);
                  if (PREDICT_FALSE (next0 == NSH_NODE_NEXT_DROP))
                    {
                      error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
                      goto trace0;
                    }
                }

              /* Pop old NSH header */
              vlib_buffer_advance (b0, (word) header_len0);

              /* After processing, md2's length may vary */
              encap_hdr_len0 = nsh_entry0->rewrite_size;
              /* Push new NSH header */
              vlib_buffer_advance (b0, -(word) encap_hdr_len0);
              hdr0 = vlib_buffer_get_current (b0);
              clib_memcpy_fast (hdr0, encap_hdr0, (word) encap_hdr_len0);

              goto trace0;
            }

          if (PREDICT_TRUE (map0->nsh_action == NSH_ACTION_PUSH))
            {
              /* After processing, md2's length may vary */
              encap_hdr_len0 = nsh_entry0->rewrite_size;
              /* Push new NSH header */
              vlib_buffer_advance (b0, -(word) encap_hdr_len0);
              hdr0 = vlib_buffer_get_current (b0);
              clib_memcpy_fast (hdr0, encap_hdr0, (word) encap_hdr_len0);

              /* Manipulate MD2 */
              if (PREDICT_FALSE (nsh_entry0->nsh_base.md_type == 2))
                {
                  nsh_md2_encap (b0, hdr0, nsh_entry0);
                }
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              nsh_input_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              clib_memcpy_fast (&(tr->trace_data), hdr0,
                                ((hdr0->length & NSH_LEN_MASK) * 4));
            }

          /* Process packet 1 */
          if (node_type == NSH_INPUT_TYPE)
            {
              nsp_nsi1 = hdr1->nsp_nsi;
              header_len1 = (hdr1->length & NSH_LEN_MASK) * 4;
              ttl1 = (hdr1->ver_o_c & NSH_TTL_H4_MASK) << 2 |
                (hdr1->length & NSH_TTL_L2_MASK) >> 6;
              ttl1 = ttl1 - 1;
              if (PREDICT_FALSE (ttl1 == 0))
                {
                  error1 = NSH_NODE_ERROR_INVALID_TTL;
                  goto trace1;
                }
            }
          else if (node_type == NSH_CLASSIFIER_TYPE)
            {
              nsp_nsi1 =
                clib_host_to_net_u32 (vnet_buffer (b1)->
                                      l2_classify.opaque_index);
            }
          else if (node_type == NSH_AWARE_VNF_PROXY_TYPE)
            {
              /* Push dummy Eth header */
              char dummy_dst_address[6] =
                { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
              char dummy_src_address[6] =
                { 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc };
              clib_memcpy_fast (dummy_eth1.dst_address, dummy_dst_address, 6);
              clib_memcpy_fast (dummy_eth1.src_address, dummy_src_address, 6);
              dummy_eth1.type = 0x0800;
              vlib_buffer_advance (b1, -(word) sizeof (ethernet_header_t));
              hdr1 = vlib_buffer_get_current (b1);
              clib_memcpy_fast (hdr1, &dummy_eth1,
                                (word) sizeof (ethernet_header_t));

              sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
              nsp_nsi1 = nm->tunnel_index_by_sw_if_index[sw_if_index1];
            }
          else
            {
              clib_memset (&key1, 0, sizeof (key1));
              key1.transport_type = NSH_NODE_NEXT_ENCAP_VXLAN4;
              key1.transport_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];

              p1 = hash_get_mem (nm->nsh_proxy_session_by_key, &key1);
              if (PREDICT_FALSE (p1 == 0))
                {
                  error1 = NSH_NODE_ERROR_NO_PROXY;
                  goto trace1;
                }

              proxy1 = pool_elt_at_index (nm->nsh_proxy_sessions, p1[0]);
              if (PREDICT_FALSE (proxy1 == 0))
                {
                  error1 = NSH_NODE_ERROR_NO_PROXY;
                  goto trace1;
                }
              nsp_nsi1 = proxy1->nsp_nsi;
            }

          entry1 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi1);
          if (PREDICT_FALSE (entry1 == 0))
            {
              error1 = NSH_NODE_ERROR_NO_MAPPING;
              goto trace1;
            }

          /* Entry should point to a mapping ... */
          map1 = pool_elt_at_index (nm->nsh_mappings, entry1[0]);
          if (PREDICT_FALSE (map1 == 0))
            {
              error1 = NSH_NODE_ERROR_NO_MAPPING;
              goto trace1;
            }

          /* Set up things for the next node to transmit, i.e. which node
           * handles the packet next and on which interface/adjacency. */
          next1 = map1->next_node;
          vnet_buffer (b1)->sw_if_index[VLIB_TX] = map1->sw_if_index;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = map1->adj_index;

          if (PREDICT_FALSE (map1->nsh_action == NSH_ACTION_POP))
            {
              /* Manipulate MD2 */
              if (PREDICT_FALSE (hdr1->md_type == 2))
                {
                  nsh_md2_decap (b1, hdr1, &header_len1, &next1,
                                 NSH_NODE_NEXT_DROP);
                  if (PREDICT_FALSE (next1 == NSH_NODE_NEXT_DROP))
                    {
                      error1 = NSH_NODE_ERROR_INVALID_OPTIONS;
                      goto trace1;
                    }
                  vnet_buffer (b1)->sw_if_index[VLIB_RX] =
                    map1->rx_sw_if_index;
                }

              /* Pop NSH header */
              vlib_buffer_advance (b1, (word) header_len1);
              goto trace1;
            }

          entry1 = hash_get_mem (nm->nsh_entry_by_key, &map1->mapped_nsp_nsi);
          if (PREDICT_FALSE (entry1 == 0))
            {
              error1 = NSH_NODE_ERROR_NO_ENTRY;
              goto trace1;
            }

          nsh_entry1 =
            (nsh_entry_t *) pool_elt_at_index (nm->nsh_entries, entry1[0]);
          encap_hdr1 = (nsh_base_header_t *) (nsh_entry1->rewrite);
          /* rewrite_size should equal (encap_hdr1->length * 4) */
          encap_hdr_len1 = nsh_entry1->rewrite_size;

          if (PREDICT_TRUE (map1->nsh_action == NSH_ACTION_SWAP))
            {
              /* Manipulate MD2 */
              if (PREDICT_FALSE (hdr1->md_type == 2))
                {
                  nsh_md2_swap (b1, hdr1, header_len1, nsh_entry1,
                                &next1, NSH_NODE_NEXT_DROP);
                  if (PREDICT_FALSE (next1 == NSH_NODE_NEXT_DROP))
                    {
                      error1 = NSH_NODE_ERROR_INVALID_OPTIONS;
                      goto trace1;
                    }
                }

              /* Pop old NSH header */
              vlib_buffer_advance (b1, (word) header_len1);

              /* After processing, md2's length may vary */
              encap_hdr_len1 = nsh_entry1->rewrite_size;
              /* Push new NSH header */
              vlib_buffer_advance (b1, -(word) encap_hdr_len1);
              hdr1 = vlib_buffer_get_current (b1);
              clib_memcpy_fast (hdr1, encap_hdr1, (word) encap_hdr_len1);

              goto trace1;
            }

          if (PREDICT_FALSE (map1->nsh_action == NSH_ACTION_PUSH))
            {
              /* After processing, md2's length may vary */
              encap_hdr_len1 = nsh_entry1->rewrite_size;
              /* Push new NSH header */
              vlib_buffer_advance (b1, -(word) encap_hdr_len1);
              hdr1 = vlib_buffer_get_current (b1);
              clib_memcpy_fast (hdr1, encap_hdr1, (word) encap_hdr_len1);

              /* Manipulate MD2 */
              if (PREDICT_FALSE (nsh_entry1->nsh_base.md_type == 2))
                {
                  nsh_md2_encap (b1, hdr1, nsh_entry1);
                }
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              nsh_input_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              clib_memcpy_fast (&(tr->trace_data), hdr1,
                                ((hdr1->length & NSH_LEN_MASK) * 4));
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, next0,
                                           next1);
        }

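      /* Single loop: handle any remaining packets one at a time. */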
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = 0;
          vlib_buffer_t *b0 = NULL;
          u32 next0 = NSH_NODE_NEXT_DROP;
          uword *entry0;
          nsh_base_header_t *hdr0 = 0;
          u32 header_len0 = 0;
          u32 nsp_nsi0;
          u32 ttl0;
          u32 error0;
          nsh_map_t *map0 = 0;
          nsh_entry_t *nsh_entry0 = 0;
          nsh_base_header_t *encap_hdr0 = 0;
          u32 encap_hdr_len0 = 0;
          nsh_proxy_session_by_key_t key0;
          uword *p0;
          nsh_proxy_session_t *proxy0 = 0;
          u32 sw_if_index0 = 0;
          ethernet_header_t dummy_eth0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;
          error0 = 0;

          b0 = vlib_get_buffer (vm, bi0);
          hdr0 = vlib_buffer_get_current (b0);

          if (node_type == NSH_INPUT_TYPE)
            {
              nsp_nsi0 = hdr0->nsp_nsi;
              header_len0 = (hdr0->length & NSH_LEN_MASK) * 4;
              ttl0 = (hdr0->ver_o_c & NSH_TTL_H4_MASK) << 2 |
                (hdr0->length & NSH_TTL_L2_MASK) >> 6;
              ttl0 = ttl0 - 1;
              if (PREDICT_FALSE (ttl0 == 0))
                {
                  error0 = NSH_NODE_ERROR_INVALID_TTL;
                  goto trace00;
                }
            }
          else if (node_type == NSH_CLASSIFIER_TYPE)
            {
              nsp_nsi0 =
                clib_host_to_net_u32 (vnet_buffer (b0)->
                                      l2_classify.opaque_index);
            }
          else if (node_type == NSH_AWARE_VNF_PROXY_TYPE)
            {
              /* Push dummy Eth header */
              char dummy_dst_address[6] =
                { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
              char dummy_src_address[6] =
                { 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc };
              clib_memcpy_fast (dummy_eth0.dst_address, dummy_dst_address, 6);
              clib_memcpy_fast (dummy_eth0.src_address, dummy_src_address, 6);
              dummy_eth0.type = 0x0800;
              vlib_buffer_advance (b0, -(word) sizeof (ethernet_header_t));
              hdr0 = vlib_buffer_get_current (b0);
              clib_memcpy_fast (hdr0, &dummy_eth0,
                                (word) sizeof (ethernet_header_t));

              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              nsp_nsi0 = nm->tunnel_index_by_sw_if_index[sw_if_index0];
            }
          else
            {
              clib_memset (&key0, 0, sizeof (key0));
              key0.transport_type = NSH_NODE_NEXT_ENCAP_VXLAN4;
              key0.transport_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];

              p0 = hash_get_mem (nm->nsh_proxy_session_by_key, &key0);
              if (PREDICT_FALSE (p0 == 0))
                {
                  error0 = NSH_NODE_ERROR_NO_PROXY;
                  goto trace00;
                }

              proxy0 = pool_elt_at_index (nm->nsh_proxy_sessions, p0[0]);
              if (PREDICT_FALSE (proxy0 == 0))
                {
                  error0 = NSH_NODE_ERROR_NO_PROXY;
                  goto trace00;
                }
              nsp_nsi0 = proxy0->nsp_nsi;
            }

          entry0 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi0);

          if (PREDICT_FALSE (entry0 == 0))
            {
              error0 = NSH_NODE_ERROR_NO_MAPPING;
              goto trace00;
            }

          /* Entry should point to a mapping ... */
          map0 = pool_elt_at_index (nm->nsh_mappings, entry0[0]);

          if (PREDICT_FALSE (map0 == 0))
            {
              error0 = NSH_NODE_ERROR_NO_MAPPING;
              goto trace00;
            }

          /* Set up things for the next node to transmit, i.e. which node
           * handles the packet next and on which interface/adjacency. */
          next0 = map0->next_node;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = map0->sw_if_index;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = map0->adj_index;
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = map0->nsh_sw_if;

          if (PREDICT_FALSE (map0->nsh_action == NSH_ACTION_POP))
            {
              /* Manipulate MD2 */
              if (PREDICT_FALSE (hdr0->md_type == 2))
                {
                  nsh_md2_decap (b0, hdr0, &header_len0, &next0,
                                 NSH_NODE_NEXT_DROP);
                  if (PREDICT_FALSE (next0 == NSH_NODE_NEXT_DROP))
                    {
                      error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
                      goto trace00;
                    }
                  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
                    map0->rx_sw_if_index;
                }

              /* Pop NSH header */
              vlib_buffer_advance (b0, (word) header_len0);
              goto trace00;
            }

          entry0 = hash_get_mem (nm->nsh_entry_by_key, &map0->mapped_nsp_nsi);
          if (PREDICT_FALSE (entry0 == 0))
            {
              error0 = NSH_NODE_ERROR_NO_ENTRY;
              goto trace00;
            }

          nsh_entry0 =
            (nsh_entry_t *) pool_elt_at_index (nm->nsh_entries, entry0[0]);
          encap_hdr0 = (nsh_base_header_t *) (nsh_entry0->rewrite);
          /* rewrite_size should equal (encap_hdr0->length * 4) */
          encap_hdr_len0 = nsh_entry0->rewrite_size;

          if (PREDICT_TRUE (map0->nsh_action == NSH_ACTION_SWAP))
            {
              /* Manipulate MD2 */
              if (PREDICT_FALSE (hdr0->md_type == 2))
                {
                  nsh_md2_swap (b0, hdr0, header_len0, nsh_entry0,
                                &next0, NSH_NODE_NEXT_DROP);
                  if (PREDICT_FALSE (next0 == NSH_NODE_NEXT_DROP))
                    {
                      error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
                      goto trace00;
                    }
                }

              /* Pop old NSH header */
              vlib_buffer_advance (b0, (word) header_len0);

              /* After processing, md2's length may vary */
              encap_hdr_len0 = nsh_entry0->rewrite_size;
              /* Push new NSH header */
              vlib_buffer_advance (b0, -(word) encap_hdr_len0);
              hdr0 = vlib_buffer_get_current (b0);
              clib_memcpy_fast (hdr0, encap_hdr0, (word) encap_hdr_len0);

              goto trace00;
            }

          if (PREDICT_TRUE (map0->nsh_action == NSH_ACTION_PUSH))
            {
              /* After processing, md2's length may vary */
              encap_hdr_len0 = nsh_entry0->rewrite_size;
              /* Push new NSH header */
              vlib_buffer_advance (b0, -(word) encap_hdr_len0);
              hdr0 = vlib_buffer_get_current (b0);
              clib_memcpy_fast (hdr0, encap_hdr0, (word) encap_hdr_len0);
              /* Manipulate MD2 */
              if (PREDICT_FALSE (nsh_entry0->nsh_base.md_type == 2))
                {
                  nsh_md2_encap (b0, hdr0, nsh_entry0);
                }
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              nsh_input_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              clib_memcpy_fast (&(tr->trace_data[0]), hdr0,
                                ((hdr0->length & NSH_LEN_MASK) * 4));
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return from_frame->n_vectors;
}

/**
 * @brief Graph processing dispatch function for NSH Input
 *
 * @node nsh_input
 * @param *vm
 * @param *node
 * @param *from_frame
 *
 * @return from_frame->n_vectors
 *
 */
VLIB_NODE_FN (nsh_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                               vlib_frame_t * from_frame)
{
  return nsh_input_map (vm, node, from_frame, NSH_INPUT_TYPE);
}

/**
 * @brief Graph processing dispatch function for NSH-Proxy
 *
 * @node nsh_proxy
 * @param *vm
 * @param *node
 * @param *from_frame
 *
 * @return from_frame->n_vectors
 *
 */
VLIB_NODE_FN (nsh_proxy_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                               vlib_frame_t * from_frame)
{
  return nsh_input_map (vm, node, from_frame, NSH_PROXY_TYPE);
}

/**
 * @brief Graph processing dispatch function for NSH Classifier
 *
 * @node nsh_classifier
 * @param *vm
 * @param *node
 * @param *from_frame
 *
 * @return from_frame->n_vectors
 *
 */
VLIB_NODE_FN (nsh_classifier_node) (vlib_main_t * vm,
                                    vlib_node_runtime_t * node,
                                    vlib_frame_t * from_frame)
{
  return nsh_input_map (vm, node, from_frame, NSH_CLASSIFIER_TYPE);
}

/**
 * @brief Graph processing dispatch function for NSH-AWARE-VNF-PROXY
 *
 * @node nsh_aware_vnf_proxy
 * @param *vm
 * @param *node
 * @param *from_frame
 *
 * @return from_frame->n_vectors
 *
 */
VLIB_NODE_FN (nsh_aware_vnf_proxy_node) (vlib_main_t * vm,
                                         vlib_node_runtime_t * node,
                                         vlib_frame_t * from_frame)
{
  return nsh_input_map (vm, node, from_frame, NSH_AWARE_VNF_PROXY_TYPE);
}

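/*
 * Error strings and graph-node registrations.  The error string table and
 * each node's next-node array are expanded from the foreach_nsh_node_error
 * and foreach_nsh_node_next macros defined in nsh.h.
 */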
static char *nsh_node_error_strings[] = {
#define _(sym,string) string,
  foreach_nsh_node_error
#undef _
};

/* *INDENT-OFF* */

/* register nsh-input node */
VLIB_REGISTER_NODE (nsh_input_node) = {
  .name = "nsh-input",
  .vector_size = sizeof (u32),
  .format_trace = format_nsh_node_map_trace,
  .format_buffer = format_nsh_header,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (nsh_node_error_strings),
  .error_strings = nsh_node_error_strings,
  .n_next_nodes = NSH_NODE_N_NEXT,
  .next_nodes = {
#define _(s,n) [NSH_NODE_NEXT_##s] = n,
    foreach_nsh_node_next
#undef _
  },
};

/* register nsh-proxy node */
VLIB_REGISTER_NODE (nsh_proxy_node) =
{
  .name = "nsh-proxy",
  .vector_size = sizeof (u32),
  .format_trace = format_nsh_node_map_trace,
  .format_buffer = format_nsh_header,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (nsh_node_error_strings),
  .error_strings = nsh_node_error_strings,
  .n_next_nodes = NSH_NODE_N_NEXT,
  .next_nodes = {
#define _(s,n) [NSH_NODE_NEXT_##s] = n,
    foreach_nsh_node_next
#undef _
  },
};

/* register nsh-classifier node */
VLIB_REGISTER_NODE (nsh_classifier_node) =
{
  .name = "nsh-classifier",
  .vector_size = sizeof (u32),
  .format_trace = format_nsh_node_map_trace,
  .format_buffer = format_nsh_header,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (nsh_node_error_strings),
  .error_strings = nsh_node_error_strings,
  .n_next_nodes = NSH_NODE_N_NEXT,
  .next_nodes = {
#define _(s,n) [NSH_NODE_NEXT_##s] = n,
    foreach_nsh_node_next
#undef _
  },
};

/* register nsh-aware-vnf-proxy node */
VLIB_REGISTER_NODE (nsh_aware_vnf_proxy_node) =
{
  .name = "nsh-aware-vnf-proxy",
  .vector_size = sizeof (u32),
  .format_trace = format_nsh_node_map_trace,
  .format_buffer = format_nsh_header,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (nsh_node_error_strings),
  .error_strings = nsh_node_error_strings,
  .n_next_nodes = NSH_NODE_N_NEXT,
  .next_nodes = {
#define _(s,n) [NSH_NODE_NEXT_##s] = n,
    foreach_nsh_node_next
#undef _
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */