FD.io VPP  v19.04.4-rc0-5-ge88582fac
Vector Packet Processing
node.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/netmap/net_netmap.h>
#include <vnet/devices/netmap/netmap.h>

#define foreach_netmap_input_error

typedef enum
{
#define _(f,s) NETMAP_INPUT_ERROR_##f,
  foreach_netmap_input_error
#undef _
    NETMAP_INPUT_N_ERROR,
} netmap_input_error_t;

static char *netmap_input_error_strings[] = {
#define _(n,s) s,
  foreach_netmap_input_error
#undef _
};

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  struct netmap_slot slot;
} netmap_input_trace_t;

static u8 *
format_netmap_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  netmap_input_trace_t *t = va_arg (*args, netmap_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "netmap: hw_if_index %d next-index %d",
              t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: flags 0x%x len %u buf_idx %u",
              format_white_space, indent + 2,
              t->slot.flags, t->slot.len, t->slot.buf_idx);
  return s;
}

always_inline void
buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);

  /* update first buffer */
  first_b->total_length_not_including_first_buffer += b->current_length;

  /* update previous buffer */
  prev_b->next_buffer = bi;
  prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

  /* update current buffer */
  b->next_buffer = 0;
}

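/* Poll the RX rings of one netmap interface: copy each received frame into
 * one or more chained VLIB buffers and enqueue it toward the configured
 * next node.  Returns the number of packets received. */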
always_inline uword
netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                        vlib_frame_t * frame, netmap_if_t * nif)
{
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  uword n_trace = vlib_get_trace_count (vm, node);
  netmap_main_t *nm = &netmap_main;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u32 *to_next = 0;
  u32 n_free_bufs;
  struct netmap_ring *ring;
  int cur_ring;
  u32 thread_index = vm->thread_index;
  u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);

  if (nif->per_interface_next_index != ~0)
    next_index = nif->per_interface_next_index;

  n_free_bufs = vec_len (nm->rx_buffers[thread_index]);
  if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
    {
      vec_validate (nm->rx_buffers[thread_index],
                    VLIB_FRAME_SIZE + n_free_bufs - 1);
      n_free_bufs +=
        vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs],
                           VLIB_FRAME_SIZE);
      _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs;
    }

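  /* Walk every RX ring of the interface.  For each slot with data, the frame
   * is copied into a free VLIB buffer; frames larger than one buffer are
   * spread across a chain built with buffer_add_to_chain(). */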
  cur_ring = nif->first_rx_ring;
  while (cur_ring <= nif->last_rx_ring && n_free_bufs)
    {
      int r = 0;
      u32 cur_slot_index;
      ring = NETMAP_RXRING (nif->nifp, cur_ring);
      r = nm_ring_space (ring);

      if (!r)
        {
          cur_ring++;
          continue;
        }

      if (r > n_free_bufs)
        r = n_free_bufs;

      cur_slot_index = ring->cur;
      while (r)
        {
          u32 n_left_to_next;
          u32 next0 = next_index;
          vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

          while (r && n_left_to_next)
            {
              vlib_buffer_t *first_b0 = 0;
              u32 offset = 0;
              u32 bi0 = 0, first_bi0 = 0, prev_bi0;
              u32 next_slot_index = (cur_slot_index + 1) % ring->num_slots;
              u32 next2_slot_index = (cur_slot_index + 2) % ring->num_slots;
              struct netmap_slot *slot = &ring->slot[cur_slot_index];
              u32 data_len = slot->len;

              /* prefetch 2 slots in advance */
              CLIB_PREFETCH (&ring->slot[next2_slot_index],
                             CLIB_CACHE_LINE_BYTES, LOAD);
              /* prefetch start of next packet */
              CLIB_PREFETCH (NETMAP_BUF
                             (ring, ring->slot[next_slot_index].buf_idx),
                             CLIB_CACHE_LINE_BYTES, LOAD);

              while (data_len && n_free_bufs)
                {
                  vlib_buffer_t *b0;
                  /* grab free buffer */
                  u32 last_empty_buffer =
                    vec_len (nm->rx_buffers[thread_index]) - 1;
                  prev_bi0 = bi0;
                  bi0 = nm->rx_buffers[thread_index][last_empty_buffer];
                  b0 = vlib_get_buffer (vm, bi0);
                  _vec_len (nm->rx_buffers[thread_index]) = last_empty_buffer;
                  n_free_bufs--;

                  /* copy data */
                  u32 bytes_to_copy =
                    data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
                  b0->current_data = 0;
                  clib_memcpy_fast (vlib_buffer_get_current (b0),
                                    (u8 *) NETMAP_BUF (ring, slot->buf_idx) +
                                    offset, bytes_to_copy);

                  /* fill buffer header */
                  b0->current_length = bytes_to_copy;

                  if (offset == 0)
                    {
                      b0->total_length_not_including_first_buffer = 0;
                      b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
                      vnet_buffer (b0)->sw_if_index[VLIB_RX] =
                        nif->sw_if_index;
                      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
                      first_bi0 = bi0;
                      first_b0 = vlib_get_buffer (vm, first_bi0);
                    }
                  else
                    buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);

                  offset += bytes_to_copy;
                  data_len -= bytes_to_copy;
                }

              /* trace */
              VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
              if (PREDICT_FALSE (n_trace > 0))
                {
                  if (PREDICT_TRUE (first_b0 != 0))
                    {
                      netmap_input_trace_t *tr;
                      vlib_trace_buffer (vm, node, next0, first_b0,
                                         /* follow_chain */ 0);
                      vlib_set_trace_count (vm, node, --n_trace);
                      tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
                      tr->next_index = next0;
                      tr->hw_if_index = nif->hw_if_index;
                      memcpy (&tr->slot, slot, sizeof (struct netmap_slot));
                    }
                }

              /* redirect if feature path enabled */
              vnet_feature_start_device_input_x1 (nif->sw_if_index, &next0,
                                                  first_b0);

              /* enque and take next packet */
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                               n_left_to_next, first_bi0,
                                               next0);

              /* next packet */
              n_rx_packets++;
              n_rx_bytes += slot->len;
              to_next[0] = first_bi0;
              to_next += 1;
              n_left_to_next--;
              cur_slot_index = next_slot_index;

              r--;
            }
          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
        }
      ring->head = ring->cur = cur_slot_index;
      cur_ring++;
    }

  if (n_rx_packets)
    ioctl (nif->fd, NIOCRXSYNC, NULL);

  vlib_increment_combined_counter
    (vnet_get_main ()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX,
     vlib_get_thread_index (), nif->hw_if_index, n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (thread_index, n_rx_packets);

  return n_rx_packets;
}

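/* Node dispatch function: each input thread polls the admin-up netmap
 * interfaces statically assigned to it (round-robin by interface index). */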
VLIB_NODE_FN (netmap_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * frame)
{
  int i;
  u32 n_rx_packets = 0;
  u32 thread_index = vm->thread_index;
  netmap_main_t *nm = &netmap_main;
  netmap_if_t *nmi;

  for (i = 0; i < vec_len (nm->interfaces); i++)
    {
      nmi = vec_elt_at_index (nm->interfaces, i);
      if (nmi->is_admin_up &&
          (i % nm->input_cpu_count) ==
          (thread_index - nm->input_cpu_first_index))
        n_rx_packets += netmap_device_input_fn (vm, node, frame, nmi);
    }

  return n_rx_packets;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (netmap_input_node) = {
  .name = "netmap-input",
  .sibling_of = "device-input",
  .format_trace = format_netmap_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  /* default state is INTERRUPT mode, switch to POLLING if worker threads are enabled */
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = NETMAP_INPUT_N_ERROR,
  .error_strings = netmap_input_error_strings,
};
/* *INDENT-ON* */


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
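Note on the error-counter pattern: foreach_netmap_input_error is defined empty in this file, so the enum above contains only NETMAP_INPUT_N_ERROR and the string table has no entries. The sketch below shows how the X-macro would be populated if per-node error counters were added; the NO_FREE_BUF entry and its string are hypothetical and not part of this file.

/* Hypothetical entry, for illustration only -- not present in node.c. */
#define foreach_netmap_input_error \
  _(NO_FREE_BUF, "no free buffer to copy packet into")

typedef enum
{
#define _(f,s) NETMAP_INPUT_ERROR_##f,  /* expands to NETMAP_INPUT_ERROR_NO_FREE_BUF, */
  foreach_netmap_input_error
#undef _
    NETMAP_INPUT_N_ERROR,               /* always last: number of error counters */
} netmap_input_error_t;

static char *netmap_input_error_strings[] = {
#define _(n,s) s,                       /* expands to "no free buffer to copy packet into", */
  foreach_netmap_input_error
#undef _
};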