FD.io VPP v21.06
Vector Packet Processing
input.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vmxnet3/vmxnet3.h>

#define foreach_vmxnet3_input_error \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(RX_PACKET_NO_SOP, "Rx packet error - no SOP") \
  _(RX_PACKET, "Rx packet error") \
  _(RX_PACKET_EOP, "Rx packet error found on EOP") \
  _(NO_BUFFER, "Rx no buffer error")

typedef enum
{
#define _(f,s) VMXNET3_INPUT_ERROR_##f,
  foreach_vmxnet3_input_error
#undef _
    VMXNET3_INPUT_N_ERROR,
} vmxnet3_input_error_t;

static __clib_unused char *vmxnet3_input_error_strings[] = {
#define _(n,s) s,
  foreach_vmxnet3_input_error
#undef _
};

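/*
 * A vmxnet3 RX queue is served by two fill rings (rx_ring[0] and
 * rx_ring[1]). The ring id encoded in the completion descriptor selects
 * which of rxq->rx_ring[] / rxq->rx_desc[] the completed buffer came
 * from: return 1 for the second ring, 0 for the first.
 */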
static_always_inline u16
vmxnet3_find_rid (vmxnet3_device_t * vd, vmxnet3_rx_comp * rx_comp)
{
  u32 rid;

  // rid is bits 16-25 (a 10-bit field)
  rid = rx_comp->index & (0xffffffff >> 6);
  rid >>= 16;
  if ((rid >= vd->num_rx_queues) && (rid < (vd->num_rx_queues << 1)))
    return 1;
  else
    return 0;
}

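/*
 * Bump the completion ring cursor; when it wraps, flip the generation
 * bit we expect to see so that descriptors left over from the previous
 * lap are not mistaken for new completions.
 */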
static_always_inline void
vmxnet3_rx_comp_ring_advance_next (vmxnet3_rxq_t * rxq)
{
  vmxnet3_rx_comp_ring *comp_ring = &rxq->rx_comp_ring;

  comp_ring->next++;
  if (PREDICT_FALSE (comp_ring->next == rxq->size))
    {
      comp_ring->next = 0;
      comp_ring->gen ^= VMXNET3_RXCF_GEN;
    }
}

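/*
 * Populate vlib buffer metadata from an RX completion descriptor:
 * L2/L3/L4 header offsets, offload flags for any checksum the device
 * did not validate, and GSO size / L4 header length when the completion
 * reports a non-zero LRO MSS.
 */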
static_always_inline void
vmxnet3_handle_offload (vmxnet3_rx_comp * rx_comp, vlib_buffer_t * hb,
                        u16 gso_size)
{
  u8 l4_hdr_sz = 0;
  vnet_buffer_oflags_t oflags = 0;

  if (rx_comp->flags & VMXNET3_RXCF_IP4)
    {
      ip4_header_t *ip4 = (ip4_header_t *) (hb->data +
                                            sizeof (ethernet_header_t));

      vnet_buffer (hb)->l2_hdr_offset = 0;
      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
        ip4_header_bytes (ip4);
      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP4;

      /* checksum offload */
      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
        {
          if (!(rx_comp->flags & VMXNET3_RXCF_IPC))
            {
              oflags |= VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
              ip4->checksum = 0;
            }
          if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
            {
              if (rx_comp->flags & VMXNET3_RXCF_TCP)
                {
                  tcp_header_t *tcp =
                    (tcp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
                  tcp->checksum = 0;
                }
              else if (rx_comp->flags & VMXNET3_RXCF_UDP)
                {
                  udp_header_t *udp =
                    (udp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
                  udp->checksum = 0;
                }
            }
        }

      if (gso_size)
        {
          if (rx_comp->flags & VMXNET3_RXCF_TCP)
            {
              tcp_header_t *tcp =
                (tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = tcp_header_bytes (tcp);
            }
          else if (rx_comp->flags & VMXNET3_RXCF_UDP)
            {
              udp_header_t *udp =
                (udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = sizeof (*udp);
            }
          vnet_buffer2 (hb)->gso_size = gso_size;
          vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
          hb->flags |= VNET_BUFFER_F_GSO;
        }
    }
  else if (rx_comp->flags & VMXNET3_RXCF_IP6)
    {
      vnet_buffer (hb)->l2_hdr_offset = 0;
      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
        sizeof (ip6_header_t);
      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP6;

      /* checksum offload */
      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
        {
          if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
            {
              if (rx_comp->flags & VMXNET3_RXCF_TCP)
                {
                  tcp_header_t *tcp =
                    (tcp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
                  tcp->checksum = 0;
                }
              else if (rx_comp->flags & VMXNET3_RXCF_UDP)
                {
                  udp_header_t *udp =
                    (udp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
                  udp->checksum = 0;
                }
            }
        }

      if (gso_size)
        {
          if (rx_comp->flags & VMXNET3_RXCF_TCP)
            {
              tcp_header_t *tcp =
                (tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = tcp_header_bytes (tcp);
            }
          else if (rx_comp->flags & VMXNET3_RXCF_UDP)
            {
              udp_header_t *udp =
                (udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = sizeof (*udp);
            }
          vnet_buffer2 (hb)->gso_size = gso_size;
          vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
          hb->flags |= VNET_BUFFER_F_GSO;
        }
    }
  if (oflags)
    vnet_buffer_offload_flags_set (hb, oflags);
}

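/*
 * Per-queue receive routine: walk the RX completion ring, chain
 * multi-descriptor packets into vlib buffer chains, pick the next node
 * for each completed packet, and refill the fill rings afterwards.
 */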
static_always_inline uword
vmxnet3_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame, vmxnet3_device_t * vd,
                             u16 qid)
{
  vnet_main_t *vnm = vnet_get_main ();
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  vmxnet3_rx_comp *rx_comp;
  u32 desc_idx;
  vmxnet3_rxq_t *rxq;
  u32 thread_index = vm->thread_index;
  u32 buffer_indices[VLIB_FRAME_SIZE], *bi;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  vmxnet3_rx_ring *ring;
  vmxnet3_rx_comp_ring *comp_ring;
  u16 rid;
  vlib_buffer_t *prev_b0 = 0, *hb = 0;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u8 known_next = 0, got_packet = 0;
  vmxnet3_rx_desc *rxd;
  clib_error_t *error;
  u16 gso_size = 0;

  rxq = vec_elt_at_index (vd->rxqs, qid);
  comp_ring = &rxq->rx_comp_ring;
  bi = buffer_indices;
  next = nexts;
  rx_comp = &rxq->rx_comp[comp_ring->next];

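  /*
   * Consume completion descriptors while their generation bit matches the
   * ring's current generation and the frame still has room.
   */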
  while (PREDICT_TRUE ((n_rx_packets < VLIB_FRAME_SIZE) &&
                       (comp_ring->gen ==
                        (rx_comp->flags & VMXNET3_RXCF_GEN))))
    {
      vlib_buffer_t *b0;
      u32 bi0;

      rid = vmxnet3_find_rid (vd, rx_comp);
      ring = &rxq->rx_ring[rid];

      if (PREDICT_TRUE (ring->fill >= 1))
        ring->fill--;
      else
        {
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_NO_BUFFER, 1);
          if (hb)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          prev_b0 = 0;
          break;
        }

      desc_idx = rx_comp->index & VMXNET3_RXC_INDEX;
      ring->consume = desc_idx;
      rxd = &rxq->rx_desc[rid][desc_idx];

      bi0 = ring->bufs[desc_idx];
      ring->bufs[desc_idx] = ~0;

      b0 = vlib_get_buffer (vm, bi0);
      vnet_buffer (b0)->sw_if_index[VLIB_RX] = vd->sw_if_index;
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      vnet_buffer (b0)->feature_arc_index = 0;
      b0->current_length = rx_comp->len & VMXNET3_RXCL_LEN_MASK;
      b0->current_data = 0;
      b0->total_length_not_including_first_buffer = 0;
      b0->next_buffer = 0;
      b0->flags = 0;
      b0->error = 0;
      b0->current_config_index = 0;

      if (PREDICT_FALSE ((rx_comp->index & VMXNET3_RXCI_EOP) &&
                         (rx_comp->len & VMXNET3_RXCL_ERROR)))
        {
          vlib_buffer_free_one (vm, bi0);
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_RX_PACKET_EOP, 1);
          if (hb && vlib_get_buffer_index (vm, hb) != bi0)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          prev_b0 = 0;
          goto next;
        }

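      /*
       * A packet can span several descriptors: SOP opens a new chain,
       * EOP closes it, and descriptors with neither flag extend it.
       */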
      if (rx_comp->index & VMXNET3_RXCI_SOP)
        {
          ASSERT (!(rxd->flags & VMXNET3_RXF_BTYPE));
          /* start segment */
          if (vd->gso_enable &&
              (rx_comp->flags & VMXNET3_RXCF_CT) == VMXNET3_RXCOMP_TYPE_LRO)
            {
              vmxnet3_rx_comp_ext *lro = (vmxnet3_rx_comp_ext *) rx_comp;

              gso_size = lro->flags & VMXNET3_RXECF_MSS_MASK;
            }

          hb = b0;
          bi[0] = bi0;
          if (!(rx_comp->index & VMXNET3_RXCI_EOP))
            {
              hb->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
              prev_b0 = b0;
            }
          else
            {
              /*
               * Both start and end of packet are set. It is a complete packet.
               */
              prev_b0 = 0;
              got_packet = 1;
            }
        }
      else if (rx_comp->index & VMXNET3_RXCI_EOP)
        {
          /* end of segment */
          if (PREDICT_TRUE (prev_b0 != 0))
            {
              if (PREDICT_TRUE (b0->current_length != 0))
                {
                  prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
                  prev_b0->next_buffer = bi0;
                  hb->total_length_not_including_first_buffer +=
                    b0->current_length;
                }
              else
                {
                  vlib_buffer_free_one (vm, bi0);
                }
              prev_b0 = 0;
              got_packet = 1;
            }
          else
            {
              /* EOP without SOP, error */
              vlib_error_count (vm, node->node_index,
                                VMXNET3_INPUT_ERROR_RX_PACKET_NO_SOP, 1);
              vlib_buffer_free_one (vm, bi0);
              if (hb && vlib_get_buffer_index (vm, hb) != bi0)
                {
                  vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
                  hb = 0;
                }
              goto next;
            }
        }
      else if (prev_b0)		// !sop && !eop
        {
          /* mid chain */
          ASSERT (rxd->flags & VMXNET3_RXF_BTYPE);
          prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
          prev_b0->next_buffer = bi0;
          prev_b0 = b0;
          hb->total_length_not_including_first_buffer += b0->current_length;
        }
      else
        {
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_RX_PACKET, 1);
          vlib_buffer_free_one (vm, bi0);
          if (hb && vlib_get_buffer_index (vm, hb) != bi0)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          goto next;
        }

      n_rx_bytes += b0->current_length;

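      /*
       * A complete packet has been assembled: choose its next node
       * (per-interface redirect, device-input feature arc, or
       * ethernet-input with offload metadata) and advance the per-packet
       * cursors.
       */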
      if (got_packet)
        {
          if (PREDICT_FALSE (vd->per_interface_next_index != ~0))
            {
              next_index = vd->per_interface_next_index;
              known_next = 1;
            }

          if (PREDICT_FALSE
              (vnet_device_input_have_features (vd->sw_if_index)))
            {
              vnet_feature_start_device_input_x1 (vd->sw_if_index,
                                                  &next_index, hb);
              known_next = 1;
            }

          if (PREDICT_FALSE (known_next))
            next[0] = next_index;
          else
            {
              ethernet_header_t *e = (ethernet_header_t *) hb->data;

              next[0] = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
              if (!ethernet_frame_is_tagged (ntohs (e->type)))
                vmxnet3_handle_offload (rx_comp, hb, gso_size);
            }

          n_rx_packets++;
          next++;
          bi++;
          hb = 0;
          got_packet = 0;
          gso_size = 0;
        }

    next:
      vmxnet3_rx_comp_ring_advance_next (rxq);
      rx_comp = &rxq->rx_comp[comp_ring->next];
    }

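  /* Trace as many of the received packets as the trace count permits */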
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;

      bi = buffer_indices;
      next = nexts;
      while (n_trace && n_left)
        {
          vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
          if (PREDICT_TRUE
              (vlib_trace_buffer
               (vm, node, next[0], b, /* follow_chain */ 0)))
            {
              vmxnet3_input_trace_t *tr =
                vlib_add_trace (vm, node, b, sizeof (*tr));
              tr->next_index = next[0];
              tr->hw_if_index = vd->hw_if_index;
              tr->buffer = *b;
              n_trace--;
            }
          n_left--;
          bi++;
          next++;
        }
      vlib_set_trace_count (vm, node, n_trace);
    }

  if (PREDICT_TRUE (n_rx_packets))
    {
      vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts,
                                   n_rx_packets);
      vlib_increment_combined_counter
        (vnm->interface_main.combined_sw_if_counters +
         VNET_INTERFACE_COUNTER_RX, thread_index,
         vd->sw_if_index, n_rx_packets, n_rx_bytes);
    }

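  /*
   * Top up both hardware fill rings; allocation failures are counted but
   * do not abort the node dispatch.
   */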
  error = vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
  if (PREDICT_FALSE (error != 0))
    {
      vlib_error_count (vm, node->node_index,
                        VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);
    }
  error = vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
  if (PREDICT_FALSE (error != 0))
    {
      vlib_error_count (vm, node->node_index,
                        VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);
    }

  return n_rx_packets;
}

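/*
 * Input node dispatch function: poll every vmxnet3 RX queue assigned to
 * this worker thread, skipping interfaces that are admin-down.
 */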
VLIB_NODE_FN (vmxnet3_input_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  u32 n_rx = 0;
  vmxnet3_main_t *vmxm = &vmxnet3_main;
  vnet_hw_if_rxq_poll_vector_t *pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  vnet_hw_if_rxq_poll_vector_t *pve;

  vec_foreach (pve, pv)
    {
      vmxnet3_device_t *vd;
      vd = vec_elt_at_index (vmxm->devices, pve->dev_instance);
      if ((vd->flags & VMXNET3_DEVICE_F_ADMIN_UP) == 0)
        continue;
      n_rx += vmxnet3_device_input_inline (vm, node, frame, vd, pve->queue_id);
    }
  return n_rx;
}

#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vmxnet3_input_node) = {
  .name = "vmxnet3-input",
  .sibling_of = "device-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .format_trace = format_vmxnet3_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = VMXNET3_INPUT_N_ERROR,
  .error_strings = vmxnet3_input_error_strings,
};
#endif

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */