FD.io VPP  v21.06
Vector Packet Processing
ixge.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /*
17  * WARNING!
18  * This driver is not intended for production use and it is unsupported.
19  * It is provided for educational use only.
20  * Please use supported DPDK driver instead.
21  */
22 
23 #if __x86_64__ || __i386__ || __aarch64__
24 #include <vppinfra/vector.h>
25 
26 #ifndef CLIB_HAVE_VEC128
27 #warning HACK: ixge driver wont really work, missing u32x4
28 typedef unsigned long long u32x4;
29 #endif
30 
31 #include <vlib/vlib.h>
32 #include <vlib/unix/unix.h>
33 #include <vlib/pci/pci.h>
34 #include <vnet/vnet.h>
35 #include <ixge/ixge.h>
36 #include <vnet/ethernet/ethernet.h>
37 #include <vnet/plugin/plugin.h>
38 #include <vpp/app/version.h>
39 
40 #define IXGE_ALWAYS_POLL 0
41 
42 #define EVENT_SET_FLAGS 0
43 #define IXGE_HWBP_RACE_ELOG 0
44 
45 #define PCI_VENDOR_ID_INTEL 0x8086
46 
47 /* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
48 #define XGE_PHY_DEV_TYPE_PMA_PMD 1
49 #define XGE_PHY_DEV_TYPE_PHY_XS 4
50 #define XGE_PHY_ID1 0x2
51 #define XGE_PHY_ID2 0x3
52 #define XGE_PHY_CONTROL 0x0
53 #define XGE_PHY_CONTROL_RESET (1 << 15)
54 
58 
59 static void
61 {
62  ixge_main_t *xm = &ixge_main;
63  vlib_main_t *vm = xm->vlib_main;
64  ixge_regs_t *r = xd->regs;
65  u32 i;
66 
67  i = 0;
68  while (!(r->software_semaphore & (1 << 0)))
69  {
70  if (i > 0)
71  vlib_process_suspend (vm, 100e-6);
72  i++;
73  }
74  do
75  {
76  r->software_semaphore |= 1 << 1;
77  }
78  while (!(r->software_semaphore & (1 << 1)));
79 }
80 
81 static void
83 {
84  ixge_regs_t *r = xd->regs;
85  r->software_semaphore &= ~3;
86 }
87 
88 static void
90 {
91  ixge_main_t *xm = &ixge_main;
92  vlib_main_t *vm = xm->vlib_main;
93  ixge_regs_t *r = xd->regs;
94  u32 fw_mask = sw_mask << 5;
95  u32 m, done = 0;
96 
97  while (!done)
98  {
99  ixge_semaphore_get (xd);
100  m = r->software_firmware_sync;
101  done = (m & fw_mask) == 0;
102  if (done)
103  r->software_firmware_sync = m | sw_mask;
105  if (!done)
106  vlib_process_suspend (vm, 10e-3);
107  }
108 }
109 
110 static void
112 {
113  ixge_regs_t *r = xd->regs;
114  ixge_semaphore_get (xd);
115  r->software_firmware_sync &= ~sw_mask;
117 }
118 
119 u32
/* Read or write one 10G PHY register over MDIO (IEEE 802.3 clause 45).
 *
 *  xd        - device whose PHY is accessed; xd->phy_index selects 1 of 2 PHYs
 *  dev_type  - clause-45 MMD device type (must fit in 5 bits, see ASSERT)
 *  reg_index - register address within that device (must fit in 16 bits)
 *  v         - value to write when is_read == 0; ignored on reads
 *  is_read   - non-zero => read, zero => write
 *
 * Returns the register contents for a read, or v unchanged for a write.
 * The software/firmware sync lock for this PHY is held for the duration. */
120 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
121  u32 v, u32 is_read)
122 {
123  ixge_regs_t *r = xd->regs;
/* Bit 30 of phy_command: command-in-progress flag, polled until it clears. */
124  const u32 busy_bit = 1 << 30;
125  u32 x;
126 
127  ASSERT (xd->phy_index < 2);
/* Sync-lock bit for this PHY is (1 << (1 + phy_index)); released below. */
128  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));
129 
130  ASSERT (reg_index < (1 << 16));
131  ASSERT (dev_type < (1 << 5));
/* For writes, stage the data word before issuing the command. */
132  if (!is_read)
133  r->xge_mac.phy_data = v;
134 
135  /* Address cycle. */
/* Command word layout: reg_index [15:0], dev_type [20:16], mdio addr [25:21]. */
136  x =
137  reg_index | (dev_type << 16) | (xd->
138  phys[xd->phy_index].mdio_address << 21);
139  r->xge_mac.phy_command = x | busy_bit;
140  /* Busy wait timed to take 28e-6 secs. No suspend. */
141  while (r->xge_mac.phy_command & busy_bit)
142  ;
143 
/* Data cycle: opcode in bits [27:26] is 2 for read, 1 for write. */
144  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
145  while (r->xge_mac.phy_command & busy_bit)
146  ;
147 
/* Read data comes back in the upper 16 bits of phy_data. */
148  if (is_read)
149  v = r->xge_mac.phy_data >> 16;
150 
151  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));
152 
153  return v;
154 }
155 
156 static u32
157 ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
158 {
159  return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
160  1);
161 }
162 
163 static void
164 ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
165 {
166  (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
167  0);
168 }
169 
170 static void
171 ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
172 {
173  ixge_main_t *xm = &ixge_main;
175  u32 v;
176 
177  v = 0;
178  v |= (sda != 0) << 3;
179  v |= (scl != 0) << 1;
180  xd->regs->i2c_control = v;
181 }
182 
183 static void
184 ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
185 {
186  ixge_main_t *xm = &ixge_main;
188  u32 v;
189 
190  v = xd->regs->i2c_control;
191  *sda = (v & (1 << 2)) != 0;
192  *scl = (v & (1 << 0)) != 0;
193 }
194 
195 static u16
197 {
198  ixge_regs_t *r = xd->regs;
199  u32 v;
200  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
201  /* Wait for done bit. */
202  while (!((v = r->eeprom_read) & (1 << 1)))
203  ;
204  return v >> 16;
205 }
206 
207 static void
209 {
210  u32 tx_disable_bit = 1 << 3;
211  if (enable)
212  xd->regs->sdp_control &= ~tx_disable_bit;
213  else
214  xd->regs->sdp_control |= tx_disable_bit;
215 }
216 
217 static void
219 {
220  u32 is_10g_bit = 1 << 5;
221  if (enable)
222  xd->regs->sdp_control |= is_10g_bit;
223  else
224  xd->regs->sdp_control &= ~is_10g_bit;
225 }
226 
227 static clib_error_t *
229 {
230  u16 a, id, reg_values_addr = 0;
231 
232  a = ixge_read_eeprom (xd, 0x2b);
233  if (a == 0 || a == 0xffff)
234  return clib_error_create ("no init sequence in eeprom");
235 
236  while (1)
237  {
238  id = ixge_read_eeprom (xd, ++a);
239  if (id == 0xffff)
240  break;
241  reg_values_addr = ixge_read_eeprom (xd, ++a);
242  if (id == sfp_type)
243  break;
244  }
245  if (id != sfp_type)
246  return clib_error_create ("failed to find id 0x%x", sfp_type);
247 
248  ixge_software_firmware_sync (xd, 1 << 3);
249  while (1)
250  {
251  u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
252  if (v == 0xffff)
253  break;
254  xd->regs->core_analog_config = v;
255  }
257 
258  /* Make sure laser is off. We'll turn on the laser when
259  the interface is brought up. */
260  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
261  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);
262 
263  return 0;
264 }
265 
266 static void
268 {
269  u32 v;
270 
271  if (is_up)
272  {
273  /* pma/pmd 10g serial SFI. */
274  xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
275  xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;
276 
278  v &= ~(7 << 13);
279  v |= (0 << 13);
280  /* Restart autoneg. */
281  v |= (1 << 12);
283 
284  while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
285  ;
286 
288 
289  /* link mode 10g sfi serdes */
290  v &= ~(7 << 13);
291  v |= (3 << 13);
292 
293  /* Restart autoneg. */
294  v |= (1 << 12);
296 
297  xd->regs->xge_mac.link_status;
298  }
299 
300  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);
301 
302  /* Give time for link partner to notice that we're up. */
303  if (is_up && vlib_in_process_context (vlib_get_main ()))
304  {
305  vlib_process_suspend (vlib_get_main (), 300e-3);
306  }
307 }
308 
311 {
312  ixge_regs_t *r = xd->regs;
313  ASSERT (qi < 128);
314  if (rt == VLIB_RX)
315  return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
316  else
317  return &r->tx_dma[qi];
318 }
319 
320 static clib_error_t *
322 {
323  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
324  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
325  ixge_main_t *xm = &ixge_main;
327  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
328 
329  if (is_up)
330  {
331  xd->regs->rx_enable |= 1;
332  xd->regs->tx_dma_control |= 1;
333  dr->control |= 1 << 25;
334  while (!(dr->control & (1 << 25)))
335  ;
336  }
337  else
338  {
339  xd->regs->rx_enable &= ~1;
340  xd->regs->tx_dma_control &= ~1;
341  }
342 
343  ixge_sfp_device_up_down (xd, is_up);
344 
345  return /* no error */ 0;
346 }
347 
348 static void
350 {
351  ixge_phy_t *phy = xd->phys + xd->phy_index;
352  i2c_bus_t *ib = &xd->i2c_bus;
353 
354  ib->private_data = xd->device_index;
357  vlib_i2c_init (ib);
358 
359  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);
360 
362  xd->sfp_eeprom.id = SFP_ID_UNKNOWN;
363  else
364  {
365  /* FIXME 5 => SR/LR eeprom ID. */
366  clib_error_t *e =
368  if (e)
369  clib_error_report (e);
370  }
371 
372  phy->mdio_address = ~0;
373 }
374 
375 static void
377 {
378  ixge_main_t *xm = &ixge_main;
379  vlib_main_t *vm = xm->vlib_main;
380  ixge_phy_t *phy = xd->phys + xd->phy_index;
381 
382  switch (xd->device_id)
383  {
384  case IXGE_82599_sfp:
385  case IXGE_82599_sfp_em:
386  case IXGE_82599_sfp_fcoe:
387  /* others? */
388  return ixge_sfp_phy_init (xd);
389 
390  default:
391  break;
392  }
393 
394  /* Probe address of phy. */
395  {
396  u32 i, v;
397 
398  phy->mdio_address = ~0;
399  for (i = 0; i < 32; i++)
400  {
401  phy->mdio_address = i;
403  if (v != 0xffff && v != 0)
404  break;
405  }
406 
407  /* No PHY found? */
408  if (i >= 32)
409  return;
410  }
411 
412  phy->id =
415 
416  {
417  ELOG_TYPE_DECLARE (e) =
418  {
419  .function = (char *) __FUNCTION__,.format =
420  "ixge %d, phy id 0x%d mdio address %d",.format_args = "i4i4i4",};
421  struct
422  {
424  } *ed;
425  ed = ELOG_DATA (&vm->elog_main, e);
426  ed->instance = xd->device_index;
427  ed->id = phy->id;
428  ed->address = phy->mdio_address;
429  }
430 
431  /* Reset phy. */
434 
435  /* Wait for self-clearning reset bit to clear. */
436  do
437  {
438  vlib_process_suspend (vm, 1e-3);
439  }
442 }
443 
444 static u8 *
446 {
448  va_arg (*va, ixge_rx_from_hw_descriptor_t *);
449  u32 s0 = d->status[0], s2 = d->status[2];
450  u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
451  u32 indent = format_get_indent (s);
452 
453  s = format (s, "%s-owned",
455  "hw");
456  s =
457  format (s, ", length this descriptor %d, l3 offset %d",
461  s = format (s, ", end-of-packet");
462 
463  s = format (s, "\n%U", format_white_space, indent);
464 
466  s = format (s, "layer2 error");
467 
469  {
470  s = format (s, "layer 2 type %d", (s0 & 0x1f));
471  return s;
472  }
473 
475  s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
476  format_white_space, indent);
477 
478  if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
479  {
480  s = format (s, "ip4%s",
481  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
482  "");
484  s = format (s, " checksum %s",
486  "bad" : "ok");
487  }
488  if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
489  s = format (s, "ip6%s",
490  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
491  "");
492  is_tcp = is_udp = 0;
493  if ((is_ip = (is_ip4 | is_ip6)))
494  {
495  is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
496  is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
497  if (is_tcp)
498  s = format (s, ", tcp");
499  if (is_udp)
500  s = format (s, ", udp");
501  }
502 
504  s = format (s, ", tcp checksum %s",
506  "ok");
508  s = format (s, ", udp checksum %s",
510  "ok");
511 
512  return s;
513 }
514 
515 static u8 *
516 format_ixge_tx_descriptor (u8 * s, va_list * va)
517 {
518  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
519  u32 s0 = d->status0, s1 = d->status1;
520  u32 indent = format_get_indent (s);
521  u32 v;
522 
523  s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
524  d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);
525 
526  s = format (s, "\n%U", format_white_space, indent);
527 
528  if ((v = (s0 >> 0) & 3))
529  s = format (s, "reserved 0x%x, ", v);
530 
531  if ((v = (s0 >> 2) & 3))
532  s = format (s, "mac 0x%x, ", v);
533 
534  if ((v = (s0 >> 4) & 0xf) != 3)
535  s = format (s, "type 0x%x, ", v);
536 
537  s = format (s, "%s%s%s%s%s%s%s%s",
538  (s0 & (1 << 8)) ? "eop, " : "",
539  (s0 & (1 << 9)) ? "insert-fcs, " : "",
540  (s0 & (1 << 10)) ? "reserved26, " : "",
541  (s0 & (1 << 11)) ? "report-status, " : "",
542  (s0 & (1 << 12)) ? "reserved28, " : "",
543  (s0 & (1 << 13)) ? "is-advanced, " : "",
544  (s0 & (1 << 14)) ? "vlan-enable, " : "",
545  (s0 & (1 << 15)) ? "tx-segmentation, " : "");
546 
547  if ((v = s1 & 0xf) != 0)
548  s = format (s, "status 0x%x, ", v);
549 
550  if ((v = (s1 >> 4) & 0xf))
551  s = format (s, "context 0x%x, ", v);
552 
553  if ((v = (s1 >> 8) & 0x3f))
554  s = format (s, "options 0x%x, ", v);
555 
556  return s;
557 }
558 
559 typedef struct
560 {
562 
564 
566 
568 
570 
571  /* Copy of VLIB buffer; packet data stored in pre_data. */
574 
575 static u8 *
576 format_ixge_rx_dma_trace (u8 * s, va_list * va)
577 {
578  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
579  vlib_node_t *node = va_arg (*va, vlib_node_t *);
580  vnet_main_t *vnm = vnet_get_main ();
581  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
582  ixge_main_t *xm = &ixge_main;
585  u32 indent = format_get_indent (s);
586 
587  {
588  vnet_sw_interface_t *sw =
590  s =
591  format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
592  t->queue_index);
593  }
594 
595  s = format (s, "\n%Ubefore: %U",
596  format_white_space, indent,
598  s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
599  format_white_space, indent,
601 
602  s = format (s, "\n%Ubuffer 0x%x: %U",
603  format_white_space, indent,
605 
606  s = format (s, "\n%U", format_white_space, indent);
607 
608  f = node->format_buffer;
609  if (!f || !t->is_start_of_packet)
610  f = format_hex_bytes;
611  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
612 
613  return s;
614 }
615 
/* Driver error counters: _ (symbol, "description").  Expanded with
   #define _(f,s) IXGE_ERROR_##f below to build the ixge_error_t enum. */
616 #define foreach_ixge_error \
617  _ (none, "no error") \
618  _ (tx_full_drops, "tx ring full drops") \
619  _ (ip4_checksum_error, "ip4 checksum errors") \
620  _ (rx_alloc_fail, "rx buf alloc from free list failed") \
621  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")
622 
623 typedef enum
624 {
625 #define _(f,s) IXGE_ERROR_##f,
627 #undef _
629 } ixge_error_t;
630 
631 always_inline void
633  u32 s00, u32 s02,
634  u8 * next0, u8 * error0, u32 * flags0)
635 {
636  u8 is0_ip4, is0_ip6, n0, e0;
637  u32 f0;
638 
639  e0 = IXGE_ERROR_none;
641 
643  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
644 
645  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
646  ? IXGE_ERROR_ip4_checksum_error : e0);
647 
648  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
649  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
650 
651  n0 = (xd->per_interface_next_index != ~0) ?
652  xd->per_interface_next_index : n0;
653 
654  /* Check for error. */
655  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
656 
659  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
660 
663  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
664 
665  *error0 = e0;
666  *next0 = n0;
667  *flags0 = f0;
668 }
669 
670 always_inline void
672  u32 s00, u32 s02,
673  u32 s10, u32 s12,
674  u8 * next0, u8 * error0, u32 * flags0,
675  u8 * next1, u8 * error1, u32 * flags1)
676 {
677  u8 is0_ip4, is0_ip6, n0, e0;
678  u8 is1_ip4, is1_ip6, n1, e1;
679  u32 f0, f1;
680 
681  e0 = e1 = IXGE_ERROR_none;
682  n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;
683 
686 
687  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
688  n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
689 
690  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
691  ? IXGE_ERROR_ip4_checksum_error : e0);
692  e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
693  ? IXGE_ERROR_ip4_checksum_error : e1);
694 
695  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
696  is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
697 
698  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
699  n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;
700 
701  n0 = (xd->per_interface_next_index != ~0) ?
702  xd->per_interface_next_index : n0;
703  n1 = (xd->per_interface_next_index != ~0) ?
704  xd->per_interface_next_index : n1;
705 
706  /* Check for error. */
707  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
708  n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;
709 
710  *error0 = e0;
711  *error1 = e1;
712 
713  *next0 = n0;
714  *next1 = n1;
715 
718  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
721  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
722 
725  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
728  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
729 
730  *flags0 = f0;
731  *flags1 = f1;
732 }
733 
734 static void
736  ixge_device_t * xd,
737  ixge_dma_queue_t * dq,
738  ixge_descriptor_t * before_descriptors,
739  u32 * before_buffers,
740  ixge_descriptor_t * after_descriptors, uword n_descriptors)
741 {
742  vlib_main_t *vm = xm->vlib_main;
743  vlib_node_runtime_t *node = dq->rx.node;
746  u32 *b, n_left, is_sop, next_index_sop;
747 
748  n_left = n_descriptors;
749  b = before_buffers;
750  bd = &before_descriptors->rx_from_hw;
751  ad = &after_descriptors->rx_to_hw;
752  is_sop = dq->rx.is_start_of_packet;
753  next_index_sop = dq->rx.saved_start_of_packet_next_index;
754 
755  while (n_left >= 2)
756  {
757  u32 bi0, bi1, flags0, flags1;
758  vlib_buffer_t *b0, *b1;
759  ixge_rx_dma_trace_t *t0, *t1;
760  u8 next0, error0, next1, error1;
761 
762  bi0 = b[0];
763  bi1 = b[1];
764  n_left -= 2;
765 
766  b0 = vlib_get_buffer (vm, bi0);
767  b1 = vlib_get_buffer (vm, bi1);
768 
770  bd[0].status[0], bd[0].status[2],
771  bd[1].status[0], bd[1].status[2],
772  &next0, &error0, &flags0,
773  &next1, &error1, &flags1);
774 
775  next_index_sop = is_sop ? next0 : next_index_sop;
776  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
777  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
778  t0->is_start_of_packet = is_sop;
779  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
780 
781  next_index_sop = is_sop ? next1 : next_index_sop;
782  vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
783  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
784  t1->is_start_of_packet = is_sop;
785  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
786 
787  t0->queue_index = dq->queue_index;
788  t1->queue_index = dq->queue_index;
789  t0->device_index = xd->device_index;
790  t1->device_index = xd->device_index;
791  t0->before.rx_from_hw = bd[0];
792  t1->before.rx_from_hw = bd[1];
793  t0->after.rx_to_hw = ad[0];
794  t1->after.rx_to_hw = ad[1];
795  t0->buffer_index = bi0;
796  t1->buffer_index = bi1;
797  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
798  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
799  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
800  sizeof (t0->buffer.pre_data));
801  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
802  sizeof (t1->buffer.pre_data));
803 
804  b += 2;
805  bd += 2;
806  ad += 2;
807  }
808 
809  while (n_left >= 1)
810  {
811  u32 bi0, flags0;
812  vlib_buffer_t *b0;
814  u8 next0, error0;
815 
816  bi0 = b[0];
817  n_left -= 1;
818 
819  b0 = vlib_get_buffer (vm, bi0);
820 
822  bd[0].status[0], bd[0].status[2],
823  &next0, &error0, &flags0);
824 
825  next_index_sop = is_sop ? next0 : next_index_sop;
826  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
827  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
828  t0->is_start_of_packet = is_sop;
829  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
830 
831  t0->queue_index = dq->queue_index;
832  t0->device_index = xd->device_index;
833  t0->before.rx_from_hw = bd[0];
834  t0->after.rx_to_hw = ad[0];
835  t0->buffer_index = bi0;
836  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
837  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
838  sizeof (t0->buffer.pre_data));
839 
840  b += 1;
841  bd += 1;
842  ad += 1;
843  }
844 }
845 
846 typedef struct
847 {
849 
851 
853 
855 
857 
858  /* Copy of VLIB buffer; packet data stored in pre_data. */
861 
862 static u8 *
863 format_ixge_tx_dma_trace (u8 * s, va_list * va)
864 {
865  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
866  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
867  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
868  vnet_main_t *vnm = vnet_get_main ();
869  ixge_main_t *xm = &ixge_main;
872  u32 indent = format_get_indent (s);
873 
874  {
875  vnet_sw_interface_t *sw =
877  s =
878  format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
879  t->queue_index);
880  }
881 
882  s = format (s, "\n%Udescriptor: %U",
883  format_white_space, indent,
885 
886  s = format (s, "\n%Ubuffer 0x%x: %U",
887  format_white_space, indent,
889 
890  s = format (s, "\n%U", format_white_space, indent);
891 
893  if (!f || !t->is_start_of_packet)
894  f = format_hex_bytes;
895  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
896 
897  return s;
898 }
899 
900 typedef struct
901 {
903 
905 
907 
910 
911 static void
913  ixge_device_t * xd,
914  ixge_dma_queue_t * dq,
915  ixge_tx_state_t * tx_state,
916  ixge_tx_descriptor_t * descriptors,
917  u32 * buffers, uword n_descriptors)
918 {
919  vlib_main_t *vm = xm->vlib_main;
920  vlib_node_runtime_t *node = tx_state->node;
922  u32 *b, n_left, is_sop;
923 
924  n_left = n_descriptors;
925  b = buffers;
926  d = descriptors;
927  is_sop = tx_state->is_start_of_packet;
928 
929  while (n_left >= 2)
930  {
931  u32 bi0, bi1;
932  vlib_buffer_t *b0, *b1;
933  ixge_tx_dma_trace_t *t0, *t1;
934 
935  bi0 = b[0];
936  bi1 = b[1];
937  n_left -= 2;
938 
939  b0 = vlib_get_buffer (vm, bi0);
940  b1 = vlib_get_buffer (vm, bi1);
941 
942  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
943  t0->is_start_of_packet = is_sop;
944  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
945 
946  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
947  t1->is_start_of_packet = is_sop;
948  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
949 
950  t0->queue_index = dq->queue_index;
951  t1->queue_index = dq->queue_index;
952  t0->device_index = xd->device_index;
953  t1->device_index = xd->device_index;
954  t0->descriptor = d[0];
955  t1->descriptor = d[1];
956  t0->buffer_index = bi0;
957  t1->buffer_index = bi1;
958  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
959  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
960  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
961  sizeof (t0->buffer.pre_data));
962  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
963  sizeof (t1->buffer.pre_data));
964 
965  b += 2;
966  d += 2;
967  }
968 
969  while (n_left >= 1)
970  {
971  u32 bi0;
972  vlib_buffer_t *b0;
974 
975  bi0 = b[0];
976  n_left -= 1;
977 
978  b0 = vlib_get_buffer (vm, bi0);
979 
980  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
981  t0->is_start_of_packet = is_sop;
982  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
983 
984  t0->queue_index = dq->queue_index;
985  t0->device_index = xd->device_index;
986  t0->descriptor = d[0];
987  t0->buffer_index = bi0;
988  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
989  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
990  sizeof (t0->buffer.pre_data));
991 
992  b += 1;
993  d += 1;
994  }
995 }
996 
999 {
1000  i32 d = i1 - i0;
1001  ASSERT (i0 < q->n_descriptors);
1002  ASSERT (i1 < q->n_descriptors);
1003  return d < 0 ? q->n_descriptors + d : d;
1004 }
1005 
1008 {
1009  u32 d = i0 + i1;
1010  ASSERT (i0 < q->n_descriptors);
1011  ASSERT (i1 < q->n_descriptors);
1012  d -= d >= q->n_descriptors ? q->n_descriptors : 0;
1013  return d;
1014 }
1015 
1019 {
1020  u32 cmp;
1021 
1022  cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
1024  if (cmp)
1025  return 0;
1026  cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
1028  if (cmp)
1029  return 0;
1030 
1031  return 1;
1032 }
1033 
1034 static uword
1036  ixge_device_t * xd,
1037  ixge_dma_queue_t * dq,
1038  u32 * buffers,
1039  u32 start_descriptor_index,
1040  u32 n_descriptors, ixge_tx_state_t * tx_state)
1041 {
1042  vlib_main_t *vm = xm->vlib_main;
1043  ixge_tx_descriptor_t *d, *d_sop;
1044  u32 n_left = n_descriptors;
1045  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
1046  u32 *to_tx =
1047  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1048  u32 is_sop = tx_state->is_start_of_packet;
1049  u32 len_sop = tx_state->n_bytes_in_packet;
1050  u16 template_status = xm->tx_descriptor_template.status0;
1051  u32 descriptor_prefetch_rotor = 0;
1052 
1053  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1054  d = &dq->descriptors[start_descriptor_index].tx;
1055  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;
1056 
1057  while (n_left >= 4)
1058  {
1059  vlib_buffer_t *b0, *b1;
1060  u32 bi0, fi0, len0;
1061  u32 bi1, fi1, len1;
1062  u8 is_eop0, is_eop1;
1063 
1064  /* Prefetch next iteration. */
1065  vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
1066  vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
1067 
1068  if ((descriptor_prefetch_rotor & 0x3) == 0)
1069  CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
1070 
1071  descriptor_prefetch_rotor += 2;
1072 
1073  bi0 = buffers[0];
1074  bi1 = buffers[1];
1075 
1076  to_free[0] = fi0 = to_tx[0];
1077  to_tx[0] = bi0;
1078  to_free += fi0 != 0;
1079 
1080  to_free[0] = fi1 = to_tx[1];
1081  to_tx[1] = bi1;
1082  to_free += fi1 != 0;
1083 
1084  buffers += 2;
1085  n_left -= 2;
1086  to_tx += 2;
1087 
1088  b0 = vlib_get_buffer (vm, bi0);
1089  b1 = vlib_get_buffer (vm, bi1);
1090 
1091  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1092  is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1093 
1094  len0 = b0->current_length;
1095  len1 = b1->current_length;
1096 
1099 
1100  d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
1101  d[1].buffer_address = vlib_buffer_get_pa (vm, b1);
1102 
1103  d[0].n_bytes_this_buffer = len0;
1104  d[1].n_bytes_this_buffer = len1;
1105 
1106  d[0].status0 =
1107  template_status | (is_eop0 <<
1109  d[1].status0 =
1110  template_status | (is_eop1 <<
1112 
1113  len_sop = (is_sop ? 0 : len_sop) + len0;
1114  d_sop[0].status1 =
1116  d += 1;
1117  d_sop = is_eop0 ? d : d_sop;
1118 
1119  is_sop = is_eop0;
1120 
1121  len_sop = (is_sop ? 0 : len_sop) + len1;
1122  d_sop[0].status1 =
1124  d += 1;
1125  d_sop = is_eop1 ? d : d_sop;
1126 
1127  is_sop = is_eop1;
1128  }
1129 
1130  while (n_left > 0)
1131  {
1132  vlib_buffer_t *b0;
1133  u32 bi0, fi0, len0;
1134  u8 is_eop0;
1135 
1136  bi0 = buffers[0];
1137 
1138  to_free[0] = fi0 = to_tx[0];
1139  to_tx[0] = bi0;
1140  to_free += fi0 != 0;
1141 
1142  buffers += 1;
1143  n_left -= 1;
1144  to_tx += 1;
1145 
1146  b0 = vlib_get_buffer (vm, bi0);
1147 
1148  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1149 
1150  len0 = b0->current_length;
1151 
1153 
1154  d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
1155  d[0].n_bytes_this_buffer = len0;
1156 
1157  d[0].status0 =
1158  template_status | (is_eop0 <<
1160 
1161  len_sop = (is_sop ? 0 : len_sop) + len0;
1162  d_sop[0].status1 =
1164  d += 1;
1165  d_sop = is_eop0 ? d : d_sop;
1166 
1167  is_sop = is_eop0;
1168  }
1169 
1170  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
1171  {
1172  to_tx =
1174  start_descriptor_index);
1175  ixge_tx_trace (xm, xd, dq, tx_state,
1176  &dq->descriptors[start_descriptor_index].tx, to_tx,
1177  n_descriptors);
1178  }
1179 
1180  _vec_len (xm->tx_buffers_pending_free) =
1181  to_free - xm->tx_buffers_pending_free;
1182 
1183  /* When we are done d_sop can point to end of ring. Wrap it if so. */
1184  {
1185  ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;
1186 
1187  ASSERT (d_sop - d_start <= dq->n_descriptors);
1188  d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
1189  }
1190 
1191  tx_state->is_start_of_packet = is_sop;
1192  tx_state->start_of_packet_descriptor = d_sop;
1193  tx_state->n_bytes_in_packet = len_sop;
1194 
1195  return n_descriptors;
1196 }
1197 
1198 static uword
1201 {
1202  ixge_main_t *xm = &ixge_main;
1203  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
1205  ixge_dma_queue_t *dq;
1206  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
1207  u32 queue_index = 0; /* fixme parameter */
1208  ixge_tx_state_t tx_state;
1209 
1210  tx_state.node = node;
1211  tx_state.is_start_of_packet = 1;
1212  tx_state.start_of_packet_descriptor = 0;
1213  tx_state.n_bytes_in_packet = 0;
1214 
1215  from = vlib_frame_vector_args (f);
1216 
1217  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
1218 
1219  dq->head_index = dq->tx.head_index_write_back[0];
1220 
1221  /* Since head == tail means ring is empty we can send up to dq->n_descriptors - 1. */
1222  n_left_tx = dq->n_descriptors - 1;
1223  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);
1224 
1225  _vec_len (xm->tx_buffers_pending_free) = 0;
1226 
1227  n_descriptors_to_tx = f->n_vectors;
1228  n_tail_drop = 0;
1229  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
1230  {
1231  i32 i, n_ok, i_eop, i_sop;
1232 
1233  i_sop = i_eop = ~0;
1234  for (i = n_left_tx - 1; i >= 0; i--)
1235  {
1236  vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
1237  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1238  {
1239  if (i_sop != ~0 && i_eop != ~0)
1240  break;
1241  i_eop = i;
1242  i_sop = i + 1;
1243  }
1244  }
1245  if (i == 0)
1246  n_ok = 0;
1247  else
1248  n_ok = i_eop + 1;
1249 
1250  {
1251  ELOG_TYPE_DECLARE (e) =
1252  {
1253  .function = (char *) __FUNCTION__,.format =
1254  "ixge %d, ring full to tx %d head %d tail %d",.format_args =
1255  "i2i2i2i2",};
1256  struct
1257  {
1258  u16 instance, to_tx, head, tail;
1259  } *ed;
1260  ed = ELOG_DATA (&vm->elog_main, e);
1261  ed->instance = xd->device_index;
1262  ed->to_tx = n_descriptors_to_tx;
1263  ed->head = dq->head_index;
1264  ed->tail = dq->tail_index;
1265  }
1266 
1267  if (n_ok < n_descriptors_to_tx)
1268  {
1269  n_tail_drop = n_descriptors_to_tx - n_ok;
1270  vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
1271  vlib_error_count (vm, ixge_input_node.index,
1272  IXGE_ERROR_tx_full_drops, n_tail_drop);
1273  }
1274 
1275  n_descriptors_to_tx = n_ok;
1276  }
1277 
1278  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;
1279 
1280  /* Process from tail to end of descriptor ring. */
1281  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
1282  {
1283  u32 n =
1284  clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
1285  n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
1286  from += n;
1287  n_descriptors_to_tx -= n;
1288  dq->tail_index += n;
1289  ASSERT (dq->tail_index <= dq->n_descriptors);
1290  if (dq->tail_index == dq->n_descriptors)
1291  dq->tail_index = 0;
1292  }
1293 
1294  if (n_descriptors_to_tx > 0)
1295  {
1296  u32 n =
1297  ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
1298  from += n;
1299  ASSERT (n == n_descriptors_to_tx);
1300  dq->tail_index += n;
1301  ASSERT (dq->tail_index <= dq->n_descriptors);
1302  if (dq->tail_index == dq->n_descriptors)
1303  dq->tail_index = 0;
1304  }
1305 
1306  /* We should only get full packets. */
1307  ASSERT (tx_state.is_start_of_packet);
1308 
1309  /* Report status when last descriptor is done. */
1310  {
1311  u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
1312  ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
1314  }
1315 
1316  /* Give new descriptors to hardware. */
1317  {
1318  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);
1319 
1321 
1322  dr->tail_index = dq->tail_index;
1323  }
1324 
1325  /* Free any buffers that are done. */
1326  {
1327  u32 n = _vec_len (xm->tx_buffers_pending_free);
1328  if (n > 0)
1329  {
1331  _vec_len (xm->tx_buffers_pending_free) = 0;
1332  ASSERT (dq->tx.n_buffers_on_ring >= n);
1333  dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
1334  }
1335  }
1336 
1337  return f->n_vectors;
1338 }
1339 
/* ixge_rx_queue_no_wrap: service up to n_descriptors RX descriptors starting
 * at start_descriptor_index, never wrapping past the end of the ring.
 * Consumed descriptor slots are immediately refilled with fresh buffers from
 * xm->rx_buffers_to_add, and completed packets are enqueued to next nodes.
 * Returns the number of whole packets enqueued.
 * NOTE(review): this doxygen listing dropped several original source lines
 * (e.g. 1341 with the function name/first parameter, 1446, 1657, 1695-1696);
 * the code below is reproduced exactly as extracted and is NOT compilable
 * as-is -- confirm against the upstream ixge.c before reuse. */
 1340 static uword
 1342  ixge_device_t * xd,
 1343  ixge_dma_queue_t * dq,
 1344  u32 start_descriptor_index, u32 n_descriptors)
 1345 {
 1346  vlib_main_t *vm = xm->vlib_main;
 1347  vlib_node_runtime_t *node = dq->rx.node;
 1348  ixge_descriptor_t *d;
 1349  static ixge_descriptor_t *d_trace_save;
 1350  static u32 *d_trace_buffers;
 1351  u32 n_descriptors_left = n_descriptors;
 1352  u32 *to_rx =
 1353  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
 1354  u32 *to_add;
 1355  u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
 1356  u32 bi_last = dq->rx.saved_last_buffer_index;
 1357  u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
 1358  u32 is_sop = dq->rx.is_start_of_packet;
 1359  u32 next_index, n_left_to_next, *to_next;
 1360  u32 n_packets = 0;
 1361  u32 n_bytes = 0;
 1362  u32 n_trace = vlib_get_trace_count (vm, node);
 1363  vlib_buffer_t *b_last, b_placeholder;
 1364 
 1365  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
 1366  d = &dq->descriptors[start_descriptor_index];
 1367 
/* When resuming mid-packet, b_last is the previously saved buffer so its
 * next_buffer can be chained below; otherwise a stack placeholder is used. */
 1368  b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_placeholder;
 1369  next_index = dq->rx.next_index;
 1370 
/* Snapshot descriptors/buffer indices for tracing before they are recycled. */
 1371  if (n_trace > 0)
 1372  {
 1373  u32 n = clib_min (n_trace, n_descriptors);
 1374  if (d_trace_save)
 1375  {
 1376  _vec_len (d_trace_save) = 0;
 1377  _vec_len (d_trace_buffers) = 0;
 1378  }
 1379  vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
 1380  vec_add (d_trace_buffers, to_rx, n);
 1381  }
 1382 
/* Top up the driver's replacement-buffer cache if it cannot cover this call.
 * On partial allocation failure, shrink the amount of work to what can be
 * refilled so the ring never loses buffers. */
 1383  {
 1384  uword l = vec_len (xm->rx_buffers_to_add);
 1385 
 1386  if (l < n_descriptors_left)
 1387  {
 1388  u32 n_to_alloc = 2 * dq->n_descriptors - l;
 1389  u32 n_allocated;
 1390 
 1391  vec_resize (xm->rx_buffers_to_add, n_to_alloc);
 1392 
 1393  _vec_len (xm->rx_buffers_to_add) = l;
 1394  n_allocated =
 1395  vlib_buffer_alloc (vm, xm->rx_buffers_to_add + l, n_to_alloc);
 1396  _vec_len (xm->rx_buffers_to_add) += n_allocated;
 1397 
 1398  /* Handle transient allocation failure */
 1399  if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
 1400  {
 1401  if (n_allocated == 0)
 1402  vlib_error_count (vm, ixge_input_node.index,
 1403  IXGE_ERROR_rx_alloc_no_physmem, 1);
 1404  else
 1405  vlib_error_count (vm, ixge_input_node.index,
 1406  IXGE_ERROR_rx_alloc_fail, 1);
 1407 
 1408  n_descriptors_left = l + n_allocated;
 1409  }
 1410  n_descriptors = n_descriptors_left;
 1411  }
 1412 
 1413  /* Add buffers from end of vector going backwards. */
 1414  to_add = vec_end (xm->rx_buffers_to_add) - 1;
 1415  }
 1416 
 1417  while (n_descriptors_left > 0)
 1418  {
 1419  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 1420 
/* Dual-descriptor fast path: needs 4 descriptors ahead for prefetch and
 * 2 slots in the next frame. */
 1421  while (n_descriptors_left >= 4 && n_left_to_next >= 2)
 1422  {
 1423  vlib_buffer_t *b0, *b1;
 1424  vlib_buffer_t *f0, *f1;
 1425  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
 1426  u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
 1427  u8 is_eop0, error0, next0;
 1428  u8 is_eop1, error1, next1;
 1429  ixge_descriptor_t d0, d1;
 1430 
 1431  vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
 1432  vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
 1433 
 1434  CLIB_PREFETCH (d + 2, 32, STORE);
 1435 
 1436  d0.as_u32x4 = d[0].as_u32x4;
 1437  d1.as_u32x4 = d[1].as_u32x4;
 1438 
 1439  s20 = d0.rx_from_hw.status[2];
 1440  s21 = d1.rx_from_hw.status[2];
 1441 
 1442  s00 = d0.rx_from_hw.status[0];
 1443  s01 = d1.rx_from_hw.status[0];
 1444 
/* NOTE(review): the ownership test (original line 1446, presumably checking
 * IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE on both s20 and s21) was
 * lost by the extraction. */
 1445  if (!
 1447  goto found_hw_owned_descriptor_x2;
 1448 
 1449  bi0 = to_rx[0];
 1450  bi1 = to_rx[1];
 1451 
/* Swap ring slots to fresh buffers; to_add walks backwards from vector end. */
 1452  ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
 1453  fi0 = to_add[0];
 1454  fi1 = to_add[-1];
 1455 
 1456  to_rx[0] = fi0;
 1457  to_rx[1] = fi1;
 1458  to_rx += 2;
 1459  to_add -= 2;
 1460 
 1461 #if 0
 1466 #endif
 1467 
 1468  b0 = vlib_get_buffer (vm, bi0);
 1469  b1 = vlib_get_buffer (vm, bi1);
 1470 
 1473 
 1474  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
 1475  is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
 1476 
 1477  ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
 1478  &next0, &error0, &flags0,
 1479  &next1, &error1, &flags1);
 1480 
/* Non-SOP descriptors inherit the next index chosen at start of packet. */
 1481  next0 = is_sop ? next0 : next_index_sop;
 1482  next1 = is_eop0 ? next1 : next0;
 1483  next_index_sop = next1;
 1484 
 1485  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
 1486  b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
 1487 
 1488  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
 1489  vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
 1490  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
 1491  vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
 1492 
 1493  b0->error = node->errors[error0];
 1494  b1->error = node->errors[error1];
 1495 
/* NOTE(review): len0/len1 extraction from the descriptors (original lines
 * 1496-1497) is missing from this listing. */
 1498  n_bytes += len0 + len1;
 1499  n_packets += is_eop0 + is_eop1;
 1500 
 1501  /* Give new buffers to hardware. */
 1502  f0 = vlib_get_buffer (vm, fi0);
 1503  f1 = vlib_get_buffer (vm, fi1);
/* NOTE(review): writing the new buffer DMA addresses into d0/d1 (original
 * lines 1504-1507) is missing from this listing. */
 1508  d[0].as_u32x4 = d0.as_u32x4;
 1509  d[1].as_u32x4 = d1.as_u32x4;
 1510 
 1511  d += 2;
 1512  n_descriptors_left -= 2;
 1513 
 1514  /* Point to either l2 or l3 header depending on next. */
 1515  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
 1517  l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
 1519 
 1520  b0->current_length = len0 - l3_offset0;
 1521  b1->current_length = len1 - l3_offset1;
 1522  b0->current_data = l3_offset0;
 1523  b1->current_data = l3_offset1;
 1524 
/* Chain multi-descriptor packets via next_buffer. */
 1525  b_last->next_buffer = is_sop ? ~0 : bi0;
 1526  b0->next_buffer = is_eop0 ? ~0 : bi1;
 1527  bi_last = bi1;
 1528  b_last = b1;
 1529 
 1530  if (CLIB_DEBUG > 0)
 1531  {
 1532  u32 bi_sop0 = is_sop ? bi0 : bi_sop;
 1533  u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
 1534 
 1535  if (is_eop0)
 1536  {
 1537  u8 *msg = vlib_validate_buffer (vm, bi_sop0,
 1538  /* follow_buffer_next */ 1);
 1539  ASSERT (!msg);
 1540  }
 1541  if (is_eop1)
 1542  {
 1543  u8 *msg = vlib_validate_buffer (vm, bi_sop1,
 1544  /* follow_buffer_next */ 1);
 1545  ASSERT (!msg);
 1546  }
 1547  }
/* Two alternative enqueue strategies kept side by side; only the second
 * ("Eliot") is compiled in. */
 1548  if (0) /* "Dave" version */
 1549  {
 1550  u32 bi_sop0 = is_sop ? bi0 : bi_sop;
 1551  u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
 1552 
 1553  if (is_eop0)
 1554  {
 1555  to_next[0] = bi_sop0;
 1556  to_next++;
 1557  n_left_to_next--;
 1558 
 1559  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
 1560  to_next, n_left_to_next,
 1561  bi_sop0, next0);
 1562  }
 1563  if (is_eop1)
 1564  {
 1565  to_next[0] = bi_sop1;
 1566  to_next++;
 1567  n_left_to_next--;
 1568 
 1569  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
 1570  to_next, n_left_to_next,
 1571  bi_sop1, next1);
 1572  }
 1573  is_sop = is_eop1;
 1574  bi_sop = bi_sop1;
 1575  }
 1576  if (1) /* "Eliot" version */
 1577  {
 1578  /* Speculatively enqueue to cached next. */
 1579  u8 saved_is_sop = is_sop;
 1580  u32 bi_sop_save = bi_sop;
 1581 
 1582  bi_sop = saved_is_sop ? bi0 : bi_sop;
 1583  to_next[0] = bi_sop;
 1584  to_next += is_eop0;
 1585  n_left_to_next -= is_eop0;
 1586 
 1587  bi_sop = is_eop0 ? bi1 : bi_sop;
 1588  to_next[0] = bi_sop;
 1589  to_next += is_eop1;
 1590  n_left_to_next -= is_eop1;
 1591 
 1592  is_sop = is_eop1;
 1593 
 1594  if (PREDICT_FALSE
 1595  (!(next0 == next_index && next1 == next_index)))
 1596  {
 1597  /* Undo speculation. */
 1598  to_next -= is_eop0 + is_eop1;
 1599  n_left_to_next += is_eop0 + is_eop1;
 1600 
 1601  /* Re-do both descriptors being careful about where we enqueue. */
 1602  bi_sop = saved_is_sop ? bi0 : bi_sop_save;
 1603  if (is_eop0)
 1604  {
 1605  if (next0 != next_index)
 1606  vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
 1607  else
 1608  {
 1609  to_next[0] = bi_sop;
 1610  to_next += 1;
 1611  n_left_to_next -= 1;
 1612  }
 1613  }
 1614 
 1615  bi_sop = is_eop0 ? bi1 : bi_sop;
 1616  if (is_eop1)
 1617  {
 1618  if (next1 != next_index)
 1619  vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
 1620  else
 1621  {
 1622  to_next[0] = bi_sop;
 1623  to_next += 1;
 1624  n_left_to_next -= 1;
 1625  }
 1626  }
 1627 
 1628  /* Switch cached next index when next for both packets is the same. */
 1629  if (is_eop0 && is_eop1 && next0 == next1)
 1630  {
 1631  vlib_put_next_frame (vm, node, next_index,
 1632  n_left_to_next);
 1633  next_index = next0;
 1634  vlib_get_next_frame (vm, node, next_index,
 1635  to_next, n_left_to_next);
 1636  }
 1637  }
 1638  }
 1639  }
 1640 
 1641  /* Bail out of dual loop and proceed with single loop. */
 1642  found_hw_owned_descriptor_x2:
 1643 
/* Single-descriptor slow path: mirrors the dual loop one descriptor at a
 * time. */
 1644  while (n_descriptors_left > 0 && n_left_to_next > 0)
 1645  {
 1646  vlib_buffer_t *b0;
 1647  vlib_buffer_t *f0;
 1648  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
 1649  u8 is_eop0, error0, next0;
 1650  ixge_descriptor_t d0;
 1651 
 1652  d0.as_u32x4 = d[0].as_u32x4;
 1653 
 1654  s20 = d0.rx_from_hw.status[2];
 1655  s00 = d0.rx_from_hw.status[0];
 1656 
/* NOTE(review): the software-ownership test (original lines 1656-1657) was
 * lost by the extraction. */
 1658  goto found_hw_owned_descriptor_x1;
 1659 
 1660  bi0 = to_rx[0];
 1661  ASSERT (to_add >= xm->rx_buffers_to_add);
 1662  fi0 = to_add[0];
 1663 
 1664  to_rx[0] = fi0;
 1665  to_rx += 1;
 1666  to_add -= 1;
 1667 
 1668 #if 0
 1671 #endif
 1672 
 1673  b0 = vlib_get_buffer (vm, bi0);
 1674 
 1675  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
 1677  (xd, s00, s20, &next0, &error0, &flags0);
 1678 
 1679  next0 = is_sop ? next0 : next_index_sop;
 1680  next_index_sop = next0;
 1681 
 1682  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
 1683 
 1684  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
 1685  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
 1686 
 1687  b0->error = node->errors[error0];
 1688 
/* NOTE(review): len0 extraction (original line 1689) is missing from this
 * listing. */
 1690  n_bytes += len0;
 1691  n_packets += is_eop0;
 1692 
 1693  /* Give new buffer to hardware. */
 1694  f0 = vlib_get_buffer (vm, fi0);
/* NOTE(review): the new buffer address store (original lines 1695-1696) is
 * missing from this listing. */
 1697  d[0].as_u32x4 = d0.as_u32x4;
 1698 
 1699  d += 1;
 1700  n_descriptors_left -= 1;
 1701 
 1702  /* Point to either l2 or l3 header depending on next. */
 1703  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
 1705  b0->current_length = len0 - l3_offset0;
 1706  b0->current_data = l3_offset0;
 1707 
 1708  b_last->next_buffer = is_sop ? ~0 : bi0;
 1709  bi_last = bi0;
 1710  b_last = b0;
 1711 
 1712  bi_sop = is_sop ? bi0 : bi_sop;
 1713 
 1714  if (CLIB_DEBUG > 0 && is_eop0)
 1715  {
 1716  u8 *msg =
 1717  vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
 1718  ASSERT (!msg);
 1719  }
 1720 
 1721  if (0) /* "Dave" version */
 1722  {
 1723  if (is_eop0)
 1724  {
 1725  to_next[0] = bi_sop;
 1726  to_next++;
 1727  n_left_to_next--;
 1728 
 1729  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
 1730  to_next, n_left_to_next,
 1731  bi_sop, next0);
 1732  }
 1733  }
 1734  if (1) /* "Eliot" version */
 1735  {
 1736  if (PREDICT_TRUE (next0 == next_index))
 1737  {
 1738  to_next[0] = bi_sop;
 1739  to_next += is_eop0;
 1740  n_left_to_next -= is_eop0;
 1741  }
 1742  else
 1743  {
 1744  if (next0 != next_index && is_eop0)
 1745  vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
 1746 
 1747  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 1748  next_index = next0;
 1749  vlib_get_next_frame (vm, node, next_index,
 1750  to_next, n_left_to_next);
 1751  }
 1752  }
 1753  is_sop = is_eop0;
 1754  }
 1755  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 1756  }
 1757 
 1758 found_hw_owned_descriptor_x1:
 1759  if (n_descriptors_left > 0)
 1760  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 1761 
/* Return unused replacement buffers to the cache (to_add walked backwards). */
 1762  _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
 1763 
 1764  {
 1765  u32 n_done = n_descriptors - n_descriptors_left;
 1766 
 1767  if (n_trace > 0 && n_done > 0)
 1768  {
 1769  u32 n = clib_min (n_trace, n_done);
 1770  ixge_rx_trace (xm, xd, dq,
 1771  d_trace_save,
 1772  d_trace_buffers,
 1773  &dq->descriptors[start_descriptor_index], n);
 1774  vlib_set_trace_count (vm, node, n_trace - n);
 1775  }
 1776  if (d_trace_save)
 1777  {
 1778  _vec_len (d_trace_save) = 0;
 1779  _vec_len (d_trace_buffers) = 0;
 1780  }
 1781 
 1782  /* Don't keep a reference to b_last if we don't have to.
 1783  Otherwise we can over-write a next_buffer pointer after already haven
 1784  enqueued a packet. */
 1785  if (is_sop)
 1786  {
 1787  b_last->next_buffer = ~0;
 1788  bi_last = ~0;
 1789  }
 1790 
/* Persist per-queue state so the next call can resume mid-packet. */
 1791  dq->rx.n_descriptors_done_this_call = n_done;
 1792  dq->rx.n_descriptors_done_total += n_done;
 1793  dq->rx.is_start_of_packet = is_sop;
 1794  dq->rx.saved_start_of_packet_buffer_index = bi_sop;
 1795  dq->rx.saved_last_buffer_index = bi_last;
 1796  dq->rx.saved_start_of_packet_next_index = next_index_sop;
 1797  dq->rx.next_index = next_index;
 1798  dq->rx.n_bytes += n_bytes;
 1799 
 1800  return n_packets;
 1801  }
 1802 }
1803 
/* ixge_rx_queue: service one RX DMA queue.  Compares the hardware head index
 * against the software head, processes the delta in at most two non-wrapping
 * chunks via ixge_rx_queue_no_wrap, then returns the tail to hardware and
 * bumps interface RX counters.  Returns the packet count.
 * NOTE(review): this listing dropped original lines 1805 (function name /
 * first parameter), 1811 (dr = get_dma_regs (...)), 1863 and 1867-1871
 * (memory barrier and the vlib_increment_combined_counter call header). */
 1804 static uword
 1806  ixge_device_t * xd,
 1807  vlib_node_runtime_t * node, u32 queue_index)
 1808 {
 1809  ixge_dma_queue_t *dq =
 1810  vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
 1812  uword n_packets = 0;
 1813  u32 hw_head_index, sw_head_index;
 1814 
 1815  /* One time initialization. */
 1816  if (!dq->rx.node)
 1817  {
 1818  dq->rx.node = node;
 1819  dq->rx.is_start_of_packet = 1;
 1820  dq->rx.saved_start_of_packet_buffer_index = ~0;
 1821  dq->rx.saved_last_buffer_index = ~0;
 1822  }
 1823 
 1824  dq->rx.next_index = node->cached_next_index;
 1825 
 1826  dq->rx.n_descriptors_done_total = 0;
 1827  dq->rx.n_descriptors_done_this_call = 0;
 1828  dq->rx.n_bytes = 0;
 1829 
 1830  /* Fetch head from hardware and compare to where we think we are. */
 1831  hw_head_index = dr->head_index;
 1832  sw_head_index = dq->head_index;
 1833 
 1834  if (hw_head_index == sw_head_index)
 1835  goto done;
 1836 
/* Hardware head wrapped: first process from sw head to end of ring. */
 1837  if (hw_head_index < sw_head_index)
 1838  {
 1839  u32 n_tried = dq->n_descriptors - sw_head_index;
 1840  n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
 1841  sw_head_index =
 1842  ixge_ring_add (dq, sw_head_index,
 1843  dq->rx.n_descriptors_done_this_call);
 1844 
/* Stopped early (buffer shortage or hw-owned descriptor): don't continue. */
 1845  if (dq->rx.n_descriptors_done_this_call != n_tried)
 1846  goto done;
 1847  }
 1848  if (hw_head_index >= sw_head_index)
 1849  {
 1850  u32 n_tried = hw_head_index - sw_head_index;
 1851  n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
 1852  sw_head_index =
 1853  ixge_ring_add (dq, sw_head_index,
 1854  dq->rx.n_descriptors_done_this_call);
 1855  }
 1856 
 1857 done:
 1858  dq->head_index = sw_head_index;
 1859  dq->tail_index =
 1860  ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
 1861 
 1862  /* Give tail back to hardware. */
 1864 
 1865  dr->tail_index = dq->tail_index;
 1866 
 1868  interface_main.combined_sw_if_counters +
 1870  0 /* thread_index */ ,
 1871  xd->vlib_sw_if_index, n_packets,
 1872  dq->rx.n_bytes);
 1873 
 1874  return n_packets;
 1875 }
1876 
/* ixge_interrupt: handle a non-queue interrupt bit i (bits 16..31 of the
 * interrupt status register).  Bit 20 is the link-status-change interrupt and
 * triggers a deferred admin-up/down process signal; all other bits are only
 * event-logged by name.
 * NOTE(review): this listing dropped original line 1878 (the signature with
 * the xm/xd/i parameters) and lines 1929-1932 (the vlib_process_signal_event
 * call that line 1933's argument belongs to). */
 1877 static void
 1879 {
 1880  vlib_main_t *vm = xm->vlib_main;
 1881  ixge_regs_t *r = xd->regs;
 1882 
 1883  if (i != 20)
 1884  {
/* Generic event log entry; enum_strings map bits 16..31 to readable names. */
 1885  ELOG_TYPE_DECLARE (e) =
 1886  {
 1887  .function = (char *) __FUNCTION__,.format =
 1888  "ixge %d, %s",.format_args = "i1t1",.n_enum_strings =
 1889  16,.enum_strings =
 1890  {
 1891  "flow director",
 1892  "rx miss",
 1893  "pci exception",
 1894  "mailbox",
 1895  "link status change",
 1896  "linksec key exchange",
 1897  "manageability event",
 1898  "reserved23",
 1899  "sdp0",
 1900  "sdp1",
 1901  "sdp2",
 1902  "sdp3",
 1903  "ecc", "descriptor handler error", "tcp timer", "other",},};
 1904  struct
 1905  {
 1906  u8 instance;
 1907  u8 index;
 1908  } *ed;
 1909  ed = ELOG_DATA (&vm->elog_main, e);
 1910  ed->instance = xd->device_index;
 1911  ed->index = i - 16;
 1912  }
 1913  else
 1914  {
/* Link status change: bit 30 of the MAC link status register = link up. */
 1915  u32 v = r->xge_mac.link_status;
 1916  uword is_up = (v & (1 << 30)) != 0;
 1917 
 1918  ELOG_TYPE_DECLARE (e) =
 1919  {
 1920  .function = (char *) __FUNCTION__,.format =
 1921  "ixge %d, link status change 0x%x",.format_args = "i4i4",};
 1922  struct
 1923  {
 1924  u32 instance, link_status;
 1925  } *ed;
 1926  ed = ELOG_DATA (&vm->elog_main, e);
 1927  ed->instance = xd->device_index;
 1928  ed->link_status = v;
 1930 
/* is_up is packed into bit 31 alongside the hw interface index for the
 * (extraction-lost) process signal call above. */
 1933  ((is_up << 31) | xd->vlib_hw_if_index));
 1934  }
 1935 }
1936 
/* Body of clean_block: copy the non-zero buffer indices from b[0..n_left)
 * into t, zeroing each source slot, and return the number of indices copied.
 * Used by ixge_tx_queue to collect completed TX buffer indices for freeing.
 * NOTE(review): the signature lines (original 1937-1938) were dropped by the
 * extraction; presumably `static u32 clean_block (u32 * b, u32 * t, u32
 * n_left)` based on the call sites -- confirm against upstream ixge.c. */
 1939 {
 1940  u32 *t0 = t;
 1941 
/* Unrolled by 4: t advances only when the copied index was non-zero, which
 * compacts the output without branches. */
 1942  while (n_left >= 4)
 1943  {
 1944  u32 bi0, bi1, bi2, bi3;
 1945 
 1946  t[0] = bi0 = b[0];
 1947  b[0] = 0;
 1948  t += bi0 != 0;
 1949 
 1950  t[0] = bi1 = b[1];
 1951  b[1] = 0;
 1952  t += bi1 != 0;
 1953 
 1954  t[0] = bi2 = b[2];
 1955  b[2] = 0;
 1956  t += bi2 != 0;
 1957 
 1958  t[0] = bi3 = b[3];
 1959  b[3] = 0;
 1960  t += bi3 != 0;
 1961 
 1962  b += 4;
 1963  n_left -= 4;
 1964  }
 1965 
/* Remainder loop for the final 0..3 entries. */
 1966  while (n_left > 0)
 1967  {
 1968  u32 bi0;
 1969 
 1970  t[0] = bi0 = b[0];
 1971  b[0] = 0;
 1972  t += bi0 != 0;
 1973  b += 1;
 1974  n_left -= 1;
 1975  }
 1976 
 1977  return t - t0;
 1978 }
1979 
/* ixge_tx_queue: reclaim completed TX descriptors for one queue.  Reads the
 * head-index write-back location to find how far hardware has progressed,
 * collects the buffer indices of completed descriptors with clean_block, and
 * frees them.  Tolerates a PCI race where the write-back value lags the
 * interrupt.
 * NOTE(review): this listing dropped original line 2060 (presumably the
 * vec_validate/resize of xm->tx_buffers_pending_free before t0 is taken). */
 1980 static void
 1981 ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
 1982 {
 1983  vlib_main_t *vm = xm->vlib_main;
 1984  ixge_dma_queue_t *dq =
 1985  vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
 1986  u32 n_clean, *b, *t, *t0;
 1987  i32 n_hw_owned_descriptors;
 1988  i32 first_to_clean, last_to_clean;
 1989  u64 hwbp_race = 0;
 1990 
 1991  /* Handle case where head write back pointer update
 1992  * arrives after the interrupt during high PCI bus loads.
 1993  */
/* Spin until the write-back head moves (ring is known non-empty here). */
 1994  while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
 1995  dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
 1996  {
 1997  hwbp_race++;
 1998  if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
 1999  {
 2000  ELOG_TYPE_DECLARE (e) =
 2001  {
 2002  .function = (char *) __FUNCTION__,.format =
 2003  "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",.format_args
 2004  = "i4i4i4i4",};
 2005  struct
 2006  {
 2007  u32 instance, head_index, tail_index, n_buffers_on_ring;
 2008  } *ed;
 2009  ed = ELOG_DATA (&vm->elog_main, e);
 2010  ed->instance = xd->device_index;
 2011  ed->head_index = dq->head_index;
 2012  ed->tail_index = dq->tail_index;
 2013  ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
 2014  }
 2015  }
 2016 
 2017  dq->head_index = dq->tx.head_index_write_back[0];
 2018  n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
 2019  ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
 2020  n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;
 2021 
 2022  if (IXGE_HWBP_RACE_ELOG && hwbp_race)
 2023  {
 2024  ELOG_TYPE_DECLARE (e) =
 2025  {
 2026  .function = (char *) __FUNCTION__,.format =
 2027  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",.format_args
 2028  = "i4i4i4i4i4",};
 2029  struct
 2030  {
 2031  u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
 2032  } *ed;
 2033  ed = ELOG_DATA (&vm->elog_main, e);
 2034  ed->instance = xd->device_index;
 2035  ed->head_index = dq->head_index;
 2036  ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
 2037  ed->n_clean = n_clean;
 2038  ed->retries = hwbp_race;
 2039  }
 2040 
 2041  /*
 2042  * This function used to wait until hardware owned zero descriptors.
 2043  * At high PPS rates, that doesn't happen until the TX ring is
 2044  * completely full of descriptors which need to be cleaned up.
 2045  * That, in turn, causes TX ring-full drops and/or long RX service
 2046  * interruptions.
 2047  */
 2048  if (n_clean == 0)
 2049  return;
 2050 
 2051  /* Clean the n_clean descriptors prior to the reported hardware head */
/* Both indices are computed modulo the ring size (hence the +n_descriptors
 * fix-ups for negative values). */
 2052  last_to_clean = dq->head_index - 1;
 2053  last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
 2054  last_to_clean;
 2055 
 2056  first_to_clean = (last_to_clean) - (n_clean - 1);
 2057  first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
 2058  first_to_clean;
 2059 
 2061  t0 = t = xm->tx_buffers_pending_free;
 2062  b = dq->descriptor_buffer_indices + first_to_clean;
 2063 
 2064  /* Wrap case: clean from first to end, then start to last */
 2065  if (first_to_clean > last_to_clean)
 2066  {
 2067  t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
 2068  first_to_clean = 0;
 2069  b = dq->descriptor_buffer_indices;
 2070  }
 2071 
 2072  /* Typical case: clean from first to last */
 2073  if (first_to_clean <= last_to_clean)
 2074  t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
 2075 
 2076  if (t > t0)
 2077  {
 2078  u32 n = t - t0;
 2079  vlib_buffer_free_no_next (vm, t0, n);
 2080  ASSERT (dq->tx.n_buffers_on_ring >= n);
 2081  dq->tx.n_buffers_on_ring -= n;
 2082  _vec_len (xm->tx_buffers_pending_free) = 0;
 2083  }
 2084 }
2085 
/* Helpers mapping interrupt status bits to RX/TX queue numbers and back.
 * NOTE(review): the signature line of each of these small functions (original
 * lines 2087-2088, 2093-2094, 2099-2100, 2105-2106, 2111-2112, 2118-2119)
 * was dropped by the extraction; from the call sites in ixge_device_input the
 * bodies below presumably belong to ixge_interrupt_is_rx_queue,
 * ixge_interrupt_is_tx_queue, ixge_tx_queue_to_interrupt,
 * ixge_rx_queue_to_interrupt, ixge_interrupt_rx_queue and
 * ixge_interrupt_tx_queue respectively -- confirm against upstream. */
 2086 /* RX queue interrupts 0 thru 7; TX 8 thru 15. */
 2089 {
 2090  return i < 8;
 2091 }
 2092 
 2095 {
 2096  return i >= 8 && i < 16;
 2097 }
 2098 
 2101 {
 2102  return 8 + i;
 2103 }
 2104 
 2107 {
 2108  return 0 + i;
 2109 }
 2110 
 2113 {
 2115  return i - 0;
 2116 }
 2117 
 2120 {
 2122  return i - 8;
 2123 }
2124 
/* Dispatch pending interrupt status bits for one device: RX-queue bits go to
 * ixge_rx_queue, TX-queue bits to ixge_tx_queue, everything else to
 * ixge_interrupt.  Returns the number of RX packets processed.
 * NOTE(review): the extraction dropped original lines 2126-2128 (signature;
 * presumably ixge_device_input (xm, xd, node)), 2133 and 2135 (reading and
 * acknowledging r->interrupt.status_write_1_to_*), and 2139 (the
 * `if (ixge_interrupt_is_rx_queue (i))` guard that line 2140 belongs to). */
 2125 static uword
 2128 {
 2129  ixge_regs_t *r = xd->regs;
 2130  u32 i, s;
 2131  uword n_rx_packets = 0;
 2132 
 2134  if (s)
 2136 
 2137  /* *INDENT-OFF* */
 2138  foreach_set_bit (i, s, ({
 2140  n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));
 2141 
 2142  else if (ixge_interrupt_is_tx_queue (i))
 2143  ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));
 2144 
 2145  else
 2146  ixge_interrupt (xm, xd, i);
 2147  }));
 2148  /* *INDENT-ON* */
 2149 
 2150  return n_rx_packets;
 2151 }
2152 
/* ixge_input: the plugin's input node function.  In interrupt mode, services
 * only devices whose bit is set in node->runtime_data[0] and re-arms their
 * interrupts; in polling mode, services every device.  Returns total RX
 * packets.
 * NOTE(review): the extraction dropped original line 2154 (signature with vm,
 * node, frame) and lines 2188-2189 (the interrupt re-enable under the
 * SWITCH_TO_INTERRUPT flag test started on line 2187). */
 2153 static uword
 2155 {
 2156  ixge_main_t *xm = &ixge_main;
 2157  ixge_device_t *xd;
 2158  uword n_rx_packets = 0;
 2159 
 2160  if (node->state == VLIB_NODE_STATE_INTERRUPT)
 2161  {
 2162  uword i;
 2163 
 2164  /* Loop over devices with interrupts. */
 2165  /* *INDENT-OFF* */
 2166  foreach_set_bit (i, node->runtime_data[0], ({
 2167  xd = vec_elt_at_index (xm->devices, i);
 2168  n_rx_packets += ixge_device_input (xm, xd, node);
 2169 
 2170  /* Re-enable interrupts since we're going to stay in interrupt mode. */
 2171  if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
 2172  xd->regs->interrupt.enable_write_1_to_set = ~0;
 2173  }));
 2174  /* *INDENT-ON* */
 2175 
 2176  /* Clear mask of devices with pending interrupts. */
 2177  node->runtime_data[0] = 0;
 2178  }
 2179  else
 2180  {
 2181  /* Poll all devices for input/interrupts. */
 2182  vec_foreach (xd, xm->devices)
 2183  {
 2184  n_rx_packets += ixge_device_input (xm, xd, node);
 2185 
 2186  /* Re-enable interrupts when switching out of polling mode. */
 2187  if (node->flags &
 2190  }
 2191  }
 2192 
 2193  return n_rx_packets;
 2194 }
2195 
/* Error counter strings for ixge_input_node, generated from the error list
 * macro.  NOTE(review): the `foreach_ixge_error` invocation (original line
 * 2198) was dropped by the extraction. */
 2196 static char *ixge_error_strings[] = {
 2197 #define _(n,s) s,
 2199 #undef _
 2200 };
2201 
/* Registration of the ixge-input node: disabled until hardware is detected,
 * with three forwarding next nodes plus drop.
 * NOTE(review): original line 2207 (presumably the supported-feature /
 * flags initializer) was dropped by the extraction. */
 2202 /* *INDENT-OFF* */
 2203 VLIB_REGISTER_NODE (ixge_input_node, static) = {
 2204  .function = ixge_input,
 2205  .type = VLIB_NODE_TYPE_INPUT,
 2206  .name = "ixge-input",
 2208 
 2209  /* Will be enabled if/when hardware is detected. */
 2210  .state = VLIB_NODE_STATE_DISABLED,
 2211 
 2212  .format_buffer = format_ethernet_header_with_length,
 2213  .format_trace = format_ixge_rx_dma_trace,
 2214 
 2215  .n_errors = IXGE_N_ERROR,
 2216  .error_strings = ixge_error_strings,
 2217 
 2218  .n_next_nodes = IXGE_RX_N_NEXT,
 2219  .next_nodes = {
 2220  [IXGE_RX_NEXT_DROP] = "error-drop",
 2221  [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
 2222  [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
 2223  [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
 2224  },
 2225 };
 2226 
 2227 /* *INDENT-ON* */
2228 
/* Format the interface name as TenGigabitEthernet<domain>/<bus>/<slot>/<fn>
 * from the device's PCI address.
 * NOTE(review): original line 2232 (presumably `vlib_main_t *vm = ...`) was
 * dropped by the extraction; vm is used below. */
static u8 *
 2230 format_ixge_device_name (u8 * s, va_list * args)
 2231 {
 2233  u32 i = va_arg (*args, u32);
 2234  ixge_main_t *xm = &ixge_main;
 2235  ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
 2236  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
 2237  return format (s, "TenGigabitEthernet%x/%x/%x/%x",
 2238  addr->domain, addr->bus, addr->slot, addr->function);
 2239 }
2240 
/* Per-counter flag table generated from the counter list macro: marks which
 * hardware counters are 64-bit and which are not clear-on-read.
 * NOTE(review): the array declaration line (original 2244) and the
 * `foreach_ixge_counter` invocation (original 2247) were dropped by the
 * extraction. */
 2241 #define IXGE_COUNTER_IS_64_BIT (1 << 0)
 2242 #define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)
 2243 
 2245 #define _(a,f) 0,
 2246 #define _64(a,f) IXGE_COUNTER_IS_64_BIT,
 2248 #undef _
 2249 #undef _64
 2250 };
2251 
/* Accumulate the device's hardware counter registers into xd->counters.
 * Counters that are not clear-on-read are explicitly zeroed; 64-bit counters
 * pick up their high word from the adjacent register.
 * NOTE(review): the extraction dropped original lines 2253 (signature,
 * presumably taking ixge_device_t *xd), 2259 (the foreach_ixge_counter
 * invocation filling reg_offsets), and 2270/2272 (the flag tests that the
 * clear and high-word statements below are guarded by). */
 2252 static void
 2254 {
 2255  /* Byte offset for counter registers. */
 2256  static u32 reg_offsets[] = {
 2257 #define _(a,f) (a) / sizeof (u32),
 2258 #define _64(a,f) _(a,f)
 2260 #undef _
 2261 #undef _64
 2262  };
 2263  volatile u32 *r = (volatile u32 *) xd->regs;
 2264  int i;
 2265 
 2266  for (i = 0; i < ARRAY_LEN (xd->counters); i++)
 2267  {
 2268  u32 o = reg_offsets[i];
 2269  xd->counters[i] += r[o];
 2271  r[o] = 0;
 2273  xd->counters[i] += (u64) r[o + 1] << (u64) 32;
 2274  }
 2275 }
2276 
/* Format a PCI device id as its symbolic name from the device-id list macro,
 * or "unknown 0x%x" when not recognized.
 * NOTE(review): the `foreach_ixge_pci_device_id`-style invocation (original
 * line 2285) was dropped by the extraction. */
static u8 *
 2278 format_ixge_device_id (u8 * s, va_list * args)
 2279 {
 2280  u32 device_id = va_arg (*args, u32);
 2281  char *t = 0;
 2282  switch (device_id)
 2283  {
 2284 #define _(f,n) case n: t = #f; break;
 2286 #undef _
 2287  default:
 2288  t = 0;
 2289  break;
 2290  }
 2291  if (t == 0)
 2292  s = format (s, "unknown 0x%x", device_id);
 2293  else
 2294  s = format (s, "%s", t);
 2295  return s;
 2296 }
2297 
/* Format link state ("up"/"down"), link mode, and speed from the MAC link
 * status register: bit 30 = up, bits 26-27 = mode, bits 28-29 = speed.
 * NOTE(review): original line 2302 (presumably `u32 v =
 * xd->regs->xge_mac.link_status;`) was dropped by the extraction. */
static u8 *
 2299 format_ixge_link_status (u8 * s, va_list * args)
 2300 {
 2301  ixge_device_t *xd = va_arg (*args, ixge_device_t *);
 2303 
 2304  s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
 2305 
 2306  {
 2307  char *modes[] = {
 2308  "1g", "10g parallel", "10g serial", "autoneg",
 2309  };
 2310  char *speeds[] = {
 2311  "unknown", "100m", "1g", "10g",
 2312  };
 2313  s = format (s, ", mode %s, speed %s",
 2314  modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
 2315  }
 2316 
 2317  return s;
 2318 }
2319 
/* Detailed device show: id/link status, PCIe address info, PHY/SFP identity,
 * RX/TX queue-0 ring occupancy, and all non-zero hardware counters (relative
 * to the last clear).
 * NOTE(review): the extraction dropped original lines 2325 (vm), 2332, 2335,
 * 2341 (the vlib_pci_get_device_info call that `d` comes from), 2345, 2358
 * (the dq lookup for RX queue 0) and 2382 (the foreach_ixge_counter
 * invocation filling names[]). */
static u8 *
 2321 format_ixge_device (u8 * s, va_list * args)
 2322 {
 2323  u32 dev_instance = va_arg (*args, u32);
 2324  CLIB_UNUSED (int verbose) = va_arg (*args, int);
 2326  ixge_main_t *xm = &ixge_main;
 2327  ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
 2328  ixge_phy_t *phy = xd->phys + xd->phy_index;
 2329  u32 indent = format_get_indent (s);
 2330 
/* Refresh hardware counters before printing them below. */
 2331  ixge_update_counters (xd);
 2333 
 2334  s = format (s, "Intel 8259X: id %U\n%Ulink %U",
 2336  format_white_space, indent + 2, format_ixge_link_status, xd);
 2337 
 2338  {
 2339 
 2340  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
 2342 
 2343  if (d)
 2344  s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
 2346  }
 2347 
/* PHY identification: MDIO PHY, SFP module, or none found. */
 2348  s = format (s, "\n%U", format_white_space, indent + 2);
 2349  if (phy->mdio_address != ~0)
 2350  s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
 2351  else if (xd->sfp_eeprom.id == SFP_ID_SFP)
 2352  s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
 2353  else
 2354  s = format (s, "PHY not found");
 2355 
 2356  /* FIXME */
 2357  {
 2359  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
 2360  u32 hw_head_index = dr->head_index;
 2361  u32 sw_head_index = dq->head_index;
 2362  u32 nitems;
 2363 
 2364  nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
 2365  s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
 2366  format_white_space, indent + 2, nitems, dq->n_descriptors);
 2367 
 2368  s = format (s, "\n%U%d buffers in driver rx cache",
 2369  format_white_space, indent + 2,
 2370  vec_len (xm->rx_buffers_to_add));
 2371 
 2372  s = format (s, "\n%U%d buffers on tx queue 0 ring",
 2373  format_white_space, indent + 2,
 2374  xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
 2375  }
 2376  {
 2377  u32 i;
 2378  u64 v;
 2379  static char *names[] = {
 2380 #define _(a,f) #f,
 2381 #define _64(a,f) _(a,f)
 2383 #undef _
 2384 #undef _64
 2385  };
 2386 
/* Print only counters that changed since the last clear. */
 2387  for (i = 0; i < ARRAY_LEN (names); i++)
 2388  {
 2389  v = xd->counters[i] - xd->counters_last_clear[i];
 2390  if (v != 0)
 2391  s = format (s, "\n%U%-40U%16Ld",
 2392  format_white_space, indent + 2,
 2393  format_c_identifier, names[i], v);
 2394  }
 2395  }
 2396 
 2397  return s;
 2398 }
2399 
/* Clear-counters hook for the device class: snapshot the current hardware
 * counters into counters_last_clear so subsequent shows report deltas.
 * NOTE(review): the signature line (original 2401, presumably taking
 * `u32 instance`) was dropped by the extraction. */
 2400 static void
 2402 {
 2403  ixge_main_t *xm = &ixge_main;
 2404  ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
 2405  ixge_update_counters (xd);
 2406  memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
 2407 }
2408 
 2409 /*
 2410  * Dynamically redirect all pkts from a specific interface
 2411  * to the specified node
 2412  */
/* NOTE(review): the extraction dropped original lines 2414 (signature start,
 * presumably taking vnet_main_t *vnm and u32 hw_if_index), 2419-2420 and
 * 2424-2425 and 2428 (the per-interface next-index assignment statements);
 * only the vlib_node_add_next argument expression on line 2429 remains. */
 2413 static void
 2415  u32 node_index)
 2416 {
 2417  ixge_main_t *xm = &ixge_main;
 2418  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
 2420 
 2421  /* Shut off redirection */
 2422  if (node_index == ~0)
 2423  {
 2425  return;
 2426  }
 2427 
 2429  vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
 2430 }
2431 
 2432 
 2433 /* *INDENT-OFF* */
/* Device class registration wiring the ixge format/admin/redirect hooks into
 * vnet.  NOTE(review): the VNET_DEVICE_CLASS (ixge_device_class) opening line
 * (original 2434) was dropped by the extraction. */
 2435  .name = "ixge",
 2436  .tx_function = ixge_interface_tx,
 2437  .format_device_name = format_ixge_device_name,
 2438  .format_device = format_ixge_device,
 2439  .format_tx_trace = format_ixge_tx_dma_trace,
 2440  .clear_counters = ixge_clear_hw_interface_counters,
 2441  .admin_up_down_function = ixge_interface_admin_up_down,
 2442  .rx_redirect_to_node = ixge_set_interface_next_node,
 2443 };
 2444 /* *INDENT-ON* */
2446 #define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
2447 
/* ixge_dma_init: allocate and program one RX or TX DMA queue: physmem
 * descriptor ring, per-slot buffer indices (RX) or head write-back area (TX),
 * then the hardware ring registers (addresses, sizes, thresholds, enables).
 * Returns a clib error on physmem allocation failure.
 * NOTE(review): the extraction dropped original lines 2449 (signature with
 * xd, rt, queue_index), 2459-2460, 2464-2465, 2472, 2475, 2483, 2490,
 * 2494-2495, 2504, 2511, 2536 and 2553 -- several allocation and rounding
 * expressions below are therefore incomplete as shown. */
 2448 static clib_error_t *
 2450 {
 2451  ixge_main_t *xm = &ixge_main;
 2452  vlib_main_t *vm = xm->vlib_main;
 2453  ixge_dma_queue_t *dq;
 2454  clib_error_t *error = 0;
 2455 
 2456  vec_validate (xd->dma_queues[rt], queue_index);
 2457  dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
 2458 
 2461  CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
 2462 
 2463  if (!xm->n_bytes_in_rx_buffer)
 2466 
 2467  if (!xm->n_descriptors[rt])
 2468  xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
 2469 
 2470  dq->queue_index = queue_index;
 2471  dq->n_descriptors =
 2473  dq->head_index = dq->tail_index = 0;
 2474 
/* Descriptor ring must be physically contiguous and 128-byte aligned per
 * the chip spec. */
 2476  sizeof (dq->descriptors[0]),
 2477  128 /* per chip spec */ );
 2478  if (!dq->descriptors)
 2479  return vlib_physmem_last_error (vm);
 2480 
 2481  clib_memset (dq->descriptors, 0,
 2482  dq->n_descriptors * sizeof (dq->descriptors[0]));
 2484 
 2485  if (rt == VLIB_RX)
 2486  {
/* Pre-populate every RX slot with a buffer and write its DMA address into
 * the descriptor. */
 2487  u32 n_alloc, i;
 2488 
 2489  n_alloc = vlib_buffer_alloc (vm, dq->descriptor_buffer_indices,
 2491  ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
 2492  for (i = 0; i < n_alloc; i++)
 2493  {
 2496  (vm, vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]));
 2497  }
 2498  }
 2499  else
 2500  {
 2501  u32 i;
 2502 
/* TX head write-back lives in its own physmem area polled by ixge_tx_queue. */
 2503  dq->tx.head_index_write_back =
 2505  if (!dq->tx.head_index_write_back)
 2506  return vlib_physmem_last_error (vm);
 2507 
 2508  for (i = 0; i < dq->n_descriptors; i++)
 2509  dq->descriptors[i].tx = xm->tx_descriptor_template;
 2510 
 2512  }
 2513 
 2514  {
 2515  ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
 2516  u64 a;
 2517 
/* Program the 64-bit ring base address as two 32-bit registers. */
 2518  a = vlib_physmem_get_pa (vm, dq->descriptors);
 2519  dr->descriptor_address[0] = a & 0xFFFFFFFF;
 2520  dr->descriptor_address[1] = a >> (u64) 32;
 2521  dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
 2522  dq->head_index = dq->tail_index = 0;
 2523 
 2524  if (rt == VLIB_RX)
 2525  {
 2526  ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
 2527  dr->rx_split_control =
 2528  ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
 2529  | ( /* lo free descriptor threshold (units of 64 descriptors) */
 2530  (1 << 22)) | ( /* descriptor type: advanced one buffer */
 2531  (1 << 25)) | ( /* drop if no descriptors available */
 2532  (1 << 28)));
 2533 
 2534  /* Give hardware all but last 16 cache lines' worth of descriptors. */
 2535  dq->tail_index = dq->n_descriptors -
 2537  }
 2538  else
 2539  {
 2540  /* Make sure its initialized before hardware can get to it. */
 2541  dq->tx.head_index_write_back[0] = dq->head_index;
 2542 
 2543  a = vlib_physmem_get_pa (vm, dq->tx.head_index_write_back);
 2544  dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
 2545  dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
 2546  }
 2547 
 2548  /* DMA on 82599 does not work with [13] rx data write relaxed ordering
 2549  and [12] undocumented set. */
 2550  if (rt == VLIB_RX)
 2551  dr->dca_control &= ~((1 << 13) | (1 << 12));
 2552 
 2554 
 2555  if (rt == VLIB_TX)
 2556  {
 2557  xd->regs->tx_dma_control |= (1 << 0);
 2558  dr->control |= ((32 << 0) /* prefetch threshold */
 2559  | (64 << 8) /* host threshold */
 2560  | (0 << 16) /* writeback threshold */ );
 2561  }
 2562 
 2563  /* Enable this queue and wait for hardware to initialize
 2564  before adding to tail. */
 2565  if (rt == VLIB_TX)
 2566  {
 2567  dr->control |= 1 << 25;
 2568  while (!(dr->control & (1 << 25)))
 2569  ;
 2570  }
 2571 
 2572  /* Set head/tail indices and enable DMA. */
 2573  dr->head_index = dq->head_index;
 2574  dr->tail_index = dq->tail_index;
 2575  }
 2576 
 2577  return error;
 2578 }
2579 
2580 static u32
2582 {
2583  ixge_device_t *xd;
2584  ixge_regs_t *r;
2585  u32 old;
2586  ixge_main_t *xm = &ixge_main;
2587 
2588  xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2589  r = xd->regs;
2590 
2591  old = r->filter_control;
2592 
2594  r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
2595  else if (flags == ETHERNET_INTERFACE_FLAGS_DEFAULT_L3)
2596  r->filter_control = old & ~(1 << 9);
2597  else
2598  return ~0;
2599 
2600  return old;
2601 }
2602 
2603 static void
2605 {
2606  vnet_main_t *vnm = vnet_get_main ();
2607  ixge_device_t *xd;
2608 
2609  /* Reset chip(s). */
2610  vec_foreach (xd, xm->devices)
2611  {
2612  ixge_regs_t *r = xd->regs;
2613  const u32 reset_bit = (1 << 26) | (1 << 3);
2614 
2615  r->control |= reset_bit;
2616 
2617  /* No need to suspend. Timed to take ~1e-6 secs */
2618  while (r->control & reset_bit)
2619  ;
2620 
2621  /* Software loaded. */
2622  r->extended_control |= (1 << 28);
2623 
2624  ixge_phy_init (xd);
2625 
2626  /* Register ethernet interface. */
2627  {
2628  u8 addr8[6];
2629  u32 i, addr32[2];
2631 
2632  addr32[0] = r->rx_ethernet_address0[0][0];
2633  addr32[1] = r->rx_ethernet_address0[0][1];
2634  for (i = 0; i < 6; i++)
2635  addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
2636 
2638  (vnm, ixge_device_class.index, xd->device_index,
2639  /* ethernet address */ addr8,
2641  if (error)
2642  clib_error_report (error);
2643  }
2644 
2645  {
2646  vnet_sw_interface_t *sw =
2648  xd->vlib_sw_if_index = sw->sw_if_index;
2649  }
2650 
2651  ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
2652 
2654 
2655  ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
2656 
2657  /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
2658  r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
2659  ixge_rx_queue_to_interrupt (0)) << 0);
2660 
2661  r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
2662  ixge_tx_queue_to_interrupt (0)) << 8);
2663 
2664  /* No use in getting too many interrupts.
2665  Limit them to one every 3/4 ring size at line rate
2666  min sized packets.
2667  No need for this since kernel/vlib main loop provides adequate interrupt
2668  limiting scheme. */
2669  if (0)
2670  {
2671  f64 line_rate_max_pps =
2672  10e9 / (8 * (64 + /* interframe padding */ 20));
2674  .75 * xm->n_descriptors[VLIB_RX] /
2675  line_rate_max_pps);
2676  }
2677 
2678  /* Accept all multicast and broadcast packets. Should really add them
2679  to the dst_ethernet_address register array. */
2680  r->filter_control |= (1 << 10) | (1 << 8);
2681 
2682  /* Enable frames up to size in mac frame size register. */
2683  r->xge_mac.control |= 1 << 2;
2684  r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
2685 
2686  /* Enable all interrupts. */
2687  if (!IXGE_ALWAYS_POLL)
2689  }
2690 }
2691 
2692 static uword
2694 {
2695  vnet_main_t *vnm = vnet_get_main ();
2696  ixge_main_t *xm = &ixge_main;
2697  ixge_device_t *xd;
2698  uword event_type, *event_data = 0;
2699  f64 timeout, link_debounce_deadline;
2700 
2701  ixge_device_init (xm);
2702 
2703  /* Clear all counters. */
2704  vec_foreach (xd, xm->devices)
2705  {
2706  ixge_update_counters (xd);
2707  clib_memset (xd->counters, 0, sizeof (xd->counters));
2708  }
2709 
2710  timeout = 30.0;
2711  link_debounce_deadline = 1e70;
2712 
2713  while (1)
2714  {
2715  /* 36 bit stat counters could overflow in ~50 secs.
2716  We poll every 30 secs to be conservative. */
2718 
2719  event_type = vlib_process_get_events (vm, &event_data);
2720 
2721  switch (event_type)
2722  {
2723  case EVENT_SET_FLAGS:
2724  /* 1 ms */
2725  link_debounce_deadline = vlib_time_now (vm) + 1e-3;
2726  timeout = 1e-3;
2727  break;
2728 
2729  case ~0:
2730  /* No events found: timer expired. */
2731  if (vlib_time_now (vm) > link_debounce_deadline)
2732  {
2733  vec_foreach (xd, xm->devices)
2734  {
2735  ixge_regs_t *r = xd->regs;
2736  u32 v = r->xge_mac.link_status;
2737  uword is_up = (v & (1 << 30)) != 0;
2738 
2740  (vnm, xd->vlib_hw_if_index,
2741  is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
2742  }
2743  link_debounce_deadline = 1e70;
2744  timeout = 30.0;
2745  }
2746  break;
2747 
2748  default:
2749  ASSERT (0);
2750  }
2751 
2752  if (event_data)
2753  _vec_len (event_data) = 0;
2754 
2755  /* Query stats every 30 secs. */
2756  {
2757  f64 now = vlib_time_now (vm);
2758  if (now - xm->time_last_stats_update > 30)
2759  {
2761  vec_foreach (xd, xm->devices) ixge_update_counters (xd);
2762  }
2763  }
2764  }
2765 
2766  return 0;
2767 }
2768 
2770  .function = ixge_process,
2771  .type = VLIB_NODE_TYPE_PROCESS,
2772  .name = "ixge-process",
2773 };
2774 
2775 clib_error_t *
2777 {
2778  ixge_main_t *xm = &ixge_main;
2779 
2780  xm->vlib_main = vm;
2782  sizeof (xm->tx_descriptor_template));
2784  sizeof (xm->tx_descriptor_template_mask));
2789  xm->tx_descriptor_template_mask.status0 = 0xffff;
2790  xm->tx_descriptor_template_mask.status1 = 0x00003fff;
2791 
2797  return 0;
2798 }
2799 
2800 /* *INDENT-OFF* */
2802 {
2803  .runs_before = VLIB_INITS("pci_bus_init"),
2804 };
2805 /* *INDENT-ON* */
2806 
2807 
2808 static void
2810 {
2811  uword private_data = vlib_pci_get_private_data (vm, h);
2812 
2814 
2815  /* Let node know which device is interrupting. */
2816  {
2819  rt->runtime_data[0] |= 1 << private_data;
2820  }
2821 }
2822 
2823 static clib_error_t *
2825 {
2826  ixge_main_t *xm = &ixge_main;
2827  clib_error_t *error = 0;
2828  void *r;
2829  ixge_device_t *xd;
2830  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, h);
2832 
2833  error = vlib_pci_map_region (vm, h, 0, &r);
2834  if (error)
2835  return error;
2836 
2837  vec_add2 (xm->devices, xd, 1);
2838 
2839  if (vec_len (xm->devices) == 1)
2840  {
2841  ixge_input_node.function = ixge_input;
2842  }
2843 
2844  xd->pci_dev_handle = h;
2845  xd->device_id = d->device_id;
2846  xd->regs = r;
2847  xd->device_index = xd - xm->devices;
2848  xd->pci_function = addr->function;
2849  xd->per_interface_next_index = ~0;
2850 
2852 
2853  /* Chip found so enable node. */
2854  {
2857  ? VLIB_NODE_STATE_POLLING
2858  : VLIB_NODE_STATE_INTERRUPT));
2859 
2860  //dev->private_data = xd->device_index;
2861  }
2862 
2863  if (vec_len (xm->devices) == 1)
2864  {
2867  }
2868 
2869  error = vlib_pci_bus_master_enable (vm, h);
2870 
2871  if (error)
2872  return error;
2873 
2874  return vlib_pci_intr_enable (vm, h);
2875 }
2876 
2877 /* *INDENT-OFF* */
2878 PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
2879  .init_function = ixge_pci_init,
2880  .interrupt_handler = ixge_pci_intr_handler,
2881  .supported_devices = {
2882 #define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
2884 #undef _
2885  { 0 },
2886  },
2887 };
2888 /* *INDENT-ON* */
2889 
2890 void
2892 {
2894 
2895  switch (next)
2896  {
2900  r->next_nodes[next] = name;
2901  break;
2902 
2903  default:
2904  clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
2905  break;
2906  }
2907 }
2908 
2909 /* *INDENT-OFF* */
2910 VLIB_PLUGIN_REGISTER () = {
2911  .version = VPP_BUILD_VER,
2912  .default_disabled = 1,
2913  .description = "Intel 82599 Family Native Driver (experimental)",
2914 };
2915 #endif
2916 
2917 /* *INDENT-ON* */
2918 
2919 /*
2920  * fd.io coding-style-patch-verification: ON
2921  *
2922  * Local Variables:
2923  * eval: (c-set-style "gnu")
2924  * End:
2925  */
u32 mdio_address
Definition: ixge.h:1113
static void ixge_update_counters(ixge_device_t *xd)
Definition: ixge.c:2253
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:524
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:133
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP
Definition: ixge.h:107
#define IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
Definition: ixge.h:138
u32 process_node_index
Definition: ixge.h:1256
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT
Definition: ixge.h:105
static clib_error_t * ixge_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: ixge.c:321
#define IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR
Definition: ixge.h:119
static clib_error_t * vlib_pci_intr_enable(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.h:239
vnet_interface_output_runtime_t * rt
#define clib_min(x, y)
Definition: clib.h:342
static void ixge_software_firmware_sync_release(ixge_device_t *xd, u32 sw_mask)
Definition: ixge.c:111
struct ixge_regs_t::@786 xge_mac
#define CLIB_UNUSED(x)
Definition: clib.h:90
ixge_rx_to_hw_descriptor_t rx_to_hw
Definition: ixge.h:168
static void ixge_phy_init(ixge_device_t *xd)
Definition: ixge.c:376
static clib_error_t * vlib_pci_bus_master_enable(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.h:271
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:212
ixge_tx_descriptor_t tx_descriptor_template
Definition: ixge.h:1259
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread Waits for an event, or for the indicated number of seconds...
Definition: node_funcs.h:755
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105
u32 enable_write_1_to_set
Definition: ixge.h:245
a
Definition: bitmap.h:544
u32 n_descriptor_bytes
Definition: ixge.h:30
static u32 ixge_flag_change(vnet_main_t *vnm, vnet_hw_interface_t *hw, u32 flags)
Definition: ixge.c:2581
#define IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
Definition: ixge.h:120
u8 runtime_data[0]
Function dependent node-runtime data.
Definition: node.h:506
u32 vlib_hw_if_index
Definition: ixge.h:1222
u32 n_descriptors[VLIB_N_RX_TX]
Definition: ixge.h:1248
u32 link_status_at_last_link_change
Definition: ixge.h:1231
vnet_hw_if_output_node_runtime_t * r
static void vlib_set_next_frame_buffer(vlib_main_t *vm, vlib_node_runtime_t *node, u32 next_index, u32 buffer_index)
Definition: node_funcs.h:428
u32 head_index
Definition: ixge.h:42
static uword vlib_buffer_get_pa(vlib_main_t *vm, vlib_buffer_t *b)
Definition: buffer_funcs.h:488
u32 n_bytes
static void ixge_semaphore_release(ixge_device_t *xd)
Definition: ixge.c:82
#define PREDICT_TRUE(x)
Definition: clib.h:125
static void * vlib_physmem_alloc(vlib_main_t *vm, uword n_bytes)
Definition: physmem_funcs.h:73
u8 is_start_of_packet
Definition: ixge.c:569
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:119
unsigned long u64
Definition: types.h:89
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED
Definition: ixge.h:115
u32 * descriptor_buffer_indices
Definition: ixge.h:1134
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
static void vlib_node_set_interrupt_pending(vlib_main_t *vm, u32 node_index)
Definition: node_funcs.h:249
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6
Definition: ixge.h:104
u32 queue_mapping[64]
Definition: ixge.h:266
vlib_increment_combined_counter(ccm, ti, sw_if_index, n_buffers, n_bytes)
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:325
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
Definition: node.h:296
vlib_buffer_t buffer
Definition: ixge.c:572
static u8 * format_ixge_link_status(u8 *s, va_list *args)
Definition: ixge.c:2299
static clib_error_t * vlib_physmem_last_error(struct vlib_main_t *vm)
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:122
u32 phy_command
Definition: ixge.h:368
u32 * rx_buffers_to_add
Definition: ixge.h:1264
#define XGE_PHY_CONTROL
Definition: ixge.c:52
static vlib_node_registration_t ixge_input_node
(constructor) VLIB_REGISTER_NODE (ixge_input_node)
Definition: ixge.c:56
PCI_REGISTER_DEVICE(ixge_pci_device_registration, static)
static uword ixge_rx_queue_no_wrap(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, u32 start_descriptor_index, u32 n_descriptors)
Definition: ixge.c:1341
ixge_device_t * devices
Definition: ixge.h:1245
static uword sfp_eeprom_is_valid(sfp_eeprom_t *e)
Definition: sfp.h:90
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:645
for(i=1;i<=collision_buckets;i++)
ixge_descriptor_t before
Definition: ixge.c:561
struct ixge_dma_queue_t::@807::@810 rx
#define IXGE_N_BYTES_IN_RX_BUFFER
Definition: ixge.c:2446
static void ixge_throttle_queue_interrupt(ixge_regs_t *r, u32 queue_interrupt_index, f64 inter_interrupt_interval_in_secs)
Definition: ixge.h:994
#define XGE_PHY_ID2
Definition: ixge.c:51
static u32 format_get_indent(u8 *s)
Definition: format.h:72
static void ixge_i2c_put_bits(i2c_bus_t *b, int scl, int sda)
Definition: ixge.c:171
u32 per_interface_next_index
Definition: ixge.h:1208
static uword ixge_interrupt_is_rx_queue(uword i)
Definition: ixge.c:2088
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
u32 extended_control
Definition: ixge.h:192
u32 software_semaphore
Definition: ixge.h:890
static void ixge_i2c_get_bits(i2c_bus_t *b, int *scl, int *sda)
Definition: ixge.c:184
string name[64]
Definition: fib.api:25
static void ixge_write_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index, u32 v)
Definition: ixge.c:164
ixge_rx_next_t
Definition: ixge.h:1273
static u8 ixge_counter_flags[]
Definition: ixge.c:2244
static vlib_buffer_known_state_t vlib_buffer_is_known(vlib_main_t *vm, u32 buffer_index)
Definition: buffer_funcs.h:529
u32 rx_ethernet_address0[16][2]
Definition: ixge.h:581
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:461
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, vnet_hw_interface_flags_t flags)
Definition: interface.c:513
u32 tx_dma_control
Definition: ixge.h:500
static uword ixge_ring_add(ixge_dma_queue_t *q, u32 i0, u32 i1)
Definition: ixge.c:1007
vhost_vring_addr_t addr
Definition: vhost_user.h:130
u32 * tx_buffers_pending_free
Definition: ixge.h:1262
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
Definition: node_funcs.h:1177
unsigned char u8
Definition: types.h:56
vlib_buffer_t ** b
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET
Definition: ixge.h:111
u8 id[64]
Definition: dhcp.api:160
u32 filter_control
Definition: ixge.h:522
#define foreach_set_bit(var, mask, body)
Definition: bitops.h:166
double f64
Definition: types.h:142
u32 i2c_control
Definition: ixge.h:208
static vnet_sw_interface_t * vnet_get_hw_sw_interface(vnet_main_t *vnm, u32 hw_if_index)
u32 phy_data
Definition: ixge.h:369
unsigned int u32
Definition: types.h:88
static void ixge_rx_next_and_error_from_status_x1(ixge_device_t *xd, u32 s00, u32 s02, u8 *next0, u8 *error0, u32 *flags0)
Definition: ixge.c:632
#define vec_add(V, E, N)
Add N elements to end of vector V (no header, unspecified alignment)
Definition: vec.h:689
static u32 clean_block(u32 *b, u32 *t, u32 n_left)
Definition: ixge.c:1938
u8 *() format_function_t(u8 *s, va_list *args)
Definition: format.h:48
vlib_frame_t * f
vlib_pci_dev_handle_t pci_dev_handle
Definition: ixge.h:1211
static uword vlib_process_suspend(vlib_main_t *vm, f64 dt)
Suspend a vlib cooperative multi-tasking thread for a period of time.
Definition: node_funcs.h:486
u32 ixge_read_write_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index, u32 v, u32 is_read)
Definition: ixge.c:120
u32 rx_split_control
Definition: ixge.h:51
static uword ixge_rx_queue_to_interrupt(uword i)
Definition: ixge.c:2106
vlib_rx_or_tx_t
Definition: defs.h:44
if(node->flags &VLIB_NODE_FLAG_TRACE) vnet_interface_output_trace(vm
ixge_tx_descriptor_t descriptor
Definition: ixge.c:848
i2c_bus_t i2c_bus
Definition: ixge.h:1233
static u8 * format_ixge_rx_from_hw_descriptor(u8 *s, va_list *va)
Definition: ixge.c:445
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index The first 64 bytes of buffer contains most header informatio...
Definition: buffer_funcs.h:507
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:172
static clib_error_t * ixge_sfp_phy_init_from_eeprom(ixge_device_t *xd, u16 sfp_type)
Definition: ixge.c:228
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
Definition: node_funcs.h:583
static void * vlib_physmem_alloc_aligned(vlib_main_t *vm, uword n_bytes, uword alignment)
Definition: physmem_funcs.h:56
static void ixge_rx_trace(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, ixge_descriptor_t *before_descriptors, u32 *before_buffers, ixge_descriptor_t *after_descriptors, uword n_descriptors)
Definition: ixge.c:735
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
u32 dca_control
Definition: ixge.h:40
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
Definition: ixge.h:114
description fragment has unexpected format
Definition: map.api:433
ixge_phy_t phys[2]
Definition: ixge.h:1228
#define IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR
Definition: ixge.h:121
ixge_dma_regs_t tx_dma[128]
Definition: ixge.h:616
u32 link_partner_ability
Definition: ixge.h:343
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN
Definition: ixge.h:112
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u8 * format_hex_bytes(u8 *s, va_list *va)
Definition: std-formats.c:84
vnet_main_t * vnet_get_main(void)
vnet_device_class_t ixge_device_class
ixge_main_t ixge_main
Definition: ixge.c:55
static vlib_node_registration_t ixge_process_node
Definition: ixge.c:57
u32 queue_index
Definition: ixge.h:1131
#define vec_resize(V, N)
Resize a vector (no header, unspecified alignment) Add N elements to end of given vector V...
Definition: vec.h:296
#define vec_end(v)
End (last data address) of vector.
#define clib_error_create(args...)
Definition: error.h:96
#define VLIB_FRAME_SIZE
Definition: node.h:369
f64 time_last_stats_update
Definition: ixge.h:1266
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2
Definition: ixge.h:100
bool is_ip6
Definition: ip.api:43
static clib_error_t * ixge_pci_init(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: ixge.c:2824
static void ixge_rx_next_and_error_from_status_x2(ixge_device_t *xd, u32 s00, u32 s02, u32 s10, u32 s12, u8 *next0, u8 *error0, u32 *flags0, u8 *next1, u8 *error1, u32 *flags1)
Definition: ixge.c:671
u8 * format_c_identifier(u8 *s, va_list *va)
Definition: std-formats.c:329
static uword ixge_process(vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
Definition: ixge.c:2693
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:145
vlib_pci_device_info_t * vlib_pci_get_device_info(vlib_main_t *vm, vlib_pci_addr_t *addr, clib_error_t **error)
Definition: pci.c:202
static void ixge_sfp_enable_disable_10g(ixge_device_t *xd, uword enable)
Definition: ixge.c:218
u8 id
Definition: sfp.h:56
Definition: cJSON.c:88
uword private_data
Definition: i2c.h:44
static u8 * format_ixge_tx_descriptor(u8 *s, va_list *va)
Definition: ixge.c:516
format_function_t format_vnet_sw_interface_name
u16 * next
#define IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET(l)
Definition: ixge.h:145
static uword ixge_interface_tx(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
Definition: ixge.c:1199
u16 state
Input node state.
Definition: node.h:494
static void ixge_clear_hw_interface_counters(u32 instance)
Definition: ixge.c:2401
static void ixge_sfp_device_up_down(ixge_device_t *xd, uword is_up)
Definition: ixge.c:267
format_function_t format_vnet_buffer
Definition: buffer.h:515
#define foreach_ixge_counter
Definition: ixge.h:1016
static uword ixge_tx_no_wrap(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, u32 *buffers, u32 start_descriptor_index, u32 n_descriptors, ixge_tx_state_t *tx_state)
Definition: ixge.c:1035
u64 counters[IXGE_N_COUNTER]
Definition: ixge.h:1237
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
Definition: node_funcs.h:1019
ixge_tx_descriptor_t tx
Definition: ixge.h:170
#define IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET
Definition: ixge.h:137
void vlib_pci_set_private_data(vlib_main_t *vm, vlib_pci_dev_handle_t h, uword private_data)
Definition: pci.c:155
#define IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED
Definition: ixge.h:133
static __clib_warn_unused_result u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:708
static void ixge_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Definition: ixge.c:2414
static __clib_warn_unused_result int vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:153
unsigned short u16
Definition: types.h:57
#define XGE_PHY_ID1
Definition: ixge.c:50
static u8 * format_ixge_device_id(u8 *s, va_list *args)
Definition: ixge.c:2278
u32 vlib_register_node(vlib_main_t *vm, vlib_node_registration_t *r)
Definition: node.c:519
#define XGE_PHY_DEV_TYPE_PMA_PMD
Definition: ixge.c:48
#define ELOG_DATA(em, f)
Definition: elog.h:484
#define IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED
Definition: ixge.h:132
format_function_t format_sfp_eeprom
Definition: sfp.h:133
#define PREDICT_FALSE(x)
Definition: clib.h:124
u16 n_bytes_this_buffer
Definition: ixge.h:129
sfp_eeprom_t sfp_eeprom
Definition: ixge.h:1234
vnet_main_t vnet_main
Definition: misc.c:43
u32 control
Definition: ixge.h:62
uword vlib_pci_get_private_data(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.c:148
u32 descriptor_address[2]
Definition: ixge.h:29
static u16 ixge_read_eeprom(ixge_device_t *xd, u32 address)
Definition: ixge.c:196
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
format_function_t * format_buffer
Definition: node.h:349
VNET_DEVICE_CLASS(ixge_device_class)
u32 vlib_pci_dev_handle_t
Definition: pci.h:97
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:224
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:395
u32 software_firmware_sync
Definition: ixge.h:894
static void ixge_sfp_phy_init(ixge_device_t *xd)
Definition: ixge.c:349
u32 n_left
vlib_node_runtime_t * node
Definition: ixge.c:902
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE
Definition: ixge.h:110
u32 vlib_sw_if_index
Definition: ixge.h:1222
u32 head_index
Definition: ixge.h:1128
u32 auto_negotiation_control2
Definition: ixge.h:465
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
Definition: format.c:97
ixge_tx_descriptor_t * start_of_packet_descriptor
Definition: ixge.c:908
static ixge_dma_regs_t * get_dma_regs(ixge_device_t *xd, vlib_rx_or_tx_t rt, u32 qi)
Definition: ixge.c:310
static u8 * format_ixge_tx_dma_trace(u8 *s, va_list *va)
Definition: ixge.c:863
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4
Definition: ixge.h:102
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
static void vlib_buffer_free_no_next(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers, does not free the buffer chain for each buffer.
Definition: buffer_funcs.h:999
u32 n_descriptors
Definition: ixge.h:1125
u16 n_vectors
Definition: node.h:388
#define XGE_PHY_DEV_TYPE_PHY_XS
Definition: ixge.c:49
u32 status_write_1_to_set
Definition: ixge.h:232
Definition: i2c.h:33
static uword ixge_interrupt_tx_queue(uword i)
Definition: ixge.c:2119
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
ixge_descriptor_t after
Definition: ixge.c:561
u32x4 as_u32x4
Definition: ixge.h:171
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:261
u8 * vlib_validate_buffer(vlib_main_t *vm, u32 bi, uword follow_buffer_next)
Definition: buffer.c:254
u32 index
Definition: flow_types.api:221
#define IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET(s)
Definition: ixge.h:108
#define clib_warning(format, args...)
Definition: error.h:59
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:116
u8 data[]
Packet data.
Definition: buffer.h:204
void(* get_bits)(struct i2c_bus_t *b, int *scl, int *sda)
Definition: i2c.h:36
#define ETHERNET_INTERFACE_FLAG_ACCEPT_ALL
Definition: ethernet.h:163
static u8 * format_ixge_device_name(u8 *s, va_list *args)
Definition: ixge.c:2230
#define IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS
Definition: ixge.h:136
#define ARRAY_LEN(x)
Definition: clib.h:70
u16 device_index
Definition: ixge.h:1216
#define ELOG_TYPE_DECLARE(f)
Definition: elog.h:442
static uword ixge_interrupt_rx_queue(uword i)
Definition: ixge.c:2112
u32 n_bytes_in_packet
Definition: ixge.c:906
static uword round_pow2(uword x, uword pow2)
Definition: clib.h:279
void vlib_i2c_init(i2c_bus_t *b)
Definition: i2c.c:150
u32 sdp_control
Definition: ixge.h:201
vlib_pci_addr_t * vlib_pci_get_addr(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.c:163
static u8 * format_ixge_rx_dma_trace(u8 *s, va_list *va)
Definition: ixge.c:576
u32 tail_index
Definition: ixge.h:1128
static char * ixge_error_strings[]
Definition: ixge.c:2196
#define EVENT_SET_FLAGS
Definition: ixge.c:42
signed int i32
Definition: types.h:77
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:498
#define ASSERT(truth)
#define IXGE_COUNTER_NOT_CLEAR_ON_READ
Definition: ixge.c:2242
manual_print typedef address
Definition: ip_types.api:96
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:201
ixge_pci_device_id_t device_id
Definition: ixge.h:1214
ixge_descriptor_t * descriptors
Definition: ixge.h:1122
static void ixge_tx_queue(ixge_main_t *xm, ixge_device_t *xd, u32 queue_index)
Definition: ixge.c:1981
#define always_inline
Definition: rdma_mlx5dv.h:23
u32 auto_negotiation_control
Definition: ixge.h:427
vlib_put_next_frame(vm, node, next_index, 0)
#define clib_error_report(e)
Definition: error.h:113
static void vlib_node_set_state(vlib_main_t *vm, u32 node_index, vlib_node_state_t new_state)
Set node dispatch state.
Definition: node_funcs.h:175
u32 control
Definition: ixge.h:179
nat44_ei_hairpin_src_next_t next_index
ixge_regs_t * regs
Definition: ixge.h:1205
u32 id
Definition: ixge.h:1116
u32 rx_enable
Definition: ixge.h:308
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:38
static void ixge_semaphore_get(ixge_device_t *xd)
Definition: ixge.c:60
static void ixge_interrupt(ixge_main_t *xm, ixge_device_t *xd, u32 i)
Definition: ixge.c:1878
static uword ixge_tx_descriptor_matches_template(ixge_main_t *xm, ixge_tx_descriptor_t *d)
Definition: ixge.c:1017
static uword ixge_tx_queue_to_interrupt(uword i)
Definition: ixge.c:2100
void vlib_i2c_read_eeprom(i2c_bus_t *bus, u8 i2c_addr, u16 start_addr, u16 length, u8 *data)
Definition: i2c.c:201
void(* put_bits)(struct i2c_bus_t *b, int scl, int sda)
Definition: i2c.h:35
struct _vlib_node_registration vlib_node_registration_t
static void ixge_device_init(ixge_main_t *xm)
Definition: ixge.c:2604
static void ixge_software_firmware_sync(ixge_device_t *xd, u32 sw_mask)
Definition: ixge.c:89
static u64 vlib_physmem_get_pa(vlib_main_t *vm, void *mem)
Definition: defs.h:47
ixge_tx_descriptor_t tx_descriptor_template_mask
Definition: ixge.h:1259
struct ixge_regs_t::@784 interrupt
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED
Definition: ixge.h:113
void ixge_set_next_node(ixge_rx_next_t next, char *name)
Definition: ixge.c:2891
ixge_dma_queue_t * dma_queues[VLIB_N_RX_TX]
Definition: ixge.h:1224
VLIB_PLUGIN_REGISTER()
static uword ixge_ring_sub(ixge_dma_queue_t *q, u32 i0, u32 i1)
Definition: ixge.c:998
struct ixge_dma_queue_t::@807::@809 tx
static void ixge_tx_trace(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, ixge_tx_state_t *tx_state, ixge_tx_descriptor_t *descriptors, u32 *buffers, uword n_descriptors)
Definition: ixge.c:912
u32 tail_index
Definition: ixge.h:53
u32 rx_max_frame_size
Definition: ixge.h:373
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
vlib_main_t vlib_node_runtime_t * node
Definition: nat44_ei.c:3047
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:149
clib_error_t * ethernet_register_interface(vnet_main_t *vnm, u32 dev_class_index, u32 dev_instance, const u8 *address, u32 *hw_if_index_return, ethernet_flag_change_function_t flag_change)
Definition: interface.c:348
u32 instance
Definition: gre.api:51
static uword ixge_interrupt_is_tx_queue(uword i)
Definition: ixge.c:2094
VLIB buffer representation.
Definition: buffer.h:111
u64 uword
Definition: types.h:112
#define IXGE_COUNTER_IS_64_BIT
Definition: ixge.c:2241
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:301
vlib_buffer_t buffer
Definition: ixge.c:859
static uword ixge_device_input(ixge_main_t *xm, ixge_device_t *xd, vlib_node_runtime_t *node)
Definition: ixge.c:2126
static uword ixge_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
Definition: ixge.c:2154
u32 n_descriptors_per_cache_line
Definition: ixge.h:1254
node node_index
clib_error_t * vlib_pci_map_region(vlib_main_t *vm, vlib_pci_dev_handle_t h, u32 resource, void **result)
Definition: pci.c:1182
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT
Definition: ixge.h:103
vlib_main_t * vlib_main
Definition: ixge.h:1242
u16 n_packet_bytes_this_descriptor
Definition: ixge.h:96
static uword ixge_rx_queue(ixge_main_t *xm, ixge_device_t *xd, vlib_node_runtime_t *node, u32 queue_index)
Definition: ixge.c:1805
#define XGE_PHY_CONTROL_RESET
Definition: ixge.c:53
static uword vlib_in_process_context(vlib_main_t *vm)
Definition: node_funcs.h:442
#define vnet_buffer(b)
Definition: buffer.h:437
u32 is_start_of_packet
Definition: ixge.c:904
#define IXGE_HWBP_RACE_ELOG
Definition: ixge.c:43
static int vlib_i2c_bus_timed_out(i2c_bus_t *bus)
Definition: i2c.h:54
ixge_dma_regs_t rx_dma0[64]
Definition: ixge.h:281
u8 is_start_of_packet
Definition: ixge.c:856
#define IXGE_ALWAYS_POLL
Definition: ixge.c:40
f64 now
u32 core_analog_config
Definition: ixge.h:949
static void ixge_pci_intr_handler(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: ixge.c:2809
#define vec_foreach(var, vec)
Vector iterator.
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP
Definition: ixge.h:106
unsigned long long u32x4
Definition: ixge.c:28
clib_error_t * ixge_init(vlib_main_t *vm)
Definition: ixge.c:2776
u16 flags
Copy of main node flags.
Definition: node.h:492
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:137
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:628
static void ixge_sfp_enable_disable_laser(ixge_device_t *xd, uword enable)
Definition: ixge.c:208
u16 pci_function
Definition: ixge.h:1219
format_function_t format_vlib_pci_link_speed
Definition: pci.h:327
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:226
u32 status_write_1_to_clear
Definition: ixge.h:230
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:292
#define IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS
Definition: ixge.h:135
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
static u8 * format_ixge_device(u8 *s, va_list *args)
Definition: ixge.c:2321
u32 phy_index
Definition: ixge.h:1227
u32 eeprom_read
Definition: ixge.h:881
struct ixge_dma_regs_t::@778::@781 tx
static u32 ixge_read_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index)
Definition: ixge.c:157
u32 n_bytes_in_rx_buffer
Definition: ixge.h:1252
u32 link_status
Definition: ixge.h:340
static clib_error_t * ixge_dma_init(ixge_device_t *xd, vlib_rx_or_tx_t rt, u32 queue_index)
Definition: ixge.c:2449
#define VLIB_INITS(...)
Definition: init.h:352
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:111
u64 counters_last_clear[IXGE_N_COUNTER]
Definition: ixge.h:1237
ixge_dma_regs_t rx_dma1[64]
Definition: ixge.h:747
ixge_rx_from_hw_descriptor_t rx_from_hw
Definition: ixge.h:169
#define VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE
Definition: node.h:295
#define foreach_ixge_error
Definition: ixge.c:616
Definition: defs.h:46
#define IXGE_TX_DESCRIPTOR_STATUS1_DONE
Definition: ixge.h:139
#define IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR
Definition: ixge.h:118
ixge_error_t
Definition: ixge.c:623