FD.io VPP  v19.04-6-g6f05f72
Vector Packet Processing
ixge.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /*
17  * WARNING!
18  * This driver is not intended for production use and it is unsupported.
19  * It is provided for educational use only.
20  * Please use supported DPDK driver instead.
21  */
22 
23 #if __x86_64__ || __i386__ || __aarch64__
24 #include <vppinfra/vector.h>
25 
26 #ifndef CLIB_HAVE_VEC128
27 #warning HACK: ixge driver wont really work, missing u32x4
28 typedef unsigned long long u32x4;
29 #endif
30 
31 #include <vlib/vlib.h>
32 #include <vlib/unix/unix.h>
33 #include <vlib/pci/pci.h>
34 #include <vnet/vnet.h>
35 #include <ixge/ixge.h>
36 #include <vnet/ethernet/ethernet.h>
37 #include <vnet/plugin/plugin.h>
38 #include <vpp/app/version.h>
39 
40 #define IXGE_ALWAYS_POLL 0
41 
42 #define EVENT_SET_FLAGS 0
43 #define IXGE_HWBP_RACE_ELOG 0
44 
45 #define PCI_VENDOR_ID_INTEL 0x8086
46 
47 /* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
48 #define XGE_PHY_DEV_TYPE_PMA_PMD 1
49 #define XGE_PHY_DEV_TYPE_PHY_XS 4
50 #define XGE_PHY_ID1 0x2
51 #define XGE_PHY_ID2 0x3
52 #define XGE_PHY_CONTROL 0x0
53 #define XGE_PHY_CONTROL_RESET (1 << 15)
54 
58 
59 static void
/* Acquire the device software semaphore: spin until bit 0 of the
   software_semaphore register reads set (suspending the calling process
   100us between polls after the first attempt), then set bit 1 and
   re-read until the hardware latches it.
   NOTE(review): the signature line (upstream line 60) was lost in this
   extraction; 'xd' is presumably an ixge_device_t * parameter -- confirm
   against upstream ixge.c. */
61 {
62  ixge_main_t *xm = &ixge_main;
63  vlib_main_t *vm = xm->vlib_main;
64  ixge_regs_t *r = xd->regs;
65  u32 i;
66 
67  i = 0;
  /* Wait for semaphore bit 0 to read non-zero before claiming. */
68  while (!(r->software_semaphore & (1 << 0)))
69  {
70  if (i > 0)
71  vlib_process_suspend (vm, 100e-6);
72  i++;
73  }
  /* Claim by setting bit 1; loop until the write is observed to stick. */
74  do
75  {
76  r->software_semaphore |= 1 << 1;
77  }
78  while (!(r->software_semaphore & (1 << 1)));
79 }
80 
81 static void
/* Release the software semaphore by clearing bits 0 and 1.
   NOTE(review): signature line (upstream 82) missing here -- presumably
   ixge_semaphore_release (ixge_device_t * xd); confirm upstream. */
83 {
84  ixge_regs_t *r = xd->regs;
85  r->software_semaphore &= ~3;
86 }
87 
88 static void
/* Take the software/firmware synchronization lock for the resource bits
   in sw_mask: under the semaphore, wait until the corresponding firmware
   bits (sw_mask << 5) are clear, then set the software bits. Suspends
   10ms between retries.
   NOTE(review): the signature line (upstream 89) and the semaphore
   release call (upstream 104) were dropped by the extraction; 'xd' and
   'sw_mask' are presumably parameters -- confirm upstream. */
90 {
91  ixge_main_t *xm = &ixge_main;
92  vlib_main_t *vm = xm->vlib_main;
93  ixge_regs_t *r = xd->regs;
  /* Firmware's copy of each resource bit sits 5 bits above software's. */
94  u32 fw_mask = sw_mask << 5;
95  u32 m, done = 0;
96 
97  while (!done)
98  {
99  ixge_semaphore_get (xd);
100  m = r->software_firmware_sync;
101  done = (m & fw_mask) == 0;
102  if (done)
103  r->software_firmware_sync = m | sw_mask;
105  if (!done)
106  vlib_process_suspend (vm, 10e-3);
107  }
108 }
109 
110 static void
/* Drop the software/firmware sync bits in sw_mask (taken under the
   device semaphore).
   NOTE(review): the signature line (upstream 111) and the trailing
   semaphore release (upstream 116) were dropped by the extraction --
   confirm against upstream ixge.c. */
112 {
113  ixge_regs_t *r = xd->regs;
114  ixge_semaphore_get (xd);
115  r->software_firmware_sync &= ~sw_mask;
117 }
118 
119 u32
/* Read or write one IEEE 802.3 clause-45 PHY register over MDIO.
   dev_type: MMD device type (< 32); reg_index: register address (< 64K);
   v: value to write when !is_read. Returns the value read when is_read,
   otherwise echoes v. Takes the software/firmware sync lock for the
   active PHY around the access. Register write/poll ordering below is
   the MDIO protocol -- do not reorder. */
120 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
121  u32 v, u32 is_read)
122 {
123  ixge_regs_t *r = xd->regs;
124  const u32 busy_bit = 1 << 30;
125  u32 x;
126 
127  ASSERT (xd->phy_index < 2);
128  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));
129 
130  ASSERT (reg_index < (1 << 16));
131  ASSERT (dev_type < (1 << 5));
  /* For a write, stage the data before issuing the command. */
132  if (!is_read)
133  r->xge_mac.phy_data = v;
134 
135  /* Address cycle. */
136  x =
137  reg_index | (dev_type << 16) | (xd->
138  phys[xd->phy_index].mdio_address << 21);
139  r->xge_mac.phy_command = x | busy_bit;
140  /* Busy wait timed to take 28e-6 secs. No suspend. */
141  while (r->xge_mac.phy_command & busy_bit)
142  ;
143 
  /* Data cycle: op code 2 = read, 1 = write. */
144  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
145  while (r->xge_mac.phy_command & busy_bit)
146  ;
147 
148  if (is_read)
149  v = r->xge_mac.phy_data >> 16;
150 
151  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));
152 
153  return v;
154 }
155 
156 static u32
157 ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
158 {
159  return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
160  1);
161 }
162 
163 static void
164 ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
165 {
166  (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
167  0);
168 }
169 
170 static void
/* Bit-bang output half of the SFP I2C bus: drive SCL (bit 1) and SDA
   (bit 3) of the i2c_control register from the given logic levels.
   NOTE(review): the line resolving 'xd' from the bus (upstream 174,
   presumably a vec_elt_at_index on xm->devices via b->private_data) was
   dropped by the extraction -- confirm upstream. */
171 ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
172 {
173  ixge_main_t *xm = &ixge_main;
175  u32 v;
176 
177  v = 0;
178  v |= (sda != 0) << 3;
179  v |= (scl != 0) << 1;
180  xd->regs->i2c_control = v;
181 }
182 
183 static void
/* Bit-bang input half of the SFP I2C bus: sample SDA (bit 2) and SCL
   (bit 0) from the i2c_control register.
   NOTE(review): the line resolving 'xd' from the bus (upstream 187) was
   dropped by the extraction -- confirm upstream. */
184 ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
185 {
186  ixge_main_t *xm = &ixge_main;
188  u32 v;
189 
190  v = xd->regs->i2c_control;
191  *sda = (v & (1 << 2)) != 0;
192  *scl = (v & (1 << 0)) != 0;
193 }
194 
195 static u16
/* Read one 16-bit word from the device EEPROM: start the read (bit 0)
   at the given word address (<< 2), busy-wait on the done bit (bit 1),
   and return the data from the top half of the register.
   NOTE(review): signature line (upstream 196) missing -- presumably
   ixge_read_eeprom (ixge_device_t * xd, u32 address); confirm upstream. */
197 {
198  ixge_regs_t *r = xd->regs;
199  u32 v;
200  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
201  /* Wait for done bit. */
202  while (!((v = r->eeprom_read) & (1 << 1)))
203  ;
204  return v >> 16;
205 }
206 
207 static void
/* Turn the SFP transmit laser on/off via SDP bit 3 (active-low enable:
   the bit disables tx when set).
   NOTE(review): signature line (upstream 208) missing -- callers name
   this ixge_sfp_enable_disable_laser (xd, enable); confirm upstream. */
209 {
210  u32 tx_disable_bit = 1 << 3;
211  if (enable)
212  xd->regs->sdp_control &= ~tx_disable_bit;
213  else
214  xd->regs->sdp_control |= tx_disable_bit;
215 }
216 
217 static void
/* Select 10G (bit set) vs 1G mode via SDP bit 5.
   NOTE(review): signature line (upstream 218) missing -- callers name
   this ixge_sfp_enable_disable_10g (xd, enable); confirm upstream. */
219 {
220  u32 is_10g_bit = 1 << 5;
221  if (enable)
222  xd->regs->sdp_control |= is_10g_bit;
223  else
224  xd->regs->sdp_control &= ~is_10g_bit;
225 }
226 
227 static clib_error_t *
/* Replay the analog-config init sequence stored in the EEPROM for the
   given sfp_type: locate the id/address table at EEPROM word 0x2b, find
   the entry matching sfp_type, then stream its register values into
   core_analog_config under the sw/fw sync lock. Returns a clib error if
   the sequence or the id is absent.
   NOTE(review): the signature line (upstream 228) and the sync release
   after the write loop (upstream 256) were dropped by the extraction --
   confirm upstream. */
229 {
230  u16 a, id, reg_values_addr = 0;
231 
232  a = ixge_read_eeprom (xd, 0x2b);
233  if (a == 0 || a == 0xffff)
234  return clib_error_create ("no init sequence in eeprom");
235 
  /* Walk (id, address) pairs; 0xffff terminates the table. */
236  while (1)
237  {
238  id = ixge_read_eeprom (xd, ++a);
239  if (id == 0xffff)
240  break;
241  reg_values_addr = ixge_read_eeprom (xd, ++a);
242  if (id == sfp_type)
243  break;
244  }
245  if (id != sfp_type)
246  return clib_error_create ("failed to find id 0x%x", sfp_type);
247 
248  ixge_software_firmware_sync (xd, 1 << 3);
249  while (1)
250  {
251  u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
252  if (v == 0xffff)
253  break;
254  xd->regs->core_analog_config = v;
255  }
257 
258  /* Make sure laser is off. We'll turn on the laser when
259  the interface is brought up. */
260  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
261  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);
262 
263  return 0;
264 }
265 
266 static void
/* Bring the SFP link up or down: on up, program SFI serial autoneg,
   restart autonegotiation, wait for the link partner, switch to the 10G
   SFI serdes link mode, then enable the laser and (in process context)
   give the partner 300ms to notice.
   NOTE(review): the signature line (upstream 267) and several PHY/MAC
   register read/write lines (upstream 277, 282-283, 287-288, 295) were
   dropped by the extraction, so 'v' below is read from and written back
   to registers not visible here -- confirm against upstream ixge.c. */
268 {
269  u32 v;
270 
271  if (is_up)
272  {
273  /* pma/pmd 10g serial SFI. */
274  xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
275  xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;
276 
278  v &= ~(7 << 13);
279  v |= (0 << 13);
280  /* Restart autoneg. */
281  v |= (1 << 12);
283 
  /* Wait until the link partner advertises something. */
284  while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
285  ;
286 
288 
289  /* link mode 10g sfi serdes */
290  v &= ~(7 << 13);
291  v |= (3 << 13);
292 
293  /* Restart autoneg. */
294  v |= (1 << 12);
296 
  /* Read link_status once (side-effect read; value discarded). */
297  xd->regs->xge_mac.link_status;
298  }
299 
300  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);
301 
302  /* Give time for link partner to notice that we're up. */
303  if (is_up && vlib_in_process_context (vlib_get_main ()))
304  {
305  vlib_process_suspend (vlib_get_main (), 300e-3);
306  }
307 }
308 
/* Map (direction, queue index) to the matching DMA register block:
   RX queues 0-63 live in rx_dma0, 64-127 in rx_dma1; TX queues index
   tx_dma directly.
   NOTE(review): the return-type/signature lines (upstream 309-310) were
   dropped -- callers use get_dma_regs (xd, rt, qi) returning an
   ixge_dma_regs_t *; confirm upstream. */
311 {
312  ixge_regs_t *r = xd->regs;
313  ASSERT (qi < 128);
314  if (rt == VLIB_RX)
315  return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
316  else
317  return &r->tx_dma[qi];
318 }
319 
320 static clib_error_t *
/* VNET admin up/down handler: on up, enable rx, tx DMA and rx queue 0
   (busy-waiting for the queue-enable bit); on down, disable rx/tx; in
   both cases propagate the state to the SFP link. Always returns 0.
   NOTE(review): the signature line (upstream 321, presumably
   (vnet_main_t * vnm, u32 hw_if_index, u32 flags)) and the line deriving
   'xd' from the hw interface (upstream 326) were dropped -- confirm
   upstream. */
322 {
323  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
324  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
325  ixge_main_t *xm = &ixge_main;
327  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
328 
329  if (is_up)
330  {
331  xd->regs->rx_enable |= 1;
332  xd->regs->tx_dma_control |= 1;
  /* Enable rx queue 0 and wait for hardware to acknowledge. */
333  dr->control |= 1 << 25;
334  while (!(dr->control & (1 << 25)))
335  ;
336  }
337  else
338  {
339  xd->regs->rx_enable &= ~1;
340  xd->regs->tx_dma_control &= ~1;
341  }
342 
343  ixge_sfp_device_up_down (xd, is_up);
344 
345  return /* no error */ 0;
346 }
347 
348 static void
/* Initialize an SFP-based PHY: wire up the bit-bang I2C bus, read the
   module's SFP EEPROM at address 0x50, and (when the module is valid)
   run the EEPROM-driven analog init sequence. The PHY is marked as
   having no MDIO address.
   NOTE(review): several lines were dropped by the extraction -- the
   signature (upstream 349), the i2c get/put function-pointer hookups
   (upstream 355-356), the EEPROM validity test guarding the
   SFP_ID_UNKNOWN assignment (upstream 361), and the call producing 'e'
   (upstream 367, presumably ixge_sfp_phy_init_from_eeprom with the
   SR/LR id). Confirm against upstream ixge.c. */
350 {
351  ixge_phy_t *phy = xd->phys + xd->phy_index;
352  i2c_bus_t *ib = &xd->i2c_bus;
353 
354  ib->private_data = xd->device_index;
357  vlib_i2c_init (ib);
358 
359  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);
360 
362  xd->sfp_eeprom.id = SFP_ID_UNKNOWN;
363  else
364  {
365  /* FIXME 5 => SR/LR eeprom ID. */
366  clib_error_t *e =
368  if (e)
369  clib_error_report (e);
370  }
371 
372  phy->mdio_address = ~0;
373 }
374 
375 static void
/* Initialize the device PHY. SFP-based 82599 variants take the SFP
   path; otherwise probe MDIO addresses 0-31 for a responding PHY, log
   its id/address, reset it, and wait for the self-clearing reset bit.
   NOTE(review): several lines were dropped by the extraction -- the
   signature (upstream 376), the probe read producing 'v' (upstream 402),
   the phy->id composition from the ID1/ID2 registers (upstream 413-414),
   the reset write (upstream 432-433) and the reset-poll loop condition
   (upstream 440-441). Confirm against upstream ixge.c. */
377 {
378  ixge_main_t *xm = &ixge_main;
379  vlib_main_t *vm = xm->vlib_main;
380  ixge_phy_t *phy = xd->phys + xd->phy_index;
381 
382  switch (xd->device_id)
383  {
384  case IXGE_82599_sfp:
385  case IXGE_82599_sfp_em:
386  case IXGE_82599_sfp_fcoe:
387  /* others? */
388  return ixge_sfp_phy_init (xd);
389 
390  default:
391  break;
392  }
393 
394  /* Probe address of phy. */
395  {
396  u32 i, v;
397 
398  phy->mdio_address = ~0;
399  for (i = 0; i < 32; i++)
400  {
401  phy->mdio_address = i;
403  if (v != 0xffff && v != 0)
404  break;
405  }
406 
407  /* No PHY found? */
408  if (i >= 32)
409  return;
410  }
411 
412  phy->id =
415 
  /* Event-log the discovered PHY for debugging. */
416  {
417  ELOG_TYPE_DECLARE (e) =
418  {
419  .function = (char *) __FUNCTION__,.format =
420  "ixge %d, phy id 0x%d mdio address %d",.format_args = "i4i4i4",};
421  struct
422  {
423  u32 instance, id, address;
424  } *ed;
425  ed = ELOG_DATA (&vm->elog_main, e);
426  ed->instance = xd->device_index;
427  ed->id = phy->id;
428  ed->address = phy->mdio_address;
429  }
430 
431  /* Reset phy. */
434 
435  /* Wait for self-clearning reset bit to clear. */
436  do
437  {
438  vlib_process_suspend (vm, 1e-3);
439  }
442 }
443 
444 static u8 *
/* vlib format function: pretty-print an rx descriptor as written back
   by hardware -- ownership, length/offset, vlan, l2/l3/l4 type flags
   and checksum status.
   NOTE(review): many lines were dropped by the extraction (the function
   name line upstream 445-447, and condition/argument lines upstream 454,
   458-460, 465, 468, 474, 483, 485, 503, 505, 507, 509), so several
   format calls below are missing their guards or arguments. Confirm
   against upstream ixge.c before relying on this text. */
446 {
448  va_arg (*va, ixge_rx_from_hw_descriptor_t *);
449  u32 s0 = d->status[0], s2 = d->status[2];
450  u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
451  u32 indent = format_get_indent (s);
452 
453  s = format (s, "%s-owned",
455  "hw");
456  s =
457  format (s, ", length this descriptor %d, l3 offset %d",
461  s = format (s, ", end-of-packet");
462 
463  s = format (s, "\n%U", format_white_space, indent);
464 
466  s = format (s, "layer2 error");
467 
469  {
470  s = format (s, "layer 2 type %d", (s0 & 0x1f));
471  return s;
472  }
473 
475  s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
476  format_white_space, indent);
477 
478  if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
479  {
480  s = format (s, "ip4%s",
481  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
482  "");
484  s = format (s, " checksum %s",
486  "bad" : "ok");
487  }
488  if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
489  s = format (s, "ip6%s",
490  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
491  "");
492  is_tcp = is_udp = 0;
493  if ((is_ip = (is_ip4 | is_ip6)))
494  {
495  is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
496  is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
497  if (is_tcp)
498  s = format (s, ", tcp");
499  if (is_udp)
500  s = format (s, ", udp");
501  }
502 
504  s = format (s, ", tcp checksum %s",
506  "ok");
508  s = format (s, ", udp checksum %s",
510  "ok");
511 
512  return s;
513 }
514 
515 static u8 *
516 format_ixge_tx_descriptor (u8 * s, va_list * va)
517 {
518  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
519  u32 s0 = d->status0, s1 = d->status1;
520  u32 indent = format_get_indent (s);
521  u32 v;
522 
523  s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
524  d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);
525 
526  s = format (s, "\n%U", format_white_space, indent);
527 
528  if ((v = (s0 >> 0) & 3))
529  s = format (s, "reserved 0x%x, ", v);
530 
531  if ((v = (s0 >> 2) & 3))
532  s = format (s, "mac 0x%x, ", v);
533 
534  if ((v = (s0 >> 4) & 0xf) != 3)
535  s = format (s, "type 0x%x, ", v);
536 
537  s = format (s, "%s%s%s%s%s%s%s%s",
538  (s0 & (1 << 8)) ? "eop, " : "",
539  (s0 & (1 << 9)) ? "insert-fcs, " : "",
540  (s0 & (1 << 10)) ? "reserved26, " : "",
541  (s0 & (1 << 11)) ? "report-status, " : "",
542  (s0 & (1 << 12)) ? "reserved28, " : "",
543  (s0 & (1 << 13)) ? "is-advanced, " : "",
544  (s0 & (1 << 14)) ? "vlan-enable, " : "",
545  (s0 & (1 << 15)) ? "tx-segmentation, " : "");
546 
547  if ((v = s1 & 0xf) != 0)
548  s = format (s, "status 0x%x, ", v);
549 
550  if ((v = (s1 >> 4) & 0xf))
551  s = format (s, "context 0x%x, ", v);
552 
553  if ((v = (s1 >> 8) & 0x3f))
554  s = format (s, "options 0x%x, ", v);
555 
556  return s;
557 }
558 
/* Per-packet rx trace record.
   NOTE(review): nearly all member declarations (upstream 561-573,
   including the closing "} ixge_rx_dma_trace_t;") were dropped by the
   extraction; only the pre_data comment survives. Recover the full
   struct from upstream ixge.c before editing. */
559 typedef struct
560 {
562 
564 
566 
568 
570 
571  /* Copy of VLIB buffer; packet data stored in pre_data. */
574 
575 static u8 *
/* vlib format function for an rx DMA trace record: interface/queue,
   descriptor before and after DMA, buffer summary, then the packet
   prefix bytes (via the node's buffer formatter when tracing a start of
   packet, otherwise hex).
   NOTE(review): several lines were dropped by the extraction -- the 'xd'
   lookup (upstream 583-584), the sw-interface lookup argument (589), the
   before/after descriptor format arguments (597, 600) and the buffer
   format arguments (604), plus the declaration of 'f'. Confirm against
   upstream ixge.c. */
576 format_ixge_rx_dma_trace (u8 * s, va_list * va)
577 {
578  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
579  vlib_node_t *node = va_arg (*va, vlib_node_t *);
580  vnet_main_t *vnm = vnet_get_main ();
581  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
582  ixge_main_t *xm = &ixge_main;
585  u32 indent = format_get_indent (s);
586 
587  {
588  vnet_sw_interface_t *sw =
590  s =
591  format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
592  t->queue_index);
593  }
594 
595  s = format (s, "\n%Ubefore: %U",
596  format_white_space, indent,
598  s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
599  format_white_space, indent,
601 
602  s = format (s, "\n%Ubuffer 0x%x: %U",
603  format_white_space, indent,
605 
606  s = format (s, "\n%U", format_white_space, indent);
607 
608  f = node->format_buffer;
609  if (!f || !t->is_start_of_packet)
610  f = format_hex_bytes;
611  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
612 
613  return s;
614 }
615 
/* X-macro table of driver error counters: _ (symbol, description).
   Expanded below to build ixge_error_t and (elsewhere) the matching
   error-string table. */
616 #define foreach_ixge_error \
617  _ (none, "no error") \
618  _ (tx_full_drops, "tx ring full drops") \
619  _ (ip4_checksum_error, "ip4 checksum errors") \
620  _ (rx_alloc_fail, "rx buf alloc from free list failed") \
621  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")
622 
/* Error-counter enum generated from foreach_ixge_error.
   NOTE(review): the expansion line (upstream 626, "foreach_ixge_error")
   and the trailing count member (upstream 628, "IXGE_N_ERROR,") were
   dropped by the extraction -- confirm upstream. */
623 typedef enum
624 {
625 #define _(f,s) IXGE_ERROR_##f,
627 #undef _
629 } ixge_error_t;
630 
631 always_inline void
/* Decode one rx descriptor's status words (s00/s02) into the next node
   (ip4/ip6 input, drop on error, or the per-interface override), the
   error code, and vnet checksum buffer flags.
   NOTE(review): several lines were dropped by the extraction -- the
   signature (upstream 632), the is0_ip4 test and the n0 initialization
   (640, 642), and the status2 conditions feeding f0 (657-658, 661-662).
   Confirm against upstream ixge.c. */
633  u32 s00, u32 s02,
634  u8 * next0, u8 * error0, u32 * flags0)
635 {
636  u8 is0_ip4, is0_ip6, n0, e0;
637  u32 f0;
638 
639  e0 = IXGE_ERROR_none;
641 
643  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
644 
645  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
646  ? IXGE_ERROR_ip4_checksum_error : e0);
647 
648  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
649  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
650 
  /* Per-interface redirect overrides the protocol-based next node. */
651  n0 = (xd->per_interface_next_index != ~0) ?
652  xd->per_interface_next_index : n0;
653 
654  /* Check for error. */
655  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
656 
659  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
660 
663  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
664 
665  *error0 = e0;
666  *next0 = n0;
667  *flags0 = f0;
668 }
669 
670 always_inline void
/* Dual-descriptor variant of ixge_rx_next_and_error_from_status_x1:
   decodes two descriptors' status words at once for the unrolled rx
   loop, producing next/error/flags for each.
   NOTE(review): several lines were dropped by the extraction -- the
   signature (upstream 671), the is0/is1_ip4 tests (684-685) and the
   status2 conditions feeding f0/f1 (716-717, 719-720, 723-724, 726-727).
   Confirm against upstream ixge.c. */
672  u32 s00, u32 s02,
673  u32 s10, u32 s12,
674  u8 * next0, u8 * error0, u32 * flags0,
675  u8 * next1, u8 * error1, u32 * flags1)
676 {
677  u8 is0_ip4, is0_ip6, n0, e0;
678  u8 is1_ip4, is1_ip6, n1, e1;
679  u32 f0, f1;
680 
681  e0 = e1 = IXGE_ERROR_none;
682  n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;
683 
686 
687  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
688  n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
689 
690  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
691  ? IXGE_ERROR_ip4_checksum_error : e0);
692  e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
693  ? IXGE_ERROR_ip4_checksum_error : e1);
694 
695  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
696  is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
697 
698  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
699  n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;
700 
  /* Per-interface redirect overrides the protocol-based next node. */
701  n0 = (xd->per_interface_next_index != ~0) ?
702  xd->per_interface_next_index : n0;
703  n1 = (xd->per_interface_next_index != ~0) ?
704  xd->per_interface_next_index : n1;
705 
706  /* Check for error. */
707  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
708  n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;
709 
710  *error0 = e0;
711  *error1 = e1;
712 
713  *next0 = n0;
714  *next1 = n1;
715 
718  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
721  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
722 
725  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
728  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
729 
730  *flags0 = f0;
731  *flags1 = f1;
732 }
733 
734 static void
/* Record rx trace entries for up to n_descriptors buffers: for each
   buffer, decode next/error/flags from the before-DMA descriptor,
   attach a trace record capturing the descriptor before/after DMA, the
   queue/device, and a copy of the buffer header plus leading packet
   bytes. Tracks start-of-packet across buffer chains.
   NOTE(review): lines dropped by the extraction -- the signature
   (upstream 735), the bd/ad local declarations (744-745), the x2 decode
   call line (769), the t0 declaration in the single loop (813) and the
   x1 decode call line (821). Confirm against upstream ixge.c. */
736  ixge_device_t * xd,
737  ixge_dma_queue_t * dq,
738  ixge_descriptor_t * before_descriptors,
739  u32 * before_buffers,
740  ixge_descriptor_t * after_descriptors, uword n_descriptors)
741 {
742  vlib_main_t *vm = xm->vlib_main;
743  vlib_node_runtime_t *node = dq->rx.node;
746  u32 *b, n_left, is_sop, next_index_sop;
747 
748  n_left = n_descriptors;
749  b = before_buffers;
750  bd = &before_descriptors->rx_from_hw;
751  ad = &after_descriptors->rx_to_hw;
752  is_sop = dq->rx.is_start_of_packet;
753  next_index_sop = dq->rx.saved_start_of_packet_next_index;
754 
  /* Unrolled x2 loop. */
755  while (n_left >= 2)
756  {
757  u32 bi0, bi1, flags0, flags1;
758  vlib_buffer_t *b0, *b1;
759  ixge_rx_dma_trace_t *t0, *t1;
760  u8 next0, error0, next1, error1;
761 
762  bi0 = b[0];
763  bi1 = b[1];
764  n_left -= 2;
765 
766  b0 = vlib_get_buffer (vm, bi0);
767  b1 = vlib_get_buffer (vm, bi1);
768 
770  bd[0].status[0], bd[0].status[2],
771  bd[1].status[0], bd[1].status[2],
772  &next0, &error0, &flags0,
773  &next1, &error1, &flags1);
774 
  /* Trace under the start-of-packet's next index for the whole chain. */
775  next_index_sop = is_sop ? next0 : next_index_sop;
776  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
777  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
778  t0->is_start_of_packet = is_sop;
779  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
780 
781  next_index_sop = is_sop ? next1 : next_index_sop;
782  vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
783  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
784  t1->is_start_of_packet = is_sop;
785  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
786 
787  t0->queue_index = dq->queue_index;
788  t1->queue_index = dq->queue_index;
789  t0->device_index = xd->device_index;
790  t1->device_index = xd->device_index;
791  t0->before.rx_from_hw = bd[0];
792  t1->before.rx_from_hw = bd[1];
793  t0->after.rx_to_hw = ad[0];
794  t1->after.rx_to_hw = ad[1];
795  t0->buffer_index = bi0;
796  t1->buffer_index = bi1;
797  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
798  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
799  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
800  sizeof (t0->buffer.pre_data));
801  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
802  sizeof (t1->buffer.pre_data));
803 
804  b += 2;
805  bd += 2;
806  ad += 2;
807  }
808 
  /* Remainder, one descriptor at a time. */
809  while (n_left >= 1)
810  {
811  u32 bi0, flags0;
812  vlib_buffer_t *b0;
814  u8 next0, error0;
815 
816  bi0 = b[0];
817  n_left -= 1;
818 
819  b0 = vlib_get_buffer (vm, bi0);
820 
822  bd[0].status[0], bd[0].status[2],
823  &next0, &error0, &flags0);
824 
825  next_index_sop = is_sop ? next0 : next_index_sop;
826  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
827  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
828  t0->is_start_of_packet = is_sop;
829  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
830 
831  t0->queue_index = dq->queue_index;
832  t0->device_index = xd->device_index;
833  t0->before.rx_from_hw = bd[0];
834  t0->after.rx_to_hw = ad[0];
835  t0->buffer_index = bi0;
836  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
837  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
838  sizeof (t0->buffer.pre_data));
839 
840  b += 1;
841  bd += 1;
842  ad += 1;
843  }
844 }
845 
/* Per-packet tx trace record.
   NOTE(review): nearly all member declarations (upstream 848-860,
   including the closing "} ixge_tx_dma_trace_t;") were dropped by the
   extraction; only the pre_data comment survives. Recover the full
   struct from upstream ixge.c before editing. */
846 typedef struct
847 {
849 
851 
853 
855 
857 
858  /* Copy of VLIB buffer; packet data stored in pre_data. */
861 
862 static u8 *
/* vlib format function for a tx DMA trace record: interface/queue, the
   tx descriptor, buffer summary, then the leading packet bytes (via the
   node's buffer formatter for a start of packet, otherwise hex).
   NOTE(review): lines dropped by the extraction -- the 'xd' lookup
   (upstream 870-871), the sw-interface lookup argument (876), the
   descriptor/buffer format arguments (884, 888) and the 'f =
   node->format_buffer' line (892). Confirm against upstream ixge.c. */
863 format_ixge_tx_dma_trace (u8 * s, va_list * va)
864 {
865  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
866  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
867  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
868  vnet_main_t *vnm = vnet_get_main ();
869  ixge_main_t *xm = &ixge_main;
872  u32 indent = format_get_indent (s);
873 
874  {
875  vnet_sw_interface_t *sw =
877  s =
878  format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
879  t->queue_index);
880  }
881 
882  s = format (s, "\n%Udescriptor: %U",
883  format_white_space, indent,
885 
886  s = format (s, "\n%Ubuffer 0x%x: %U",
887  format_white_space, indent,
889 
890  s = format (s, "\n%U", format_white_space, indent);
891 
893  if (!f || !t->is_start_of_packet)
894  f = format_hex_bytes;
895  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
896 
897  return s;
898 }
899 
/* Per-call tx state threaded through ixge_tx_no_wrap/ixge_tx_trace.
   NOTE(review): all member declarations (upstream 902-909, including
   the closing "} ixge_tx_state_t;") were dropped by the extraction;
   usage below shows node, is_start_of_packet, start_of_packet_descriptor
   and n_bytes_in_packet members. Recover from upstream ixge.c. */
900 typedef struct
901 {
903 
905 
907 
910 
911 static void
/* Record tx trace entries for n_descriptors buffers just queued on the
   tx ring: each record captures queue/device, the tx descriptor, and a
   copy of the buffer header plus leading packet bytes; start-of-packet
   tracking follows buffer chains.
   NOTE(review): lines dropped by the extraction -- the signature
   (upstream 912), the descriptor pointer declaration (921) and the t0
   declaration in the single loop (973). Confirm against upstream. */
913  ixge_device_t * xd,
914  ixge_dma_queue_t * dq,
915  ixge_tx_state_t * tx_state,
916  ixge_tx_descriptor_t * descriptors,
917  u32 * buffers, uword n_descriptors)
918 {
919  vlib_main_t *vm = xm->vlib_main;
920  vlib_node_runtime_t *node = tx_state->node;
922  u32 *b, n_left, is_sop;
923 
924  n_left = n_descriptors;
925  b = buffers;
926  d = descriptors;
927  is_sop = tx_state->is_start_of_packet;
928 
  /* Unrolled x2 loop. */
929  while (n_left >= 2)
930  {
931  u32 bi0, bi1;
932  vlib_buffer_t *b0, *b1;
933  ixge_tx_dma_trace_t *t0, *t1;
934 
935  bi0 = b[0];
936  bi1 = b[1];
937  n_left -= 2;
938 
939  b0 = vlib_get_buffer (vm, bi0);
940  b1 = vlib_get_buffer (vm, bi1);
941 
942  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
943  t0->is_start_of_packet = is_sop;
944  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
945 
946  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
947  t1->is_start_of_packet = is_sop;
948  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
949 
950  t0->queue_index = dq->queue_index;
951  t1->queue_index = dq->queue_index;
952  t0->device_index = xd->device_index;
953  t1->device_index = xd->device_index;
954  t0->descriptor = d[0];
955  t1->descriptor = d[1];
956  t0->buffer_index = bi0;
957  t1->buffer_index = bi1;
958  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
959  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
960  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
961  sizeof (t0->buffer.pre_data));
962  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
963  sizeof (t1->buffer.pre_data));
964 
965  b += 2;
966  d += 2;
967  }
968 
  /* Remainder, one buffer at a time. */
969  while (n_left >= 1)
970  {
971  u32 bi0;
972  vlib_buffer_t *b0;
974 
975  bi0 = b[0];
976  n_left -= 1;
977 
978  b0 = vlib_get_buffer (vm, bi0);
979 
980  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
981  t0->is_start_of_packet = is_sop;
982  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
983 
984  t0->queue_index = dq->queue_index;
985  t0->device_index = xd->device_index;
986  t0->descriptor = d[0];
987  t0->buffer_index = bi0;
988  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
989  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
990  sizeof (t0->buffer.pre_data));
991 
992  b += 1;
993  d += 1;
994  }
995 }
996 
/* Ring-index distance from i0 to i1 (mod n_descriptors): how many
   descriptors lie between the two indices walking forward.
   NOTE(review): the signature lines (upstream 997-998) were dropped --
   callers use ixge_ring_sub (q, i0, i1); confirm upstream. */
999 {
1000  i32 d = i1 - i0;
1001  ASSERT (i0 < q->n_descriptors);
1002  ASSERT (i1 < q->n_descriptors);
1003  return d < 0 ? q->n_descriptors + d : d;
1004 }
1005 
/* Ring-index addition (mod n_descriptors).
   NOTE(review): the signature lines (upstream 1006-1007) were dropped --
   presumably ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1);
   confirm upstream. */
1008 {
1009  u32 d = i0 + i1;
1010  ASSERT (i0 < q->n_descriptors);
1011  ASSERT (i1 < q->n_descriptors);
  /* Single conditional subtraction suffices since i0, i1 < n. */
1012  d -= d >= q->n_descriptors ? q->n_descriptors : 0;
1013  return d;
1014 }
1015 
/* Return 1 iff the descriptor's status words match the driver's tx
   descriptor template under the template mask, else 0.
   NOTE(review): the signature lines (upstream 1016-1018) and the
   XOR-against-template halves of each comparison (upstream 1023, 1027)
   were dropped by the extraction -- confirm upstream. */
1019 {
1020  u32 cmp;
1021 
1022  cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
1024  if (cmp)
1025  return 0;
1026  cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
1028  if (cmp)
1029  return 0;
1030 
1031  return 1;
1032 }
1033 
1034 static uword
/* Fill a contiguous (non-wrapping) span of the tx ring starting at
   start_descriptor_index with n_descriptors buffers: swaps old buffer
   indices out for freeing, writes buffer physical addresses, lengths
   and status bits (eop per chain tail), back-patches the packet length
   into each start-of-packet descriptor, optionally traces, and updates
   tx_state for the caller. Returns n_descriptors.
   NOTE(review): lines dropped by the extraction -- the first signature
   line (upstream 1035), ASSERTs/length checks (1097-1098, 1152), the
   bit-position constants in the status0 shifts (1108, 1111, 1159), the
   status1 length expressions (1115, 1123, 1163) and a vec_elt_at_index
   argument line (1173). Confirm against upstream ixge.c. */
1036  ixge_device_t * xd,
1037  ixge_dma_queue_t * dq,
1038  u32 * buffers,
1039  u32 start_descriptor_index,
1040  u32 n_descriptors, ixge_tx_state_t * tx_state)
1041 {
1042  vlib_main_t *vm = xm->vlib_main;
1043  ixge_tx_descriptor_t *d, *d_sop;
1044  u32 n_left = n_descriptors;
1045  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
1046  u32 *to_tx =
1047  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1048  u32 is_sop = tx_state->is_start_of_packet;
1049  u32 len_sop = tx_state->n_bytes_in_packet;
1050  u16 template_status = xm->tx_descriptor_template.status0;
1051  u32 descriptor_prefetch_rotor = 0;
1052 
1053  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1054  d = &dq->descriptors[start_descriptor_index].tx;
1055  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;
1056 
  /* Unrolled x2 loop with buffer/descriptor prefetch. */
1057  while (n_left >= 4)
1058  {
1059  vlib_buffer_t *b0, *b1;
1060  u32 bi0, fi0, len0;
1061  u32 bi1, fi1, len1;
1062  u8 is_eop0, is_eop1;
1063 
1064  /* Prefetch next iteration. */
1065  vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
1066  vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
1067 
1068  if ((descriptor_prefetch_rotor & 0x3) == 0)
1069  CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
1070 
1071  descriptor_prefetch_rotor += 2;
1072 
1073  bi0 = buffers[0];
1074  bi1 = buffers[1];
1075 
  /* Swap out the previously-queued buffer index for later freeing. */
1076  to_free[0] = fi0 = to_tx[0];
1077  to_tx[0] = bi0;
1078  to_free += fi0 != 0;
1079 
1080  to_free[0] = fi1 = to_tx[1];
1081  to_tx[1] = bi1;
1082  to_free += fi1 != 0;
1083 
1084  buffers += 2;
1085  n_left -= 2;
1086  to_tx += 2;
1087 
1088  b0 = vlib_get_buffer (vm, bi0);
1089  b1 = vlib_get_buffer (vm, bi1);
1090 
1091  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1092  is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1093 
1094  len0 = b0->current_length;
1095  len1 = b1->current_length;
1096 
1099 
1100  d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
1101  d[1].buffer_address = vlib_buffer_get_pa (vm, b1);
1102 
1103  d[0].n_bytes_this_buffer = len0;
1104  d[1].n_bytes_this_buffer = len1;
1105 
1106  d[0].status0 =
1107  template_status | (is_eop0 <<
1109  d[1].status0 =
1110  template_status | (is_eop1 <<
1112 
  /* Accumulate packet length; patch it into the SOP descriptor. */
1113  len_sop = (is_sop ? 0 : len_sop) + len0;
1114  d_sop[0].status1 =
1116  d += 1;
1117  d_sop = is_eop0 ? d : d_sop;
1118 
1119  is_sop = is_eop0;
1120 
1121  len_sop = (is_sop ? 0 : len_sop) + len1;
1122  d_sop[0].status1 =
1124  d += 1;
1125  d_sop = is_eop1 ? d : d_sop;
1126 
1127  is_sop = is_eop1;
1128  }
1129 
  /* Remainder, one buffer at a time. */
1130  while (n_left > 0)
1131  {
1132  vlib_buffer_t *b0;
1133  u32 bi0, fi0, len0;
1134  u8 is_eop0;
1135 
1136  bi0 = buffers[0];
1137 
1138  to_free[0] = fi0 = to_tx[0];
1139  to_tx[0] = bi0;
1140  to_free += fi0 != 0;
1141 
1142  buffers += 1;
1143  n_left -= 1;
1144  to_tx += 1;
1145 
1146  b0 = vlib_get_buffer (vm, bi0);
1147 
1148  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1149 
1150  len0 = b0->current_length;
1151 
1153 
1154  d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
1155  d[0].n_bytes_this_buffer = len0;
1156 
1157  d[0].status0 =
1158  template_status | (is_eop0 <<
1160 
1161  len_sop = (is_sop ? 0 : len_sop) + len0;
1162  d_sop[0].status1 =
1164  d += 1;
1165  d_sop = is_eop0 ? d : d_sop;
1166 
1167  is_sop = is_eop0;
1168  }
1169 
1170  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
1171  {
1172  to_tx =
1174  start_descriptor_index);
1175  ixge_tx_trace (xm, xd, dq, tx_state,
1176  &dq->descriptors[start_descriptor_index].tx, to_tx,
1177  n_descriptors);
1178  }
1179 
1180  _vec_len (xm->tx_buffers_pending_free) =
1181  to_free - xm->tx_buffers_pending_free;
1182 
1183  /* When we are done d_sop can point to end of ring. Wrap it if so. */
1184  {
1185  ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;
1186 
1187  ASSERT (d_sop - d_start <= dq->n_descriptors);
1188  d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
1189  }
1190 
1191  tx_state->is_start_of_packet = is_sop;
1192  tx_state->start_of_packet_descriptor = d_sop;
1193  tx_state->n_bytes_in_packet = len_sop;
1194 
1195  return n_descriptors;
1196 }
1197 
1198 static uword
/* VNET device tx function: place the frame's buffers on tx queue 0,
   dropping whole packets from the tail if the ring lacks room (counted
   as tx_full_drops), splitting the work at the ring wrap point, asking
   hardware to report status on the last descriptor, advancing the
   hardware tail pointer, and freeing replaced buffers. Returns the
   frame's vector count.
   NOTE(review): lines dropped by the extraction -- the remainder of the
   signature (upstream 1199), the 'xd' lookup (1204), the report-status
   bit set on the last descriptor (1313), a memory barrier before the
   tail write (1320) and the vlib_buffer_free call (1330). Confirm
   against upstream ixge.c. */
1200  vlib_node_runtime_t * node, vlib_frame_t * f)
1201 {
1202  ixge_main_t *xm = &ixge_main;
1203  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
1205  ixge_dma_queue_t *dq;
1206  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
1207  u32 queue_index = 0; /* fixme parameter */
1208  ixge_tx_state_t tx_state;
1209 
1210  tx_state.node = node;
1211  tx_state.is_start_of_packet = 1;
1212  tx_state.start_of_packet_descriptor = 0;
1213  tx_state.n_bytes_in_packet = 0;
1214 
1215  from = vlib_frame_vector_args (f);
1216 
1217  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
1218 
  /* Refresh our view of what hardware has consumed. */
1219  dq->head_index = dq->tx.head_index_write_back[0];
1220 
1221  /* Since head == tail means ring is empty we can send up to dq->n_descriptors - 1. */
1222  n_left_tx = dq->n_descriptors - 1;
1223  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);
1224 
1225  _vec_len (xm->tx_buffers_pending_free) = 0;
1226 
1227  n_descriptors_to_tx = f->n_vectors;
1228  n_tail_drop = 0;
1229  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
1230  {
1231  i32 i, n_ok, i_eop, i_sop;
1232 
  /* Scan backwards for the last complete packet that fits. */
1233  i_sop = i_eop = ~0;
1234  for (i = n_left_tx - 1; i >= 0; i--)
1235  {
1236  vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
1237  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1238  {
1239  if (i_sop != ~0 && i_eop != ~0)
1240  break;
1241  i_eop = i;
1242  i_sop = i + 1;
1243  }
1244  }
1245  if (i == 0)
1246  n_ok = 0;
1247  else
1248  n_ok = i_eop + 1;
1249 
1250  {
1251  ELOG_TYPE_DECLARE (e) =
1252  {
1253  .function = (char *) __FUNCTION__,.format =
1254  "ixge %d, ring full to tx %d head %d tail %d",.format_args =
1255  "i2i2i2i2",};
1256  struct
1257  {
1258  u16 instance, to_tx, head, tail;
1259  } *ed;
1260  ed = ELOG_DATA (&vm->elog_main, e);
1261  ed->instance = xd->device_index;
1262  ed->to_tx = n_descriptors_to_tx;
1263  ed->head = dq->head_index;
1264  ed->tail = dq->tail_index;
1265  }
1266 
1267  if (n_ok < n_descriptors_to_tx)
1268  {
1269  n_tail_drop = n_descriptors_to_tx - n_ok;
1270  vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
1271  vlib_error_count (vm, ixge_input_node.index,
1272  IXGE_ERROR_tx_full_drops, n_tail_drop);
1273  }
1274 
1275  n_descriptors_to_tx = n_ok;
1276  }
1277 
1278  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;
1279 
1280  /* Process from tail to end of descriptor ring. */
1281  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
1282  {
1283  u32 n =
1284  clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
1285  n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
1286  from += n;
1287  n_descriptors_to_tx -= n;
1288  dq->tail_index += n;
1289  ASSERT (dq->tail_index <= dq->n_descriptors);
1290  if (dq->tail_index == dq->n_descriptors)
1291  dq->tail_index = 0;
1292  }
1293 
  /* Wrapped remainder starts back at descriptor 0. */
1294  if (n_descriptors_to_tx > 0)
1295  {
1296  u32 n =
1297  ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
1298  from += n;
1299  ASSERT (n == n_descriptors_to_tx);
1300  dq->tail_index += n;
1301  ASSERT (dq->tail_index <= dq->n_descriptors);
1302  if (dq->tail_index == dq->n_descriptors)
1303  dq->tail_index = 0;
1304  }
1305 
1306  /* We should only get full packets. */
1307  ASSERT (tx_state.is_start_of_packet);
1308 
1309  /* Report status when last descriptor is done. */
1310  {
1311  u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
1312  ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
1314  }
1315 
1316  /* Give new descriptors to hardware. */
1317  {
1318  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);
1319 
1321 
1322  dr->tail_index = dq->tail_index;
1323  }
1324 
1325  /* Free any buffers that are done. */
1326  {
1327  u32 n = _vec_len (xm->tx_buffers_pending_free);
1328  if (n > 0)
1329  {
1331  _vec_len (xm->tx_buffers_pending_free) = 0;
1332  ASSERT (dq->tx.n_buffers_on_ring >= n);
1333  dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
1334  }
1335  }
1336 
1337  return f->n_vectors;
1338 }
1339 
/* Process up to n_descriptors RX descriptors starting at
 * start_descriptor_index without wrapping past the end of the ring:
 * harvest received buffers, refill the ring with fresh buffers, chain
 * multi-descriptor packets, and enqueue completed packets to next nodes.
 * Returns the number of whole packets enqueued.
 *
 * NOTE(review): this listing is a doxygen extraction and several source
 * lines are missing; the first line of the signature (presumably
 * "ixge_rx_queue_no_wrap (ixge_main_t * xm,") is one of them. */
static uword
			ixge_device_t * xd,
			ixge_dma_queue_t * dq,
			u32 start_descriptor_index, u32 n_descriptors)
{
  vlib_main_t *vm = xm->vlib_main;
  vlib_node_runtime_t *node = dq->rx.node;
  ixge_descriptor_t *d;
  /* Static scratch vectors for packet tracing — persist across calls;
     NOTE(review): not thread-safe if multiple RX queues run concurrently. */
  static ixge_descriptor_t *d_trace_save;
  static u32 *d_trace_buffers;
  u32 n_descriptors_left = n_descriptors;
  u32 *to_rx =
    vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
  u32 *to_add;
  /* Chain state saved from the previous call so packets spanning calls
     are reassembled correctly. */
  u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
  u32 bi_last = dq->rx.saved_last_buffer_index;
  u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
  u32 is_sop = dq->rx.is_start_of_packet;
  u32 next_index, n_left_to_next, *to_next;
  u32 n_packets = 0;
  u32 n_bytes = 0;
  u32 n_trace = vlib_get_trace_count (vm, node);
  vlib_buffer_t *b_last, b_dummy;

  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
  d = &dq->descriptors[start_descriptor_index];

  /* b_dummy absorbs next_buffer writes when there is no in-flight chain. */
  b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_dummy;
  next_index = dq->rx.next_index;

  /* Snapshot descriptors + buffer indices up front for tracing, since the
     descriptors are overwritten as they are refilled below. */
  if (n_trace > 0)
    {
      u32 n = clib_min (n_trace, n_descriptors);
      if (d_trace_save)
	{
	  _vec_len (d_trace_save) = 0;
	  _vec_len (d_trace_buffers) = 0;
	}
      vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
      vec_add (d_trace_buffers, to_rx, n);
    }

  /* Top up the driver's buffer cache so every consumed descriptor can be
     refilled.  On allocation failure, shrink the amount of work instead of
     stalling. */
  {
    uword l = vec_len (xm->rx_buffers_to_add);

    if (l < n_descriptors_left)
      {
	u32 n_to_alloc = 2 * dq->n_descriptors - l;
	u32 n_allocated;

	vec_resize (xm->rx_buffers_to_add, n_to_alloc);

	_vec_len (xm->rx_buffers_to_add) = l;
	n_allocated =
	  vlib_buffer_alloc (vm, xm->rx_buffers_to_add + l, n_to_alloc);
	_vec_len (xm->rx_buffers_to_add) += n_allocated;

	/* Handle transient allocation failure */
	if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
	  {
	    if (n_allocated == 0)
	      vlib_error_count (vm, ixge_input_node.index,
				IXGE_ERROR_rx_alloc_no_physmem, 1);
	    else
	      vlib_error_count (vm, ixge_input_node.index,
				IXGE_ERROR_rx_alloc_fail, 1);

	    n_descriptors_left = l + n_allocated;
	  }
	n_descriptors = n_descriptors_left;
      }

    /* Add buffers from end of vector going backwards. */
    to_add = vec_end (xm->rx_buffers_to_add) - 1;
  }

  while (n_descriptors_left > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual loop: two descriptors per iteration with prefetch. */
      while (n_descriptors_left >= 4 && n_left_to_next >= 2)
	{
	  vlib_buffer_t *b0, *b1;
	  vlib_buffer_t *f0, *f1;
	  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
	  u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
	  u8 is_eop0, error0, next0;
	  u8 is_eop1, error1, next1;
	  ixge_descriptor_t d0, d1;

	  vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
	  vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);

	  CLIB_PREFETCH (d + 2, 32, STORE);

	  d0.as_u32x4 = d[0].as_u32x4;
	  d1.as_u32x4 = d[1].as_u32x4;

	  s20 = d0.rx_from_hw.status[2];
	  s21 = d1.rx_from_hw.status[2];

	  s00 = d0.rx_from_hw.status[0];
	  s01 = d1.rx_from_hw.status[0];

	  /* NOTE(review): extraction dropped the condition line here —
	     presumably "hardware owns either descriptor" test on s20/s21. */
	  if (!
	    goto found_hw_owned_descriptor_x2;

	  bi0 = to_rx[0];
	  bi1 = to_rx[1];

	  /* Swap received buffers out of the ring, replacements in. */
	  ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
	  fi0 = to_add[0];
	  fi1 = to_add[-1];

	  to_rx[0] = fi0;
	  to_rx[1] = fi1;
	  to_rx += 2;
	  to_add -= 2;

#if 0
#endif

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);

	  /*
	   * Turn this on if you run into
	   * "bad monkey" contexts, and you want to know exactly
	   * which nodes they've visited... See main.c...
	   */


	  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
	  is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;

	  ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
						 &next0, &error0, &flags0,
						 &next1, &error1, &flags1);

	  /* Continuation descriptors inherit the SOP's next index. */
	  next0 = is_sop ? next0 : next_index_sop;
	  next1 = is_eop0 ? next1 : next0;
	  next_index_sop = next1;

	  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
	  b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);

	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
	  vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;

	  b0->error = node->errors[error0];
	  b1->error = node->errors[error1];

	  /* NOTE(review): the lines computing len0/len1 from the
	     descriptors were dropped by the extraction. */
	  n_bytes += len0 + len1;
	  n_packets += is_eop0 + is_eop1;

	  /* Give new buffers to hardware. */
	  f0 = vlib_get_buffer (vm, fi0);
	  f1 = vlib_get_buffer (vm, fi1);
	  /* NOTE(review): lines writing the replacement buffers' DMA
	     addresses into d0/d1 were dropped by the extraction. */
	  d[0].as_u32x4 = d0.as_u32x4;
	  d[1].as_u32x4 = d1.as_u32x4;

	  d += 2;
	  n_descriptors_left -= 2;

	  /* Point to either l2 or l3 header depending on next. */
	  /* NOTE(review): the "? ... : 0;" continuations of these two
	     expressions were dropped by the extraction. */
	  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
	  l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))

	  b0->current_length = len0 - l3_offset0;
	  b1->current_length = len1 - l3_offset1;
	  b0->current_data = l3_offset0;
	  b1->current_data = l3_offset1;

	  /* Maintain the buffer chain across descriptors. */
	  b_last->next_buffer = is_sop ? ~0 : bi0;
	  b0->next_buffer = is_eop0 ? ~0 : bi1;
	  bi_last = bi1;
	  b_last = b1;

	  if (CLIB_DEBUG > 0)
	    {
	      u32 bi_sop0 = is_sop ? bi0 : bi_sop;
	      u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;

	      if (is_eop0)
		{
		  u8 *msg = vlib_validate_buffer (vm, bi_sop0,
						  /* follow_buffer_next */ 1);
		  ASSERT (!msg);
		}
	      if (is_eop1)
		{
		  u8 *msg = vlib_validate_buffer (vm, bi_sop1,
						  /* follow_buffer_next */ 1);
		  ASSERT (!msg);
		}
	    }
	  if (0)		/* "Dave" version */
	    {
	      u32 bi_sop0 = is_sop ? bi0 : bi_sop;
	      u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;

	      if (is_eop0)
		{
		  to_next[0] = bi_sop0;
		  to_next++;
		  n_left_to_next--;

		  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
						   to_next, n_left_to_next,
						   bi_sop0, next0);
		}
	      if (is_eop1)
		{
		  to_next[0] = bi_sop1;
		  to_next++;
		  n_left_to_next--;

		  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
						   to_next, n_left_to_next,
						   bi_sop1, next1);
		}
	      is_sop = is_eop1;
	      bi_sop = bi_sop1;
	    }
	  if (1)		/* "Eliot" version */
	    {
	      /* Speculatively enqueue to cached next. */
	      u8 saved_is_sop = is_sop;
	      u32 bi_sop_save = bi_sop;

	      bi_sop = saved_is_sop ? bi0 : bi_sop;
	      to_next[0] = bi_sop;
	      to_next += is_eop0;
	      n_left_to_next -= is_eop0;

	      bi_sop = is_eop0 ? bi1 : bi_sop;
	      to_next[0] = bi_sop;
	      to_next += is_eop1;
	      n_left_to_next -= is_eop1;

	      is_sop = is_eop1;

	      if (PREDICT_FALSE
		  (!(next0 == next_index && next1 == next_index)))
		{
		  /* Undo speculation. */
		  to_next -= is_eop0 + is_eop1;
		  n_left_to_next += is_eop0 + is_eop1;

		  /* Re-do both descriptors being careful about where we enqueue. */
		  bi_sop = saved_is_sop ? bi0 : bi_sop_save;
		  if (is_eop0)
		    {
		      if (next0 != next_index)
			vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
		      else
			{
			  to_next[0] = bi_sop;
			  to_next += 1;
			  n_left_to_next -= 1;
			}
		    }

		  bi_sop = is_eop0 ? bi1 : bi_sop;
		  if (is_eop1)
		    {
		      if (next1 != next_index)
			vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
		      else
			{
			  to_next[0] = bi_sop;
			  to_next += 1;
			  n_left_to_next -= 1;
			}
		    }

		  /* Switch cached next index when next for both packets is the same. */
		  if (is_eop0 && is_eop1 && next0 == next1)
		    {
		      vlib_put_next_frame (vm, node, next_index,
					   n_left_to_next);
		      next_index = next0;
		      vlib_get_next_frame (vm, node, next_index,
					   to_next, n_left_to_next);
		    }
		}
	    }
	}

      /* Bail out of dual loop and proceed with single loop. */
    found_hw_owned_descriptor_x2:

      while (n_descriptors_left > 0 && n_left_to_next > 0)
	{
	  vlib_buffer_t *b0;
	  vlib_buffer_t *f0;
	  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
	  u8 is_eop0, error0, next0;
	  ixge_descriptor_t d0;

	  d0.as_u32x4 = d[0].as_u32x4;

	  s20 = d0.rx_from_hw.status[2];
	  s00 = d0.rx_from_hw.status[0];

	  /* NOTE(review): the "hardware still owns this descriptor"
	     condition line was dropped by the extraction. */
	    goto found_hw_owned_descriptor_x1;

	  bi0 = to_rx[0];
	  ASSERT (to_add >= xm->rx_buffers_to_add);
	  fi0 = to_add[0];

	  to_rx[0] = fi0;
	  to_rx += 1;
	  to_add -= 1;

#if 0
#endif

	  b0 = vlib_get_buffer (vm, bi0);

	  /*
	   * Turn this on if you run into
	   * "bad monkey" contexts, and you want to know exactly
	   * which nodes they've visited...
	   */

	  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
	  /* NOTE(review): the call line "ixge_rx_next_and_error_from_status_x1"
	     was dropped by the extraction; only its arguments remain. */
	    (xd, s00, s20, &next0, &error0, &flags0);

	  next0 = is_sop ? next0 : next_index_sop;
	  next_index_sop = next0;

	  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);

	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;

	  b0->error = node->errors[error0];

	  /* NOTE(review): len0 computation line dropped by the extraction. */
	  n_bytes += len0;
	  n_packets += is_eop0;

	  /* Give new buffer to hardware. */
	  f0 = vlib_get_buffer (vm, fi0);
	  d[0].as_u32x4 = d0.as_u32x4;

	  d += 1;
	  n_descriptors_left -= 1;

	  /* Point to either l2 or l3 header depending on next. */
	  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
	  b0->current_length = len0 - l3_offset0;
	  b0->current_data = l3_offset0;

	  b_last->next_buffer = is_sop ? ~0 : bi0;
	  bi_last = bi0;
	  b_last = b0;

	  bi_sop = is_sop ? bi0 : bi_sop;

	  if (CLIB_DEBUG > 0 && is_eop0)
	    {
	      u8 *msg =
		vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
	      ASSERT (!msg);
	    }

	  if (0)		/* "Dave" version */
	    {
	      if (is_eop0)
		{
		  to_next[0] = bi_sop;
		  to_next++;
		  n_left_to_next--;

		  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
						   to_next, n_left_to_next,
						   bi_sop, next0);
		}
	    }
	  if (1)		/* "Eliot" version */
	    {
	      if (PREDICT_TRUE (next0 == next_index))
		{
		  to_next[0] = bi_sop;
		  to_next += is_eop0;
		  n_left_to_next -= is_eop0;
		}
	      else
		{
		  if (next0 != next_index && is_eop0)
		    vlib_set_next_frame_buffer (vm, node, next0, bi_sop);

		  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
		  next_index = next0;
		  vlib_get_next_frame (vm, node, next_index,
				       to_next, n_left_to_next);
		}
	    }
	  is_sop = is_eop0;
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

found_hw_owned_descriptor_x1:
  if (n_descriptors_left > 0)
    vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  /* Return unused replacement buffers to the cache. */
  _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;

  {
    u32 n_done = n_descriptors - n_descriptors_left;

    if (n_trace > 0 && n_done > 0)
      {
	u32 n = clib_min (n_trace, n_done);
	ixge_rx_trace (xm, xd, dq,
		       d_trace_save,
		       d_trace_buffers,
		       &dq->descriptors[start_descriptor_index], n);
	vlib_set_trace_count (vm, node, n_trace - n);
      }
    if (d_trace_save)
      {
	_vec_len (d_trace_save) = 0;
	_vec_len (d_trace_buffers) = 0;
      }

    /* Don't keep a reference to b_last if we don't have to.
       Otherwise we can over-write a next_buffer pointer after already haven
       enqueued a packet. */
    if (is_sop)
      {
	b_last->next_buffer = ~0;
	bi_last = ~0;
      }

    /* Persist chain/progress state for the next call. */
    dq->rx.n_descriptors_done_this_call = n_done;
    dq->rx.n_descriptors_done_total += n_done;
    dq->rx.is_start_of_packet = is_sop;
    dq->rx.saved_start_of_packet_buffer_index = bi_sop;
    dq->rx.saved_last_buffer_index = bi_last;
    dq->rx.saved_start_of_packet_next_index = next_index_sop;
    dq->rx.next_index = next_index;
    dq->rx.n_bytes += n_bytes;

    return n_packets;
  }
}
1818 
/* Drain one RX DMA queue: compare the hardware head against the software
 * head, process descriptors in at most two non-wrapping runs, then return
 * freed descriptors to the hardware and bump the interface RX counters.
 * Returns the number of packets received.
 *
 * NOTE(review): extraction dropped the signature line (presumably
 * "ixge_rx_queue (ixge_main_t * xm,") and the declaration of `dr`
 * (presumably "ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX,
 * queue_index);") — `dr` is used below. */
static uword
	       ixge_device_t * xd,
	       vlib_node_runtime_t * node, u32 queue_index)
{
  ixge_dma_queue_t *dq =
    vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
  uword n_packets = 0;
  u32 hw_head_index, sw_head_index;

  /* One time initialization. */
  if (!dq->rx.node)
    {
      dq->rx.node = node;
      dq->rx.is_start_of_packet = 1;
      dq->rx.saved_start_of_packet_buffer_index = ~0;
      dq->rx.saved_last_buffer_index = ~0;
    }

  dq->rx.next_index = node->cached_next_index;

  dq->rx.n_descriptors_done_total = 0;
  dq->rx.n_descriptors_done_this_call = 0;
  dq->rx.n_bytes = 0;

  /* Fetch head from hardware and compare to where we think we are. */
  hw_head_index = dr->head_index;
  sw_head_index = dq->head_index;

  if (hw_head_index == sw_head_index)
    goto done;

  /* Hardware wrapped past the end: process tail of ring first. */
  if (hw_head_index < sw_head_index)
    {
      u32 n_tried = dq->n_descriptors - sw_head_index;
      n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
      sw_head_index =
	ixge_ring_add (dq, sw_head_index,
		       dq->rx.n_descriptors_done_this_call);

      /* Stopped early (e.g. buffer shortage): don't process second run. */
      if (dq->rx.n_descriptors_done_this_call != n_tried)
	goto done;
    }
  if (hw_head_index >= sw_head_index)
    {
      u32 n_tried = hw_head_index - sw_head_index;
      n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
      sw_head_index =
	ixge_ring_add (dq, sw_head_index,
		       dq->rx.n_descriptors_done_this_call);
    }

done:
  dq->head_index = sw_head_index;
  dq->tail_index =
    ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);

  /* Give tail back to hardware. */
  /* NOTE(review): a line was dropped here by the extraction (likely a
     compiler/write barrier before the tail register write). */

  dr->tail_index = dq->tail_index;

  /* NOTE(review): the vlib_increment_combined_counter call line was
     dropped; only its argument list remains below. */
				 interface_main.combined_sw_if_counters +
				 0 /* thread_index */ ,
				 xd->vlib_sw_if_index, n_packets,
				 dq->rx.n_bytes);

  return n_packets;
}
1891 
/* Handle a non-queue interrupt bit i (bits 16..31 of the interrupt status
 * register).  Bit 20 is link status change and additionally kicks the
 * process node; all other bits are just event-logged.
 *
 * NOTE(review): extraction dropped the signature line — presumably
 * "ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)". */
static void
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;

  if (i != 20)
    {
      /* Generic event: log which of the 16 "other" interrupt causes fired. */
      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d, %s",.format_args = "i1t1",.n_enum_strings =
	  16,.enum_strings =
	{
	  "flow director",
	    "rx miss",
	    "pci exception",
	    "mailbox",
	    "link status change",
	    "linksec key exchange",
	    "manageability event",
	    "reserved23",
	    "sdp0",
	    "sdp1",
	    "sdp2",
	    "sdp3",
	    "ecc", "descriptor handler error", "tcp timer", "other",},};
      struct
      {
	u8 instance;
	u8 index;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->index = i - 16;	/* enum string index for bits 16..31 */
    }
  else
    {
      /* Link status change: read MAC link status; bit 30 = link up. */
      u32 v = r->xge_mac.link_status;
      uword is_up = (v & (1 << 30)) != 0;

      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d, link status change 0x%x",.format_args = "i4i4",};
      struct
      {
	u32 instance, link_status;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->link_status = v;
      /* NOTE(review): lines dropped by the extraction here — presumably
         saving link status and signalling the ixge process node with the
         encoded (is_up, hw_if_index) event below. */

	       ((is_up << 31) | xd->vlib_hw_if_index));
    }
}
1951 
1953 clean_block (u32 * b, u32 * t, u32 n_left)
1954 {
1955  u32 *t0 = t;
1956 
1957  while (n_left >= 4)
1958  {
1959  u32 bi0, bi1, bi2, bi3;
1960 
1961  t[0] = bi0 = b[0];
1962  b[0] = 0;
1963  t += bi0 != 0;
1964 
1965  t[0] = bi1 = b[1];
1966  b[1] = 0;
1967  t += bi1 != 0;
1968 
1969  t[0] = bi2 = b[2];
1970  b[2] = 0;
1971  t += bi2 != 0;
1972 
1973  t[0] = bi3 = b[3];
1974  b[3] = 0;
1975  t += bi3 != 0;
1976 
1977  b += 4;
1978  n_left -= 4;
1979  }
1980 
1981  while (n_left > 0)
1982  {
1983  u32 bi0;
1984 
1985  t[0] = bi0 = b[0];
1986  b[0] = 0;
1987  t += bi0 != 0;
1988  b += 1;
1989  n_left -= 1;
1990  }
1991 
1992  return t - t0;
1993 }
1994 
static void
ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
{
  /* Reclaim completed TX descriptors for one queue: read the hardware's
     head write-back location, compute how many buffers are done, gather
     their indices with clean_block() and free them in bulk. */
  vlib_main_t *vm = xm->vlib_main;
  ixge_dma_queue_t *dq =
    vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
  u32 n_clean, *b, *t, *t0;
  i32 n_hw_owned_descriptors;
  i32 first_to_clean, last_to_clean;
  u64 hwbp_race = 0;

  /* Handle case where head write back pointer update
   * arrives after the interrupt during high PCI bus loads.
   */
  while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
	 dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
    {
      hwbp_race++;
      if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
	{
	  ELOG_TYPE_DECLARE (e) =
	  {
	    .function = (char *) __FUNCTION__,.format =
	      "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",.format_args
	      = "i4i4i4i4",};
	  struct
	  {
	    u32 instance, head_index, tail_index, n_buffers_on_ring;
	  } *ed;
	  ed = ELOG_DATA (&vm->elog_main, e);
	  ed->instance = xd->device_index;
	  ed->head_index = dq->head_index;
	  ed->tail_index = dq->tail_index;
	  ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
	}
    }

  /* Adopt the DMA-written head; everything before it is done. */
  dq->head_index = dq->tx.head_index_write_back[0];
  n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
  ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
  n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;

  if (IXGE_HWBP_RACE_ELOG && hwbp_race)
    {
      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",.format_args
	  = "i4i4i4i4i4",};
      struct
      {
	u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->head_index = dq->head_index;
      ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
      ed->n_clean = n_clean;
      ed->retries = hwbp_race;
    }

  /*
   * This function used to wait until hardware owned zero descriptors.
   * At high PPS rates, that doesn't happen until the TX ring is
   * completely full of descriptors which need to be cleaned up.
   * That, in turn, causes TX ring-full drops and/or long RX service
   * interruptions.
   */
  if (n_clean == 0)
    return;

  /* Clean the n_clean descriptors prior to the reported hardware head */
  last_to_clean = dq->head_index - 1;
  last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
    last_to_clean;

  first_to_clean = (last_to_clean) - (n_clean - 1);
  first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
    first_to_clean;

  /* NOTE(review): a line was dropped here by the extraction — presumably
     a vec_validate of xm->tx_buffers_pending_free before use. */
  t0 = t = xm->tx_buffers_pending_free;
  b = dq->descriptor_buffer_indices + first_to_clean;

  /* Wrap case: clean from first to end, then start to last */
  if (first_to_clean > last_to_clean)
    {
      /* NOTE(review): "(dq->n_descriptors - 1) - first_to_clean" looks
         one short of the n_descriptors - first_to_clean entries between
         first_to_clean and the end of the ring — possible off-by-one;
         confirm against upstream before changing. */
      t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
      first_to_clean = 0;
      b = dq->descriptor_buffer_indices;
    }

  /* Typical case: clean from first to last */
  if (first_to_clean <= last_to_clean)
    t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);

  if (t > t0)
    {
      u32 n = t - t0;
      vlib_buffer_free_no_next (vm, t0, n);
      ASSERT (dq->tx.n_buffers_on_ring >= n);
      dq->tx.n_buffers_on_ring -= n;
      _vec_len (xm->tx_buffers_pending_free) = 0;
    }
}
2100 
/* RX queue interrupts 0 thru 7; TX 8 thru 15. */
/* NOTE(review): the extraction dropped the declarator line of each of
   the six small helpers below; names inferred from their call sites in
   ixge_device_input are noted above each body — verify against upstream. */

/* presumably ixge_interrupt_is_rx_queue (uword i) */
{
  return i < 8;
}

/* presumably ixge_interrupt_is_tx_queue (uword i) */
{
  return i >= 8 && i < 16;
}

/* presumably ixge_tx_queue_to_interrupt (uword i) */
{
  return 8 + i;
}

/* presumably ixge_rx_queue_to_interrupt (uword i) */
{
  return 0 + i;
}

/* presumably ixge_interrupt_rx_queue (uword i); an ASSERT line was
   dropped before the return. */
{
  return i - 0;
}

/* presumably ixge_interrupt_tx_queue (uword i); an ASSERT line was
   dropped before the return. */
{
  return i - 8;
}
2139 
/* Service one device: read+acknowledge the interrupt status register and
 * dispatch each set bit to the RX queue handler, TX queue cleaner, or the
 * generic interrupt handler.  Returns the number of RX packets processed.
 *
 * NOTE(review): extraction dropped the signature line (presumably
 * "ixge_device_input (ixge_main_t * xm,"), the status read/ack lines,
 * and the "if (ixge_interrupt_is_rx_queue (i))" line inside the loop. */
static uword
		   ixge_device_t * xd, vlib_node_runtime_t * node)
{
  ixge_regs_t *r = xd->regs;
  u32 i, s;
  uword n_rx_packets = 0;

  if (s)

  /* *INDENT-OFF* */
  foreach_set_bit (i, s, ({
    n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));

    else if (ixge_interrupt_is_tx_queue (i))
      ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));

    else
      ixge_interrupt (xm, xd, i);
  }));
  /* *INDENT-ON* */

  return n_rx_packets;
}
2167 
/* Input node function for all ixge devices.  In interrupt mode, services
 * only devices flagged in node->runtime_data[0] and re-enables their
 * interrupts; in polling mode, services every device.  Returns total RX
 * packets.
 *
 * NOTE(review): extraction dropped the signature line — presumably
 * "ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node,
 * vlib_frame_t * f)". */
static uword
{
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd;
  uword n_rx_packets = 0;

  if (node->state == VLIB_NODE_STATE_INTERRUPT)
    {
      uword i;

      /* Loop over devices with interrupts. */
      /* *INDENT-OFF* */
      foreach_set_bit (i, node->runtime_data[0], ({
	xd = vec_elt_at_index (xm->devices, i);
	n_rx_packets += ixge_device_input (xm, xd, node);

	/* Re-enable interrupts since we're going to stay in interrupt mode. */
	if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	  xd->regs->interrupt.enable_write_1_to_set = ~0;
      }));
      /* *INDENT-ON* */

      /* Clear mask of devices with pending interrupts. */
      node->runtime_data[0] = 0;
    }
  else
    {
      /* Poll all devices for input/interrupts. */
      vec_foreach (xd, xm->devices)
      {
	n_rx_packets += ixge_device_input (xm, xd, node);

	/* Re-enable interrupts when switching out of polling mode. */
	/* NOTE(review): the flag test and register write lines were
	   dropped by the extraction. */
	if (node->flags &
      }
    }

  return n_rx_packets;
}
2210 
/* Human-readable strings for the IXGE_ERROR_* counters, generated from
 * the error list macro.
 * NOTE(review): the macro invocation line (presumably "foreach_ixge_error")
 * between the #define and #undef was dropped by the extraction. */
static char *ixge_error_strings[] = {
#define _(n,s) s,
#undef _
};
2216 
/* *INDENT-OFF* */
/* Registration of the ixge input node: starts disabled and is enabled
 * when hardware is detected; RX packets are dispatched to the next
 * nodes listed below (drop, ethernet, ip4, ip6). */
VLIB_REGISTER_NODE (ixge_input_node, static) = {
  .function = ixge_input,
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "ixge-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_ixge_rx_dma_trace,

  .n_errors = IXGE_N_ERROR,
  .error_strings = ixge_error_strings,

  .n_next_nodes = IXGE_RX_N_NEXT,
  .next_nodes = {
    [IXGE_RX_NEXT_DROP] = "error-drop",
    [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
    [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
    [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
  },
};

/* *INDENT-ON* */
2242 
/* Format the interface name "TenGigabitEthernetD/B/S/F" from the device's
 * PCI address; va_arg is the device instance index.
 * NOTE(review): a line was dropped by the extraction before the va_arg —
 * presumably the declaration of `vm` used in vlib_pci_get_addr. */
static u8 *
format_ixge_device_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
  return format (s, "TenGigabitEthernet%x/%x/%x/%x",
		 addr->domain, addr->bus, addr->slot, addr->function);
}
2254 
/* Per-counter attribute flags used by ixge_update_counters. */
#define IXGE_COUNTER_IS_64_BIT (1 << 0)
#define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)

/* Flag table generated from the counter list macro.
 * NOTE(review): the extraction dropped the array declarator line
 * (presumably "static u8 ixge_counter_flags[] = {") and the macro
 * invocation (presumably "foreach_ixge_counter"). */
#define _(a,f) 0,
#define _64(a,f) IXGE_COUNTER_IS_64_BIT,
#undef _
#undef _64
};
2265 
/* Accumulate the device's hardware statistics registers into
 * xd->counters.  Counter register offsets come from the counter list
 * macro; 64-bit counters read a second high word.
 *
 * NOTE(review): extraction dropped the signature line (presumably
 * "ixge_update_counters (ixge_device_t * xd)"), the macro invocation
 * inside reg_offsets, and the conditional lines guarding the
 * clear-on-read write and the 64-bit high-word read. */
static void
{
  /* Byte offset for counter registers. */
  static u32 reg_offsets[] = {
#define _(a,f) (a) / sizeof (u32),
#define _64(a,f) _(a,f)
#undef _
#undef _64
  };
  volatile u32 *r = (volatile u32 *) xd->regs;
  int i;

  for (i = 0; i < ARRAY_LEN (xd->counters); i++)
    {
      u32 o = reg_offsets[i];
      xd->counters[i] += r[o];
	r[o] = 0;
	xd->counters[i] += (u64) r[o + 1] << (u64) 32;
    }
}
2290 
/* Format a PCI device id as its symbolic name from the device-id list
 * macro, or "unknown 0x%x" if unrecognized.
 * NOTE(review): the macro invocation line inside the switch (presumably
 * "foreach_ixge_pci_device_id") was dropped by the extraction. */
static u8 *
format_ixge_device_id (u8 * s, va_list * args)
{
  u32 device_id = va_arg (*args, u32);
  char *t = 0;
  switch (device_id)
    {
#define _(f,n) case n: t = #f; break;
#undef _
    default:
      t = 0;
      break;
    }
  if (t == 0)
    s = format (s, "unknown 0x%x", device_id);
  else
    s = format (s, "%s", t);
  return s;
}
2311 
/* Format link up/down, mode and speed decoded from the MAC link status
 * register (bit 30 = up, bits 26-27 = mode, bits 28-29 = speed).
 * NOTE(review): the extraction dropped the line declaring `v` —
 * presumably "u32 v = xd->regs->xge_mac.link_status;". */
static u8 *
format_ixge_link_status (u8 * s, va_list * args)
{
  ixge_device_t *xd = va_arg (*args, ixge_device_t *);

  s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");

  {
    char *modes[] = {
      "1g", "10g parallel", "10g serial", "autoneg",
    };
    char *speeds[] = {
      "unknown", "100m", "1g", "10g",
    };
    s = format (s, ", mode %s, speed %s",
		modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
  }

  return s;
}
2333 
/* Format the full "show hardware" output for one device: chip id, link
 * status, PCIe info, PHY/SFP identification, RX/TX queue 0 ring state
 * and all non-zero hardware counters.
 * NOTE(review): several lines were dropped by the extraction (e.g. the
 * `vm` declaration, parts of the format calls, the `dq` declaration in
 * the ring-state section and the counter list macro invocation). */
static u8 *
format_ixge_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  CLIB_UNUSED (int verbose) = va_arg (*args, int);
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
  ixge_phy_t *phy = xd->phys + xd->phy_index;
  u32 indent = format_get_indent (s);

  /* Refresh counters before displaying them. */
  ixge_update_counters (xd);

  s = format (s, "Intel 8259X: id %U\n%Ulink %U",
	      format_white_space, indent + 2, format_ixge_link_status, xd);

  {

    vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);

    if (d)
      s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
  }

  s = format (s, "\n%U", format_white_space, indent + 2);
  /* Identify by MDIO PHY if present, else by SFP EEPROM. */
  if (phy->mdio_address != ~0)
    s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
  else if (xd->sfp_eeprom.id == SFP_ID_SFP)
    s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
  else
    s = format (s, "PHY not found");

  /* FIXME */
  {
    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
    u32 hw_head_index = dr->head_index;
    u32 sw_head_index = dq->head_index;
    u32 nitems;

    nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
    s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
		format_white_space, indent + 2, nitems, dq->n_descriptors);

    s = format (s, "\n%U%d buffers in driver rx cache",
		format_white_space, indent + 2,
		vec_len (xm->rx_buffers_to_add));

    s = format (s, "\n%U%d buffers on tx queue 0 ring",
		format_white_space, indent + 2,
		xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
  }
  {
    u32 i;
    u64 v;
    static char *names[] = {
#define _(a,f) #f,
#define _64(a,f) _(a,f)
#undef _
#undef _64
    };

    /* Show only counters that changed since the last clear. */
    for (i = 0; i < ARRAY_LEN (names); i++)
      {
	v = xd->counters[i] - xd->counters_last_clear[i];
	if (v != 0)
	  s = format (s, "\n%U%-40U%16Ld",
		      format_white_space, indent + 2,
		      format_c_identifier, names[i], v);
      }
  }

  return s;
}
2413 
/* Clear counters for a device by snapshotting the current hardware
 * totals into counters_last_clear (display subtracts the snapshot).
 * NOTE(review): the signature line was dropped by the extraction —
 * presumably "ixge_clear_hw_interface_counters (u32 instance)". */
static void
{
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
  ixge_update_counters (xd);
  memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
}
2422 
/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node
 */
/* NOTE(review): extraction dropped the first line of the signature
 * (presumably "ixge_set_interface_next_node (vnet_main_t * vnm, u32
 * hw_if_index,"), the `xd` declaration, and the assignment target of
 * the vlib_node_add_next result (presumably
 * "xd->per_interface_next_index ="). */
static void
			     u32 node_index)
{
  ixge_main_t *xm = &ixge_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

    vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
}
2445 
2446 
/* *INDENT-OFF* */
/* Device class registration wiring ixge's TX, formatting, counter-clear,
 * admin up/down and redirect callbacks into vnet.
 * NOTE(review): the opening line (presumably
 * "VNET_DEVICE_CLASS (ixge_device_class) = {") was dropped by the
 * extraction. */
  .name = "ixge",
  .tx_function = ixge_interface_tx,
  .format_device_name = format_ixge_device_name,
  .format_device = format_ixge_device,
  .format_tx_trace = format_ixge_tx_dma_trace,
  .clear_counters = ixge_clear_hw_interface_counters,
  .admin_up_down_function = ixge_interface_admin_up_down,
  .rx_redirect_to_node = ixge_set_interface_next_node,
};
/* *INDENT-ON* */
2459 
2460 #define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
2461 
/* Initialize one RX or TX DMA queue: allocate and zero the descriptor
 * ring in physmem, pre-populate RX descriptors with buffers (or TX
 * descriptors from the template plus a head write-back area), then
 * program the queue's hardware registers and enable DMA.  Returns 0 or
 * a clib error.
 *
 * NOTE(review): extraction dropped the signature line (presumably
 * "ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32
 * queue_index)") and several interior lines marked below. */
static clib_error_t *
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_dma_queue_t *dq;
  clib_error_t *error = 0;

  vec_validate (xd->dma_queues[rt], queue_index);
  dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);

  /* NOTE(review): lines dropped here — presumably defaulting
     xm->n_descriptors_per_cache_line from the cache line size. */
    CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);

  if (!xm->n_bytes_in_rx_buffer)

  if (!xm->n_descriptors[rt])
    xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;

  dq->queue_index = queue_index;
  dq->n_descriptors =
  dq->head_index = dq->tail_index = 0;

  /* NOTE(review): the physmem alloc call line for dq->descriptors was
     dropped; 128-byte alignment is per chip spec. */
				       sizeof (dq->descriptors[0]),
				       128 /* per chip spec */ );
  if (!dq->descriptors)
    return vlib_physmem_last_error (vm);

  clib_memset (dq->descriptors, 0,
	       dq->n_descriptors * sizeof (dq->descriptors[0]));

  if (rt == VLIB_RX)
    {
      u32 n_alloc, i;

      /* Fill every RX descriptor with a freshly-allocated buffer. */
      n_alloc = vlib_buffer_alloc (vm, dq->descriptor_buffer_indices,
      ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
      for (i = 0; i < n_alloc; i++)
	{
	  /* NOTE(review): lines writing each buffer's DMA address into
	     the descriptor were dropped by the extraction. */
	    (vm, vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]));
	}
    }
  else
    {
      u32 i;

      /* TX: allocate the head-index write-back word in physmem. */
      dq->tx.head_index_write_back =
      if (!dq->tx.head_index_write_back)
	return vlib_physmem_last_error (vm);

      for (i = 0; i < dq->n_descriptors; i++)
	dq->descriptors[i].tx = xm->tx_descriptor_template;

    }

  {
    ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
    u64 a;

    /* Program the ring's physical base address and size. */
    a = vlib_physmem_get_pa (vm, dq->descriptors);
    dr->descriptor_address[0] = a & 0xFFFFFFFF;
    dr->descriptor_address[1] = a >> (u64) 32;
    dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
    dq->head_index = dq->tail_index = 0;

    if (rt == VLIB_RX)
      {
	ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
	dr->rx_split_control =
	  ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
	   | ( /* lo free descriptor threshold (units of 64 descriptors) */
	      (1 << 22)) | ( /* descriptor type: advanced one buffer */
			    (1 << 25)) | ( /* drop if no descriptors available */
					  (1 << 28)));

	/* Give hardware all but last 16 cache lines' worth of descriptors. */
	dq->tail_index = dq->n_descriptors -
      }
    else
      {
	/* Make sure its initialized before hardware can get to it. */
	dq->tx.head_index_write_back[0] = dq->head_index;

	a = vlib_physmem_get_pa (vm, dq->tx.head_index_write_back);
	dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
	dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
      }

    /* DMA on 82599 does not work with [13] rx data write relaxed ordering
       and [12] undocumented set. */
    if (rt == VLIB_RX)
      dr->dca_control &= ~((1 << 13) | (1 << 12));

    if (rt == VLIB_TX)
      {
	xd->regs->tx_dma_control |= (1 << 0);
	dr->control |= ((32 << 0)	/* prefetch threshold */
			| (64 << 8)	/* host threshold */
			| (0 << 16) /* writeback threshold */ );
      }

    /* Enable this queue and wait for hardware to initialize
       before adding to tail. */
    if (rt == VLIB_TX)
      {
	dr->control |= 1 << 25;
	while (!(dr->control & (1 << 25)))
	  ;
      }

    /* Set head/tail indices and enable DMA. */
    dr->head_index = dq->head_index;
    dr->tail_index = dq->tail_index;
  }

  return error;
}
2593 
2594 static u32
2596 {
2597  ixge_device_t *xd;
2598  ixge_regs_t *r;
2599  u32 old;
2600  ixge_main_t *xm = &ixge_main;
2601 
2602  xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2603  r = xd->regs;
2604 
2605  old = r->filter_control;
2606 
2608  r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
2609  else
2610  r->filter_control = old & ~(1 << 9);
2611 
2612  return old;
2613 }
2614 
/* NOTE(review): this block is a doxygen-rendered extraction; the leading
   numbers are doxygen line numbers, and several hyperlinked source lines
   (the ethernet_register_interface call head at 2649, the
   vnet_get_hw_sw_interface call at 2659, line 2665, the
   ixge_throttle_queue_interrupt call head at 2685, and the
   interrupt-enable write at 2700) were dropped by the extraction.
   Code is left byte-identical; comments only. */
/* One-time init for every discovered device: reset the chip, mark
   firmware/software handoff done, init the PHY, register an ethernet
   interface using the MAC read from rx_ethernet_address0, set up RX/TX
   DMA queue 0, map both queues to interrupt bits 0 and 8, accept
   multicast/broadcast, raise the MAC max frame size, and (unless
   built for always-poll) enable interrupts. */
2615 static void
2617 {
2618  vnet_main_t *vnm = vnet_get_main ();
2619  ixge_device_t *xd;
2620 
2621  /* Reset chip(s). */
2622  vec_foreach (xd, xm->devices)
2623  {
2624  ixge_regs_t *r = xd->regs;
/* Bits 26 and 3 of the control register together request a full reset;
   hardware clears them when the reset completes. */
2625  const u32 reset_bit = (1 << 26) | (1 << 3);
2626 
2627  r->control |= reset_bit;
2628 
2629  /* No need to suspend. Timed to take ~1e-6 secs */
2630  while (r->control & reset_bit)
2631  ;
2632 
2633  /* Software loaded. */
2634  r->extended_control |= (1 << 28);
2635 
2636  ixge_phy_init (xd);
2637 
2638  /* Register ethernet interface. */
2639  {
2640  u8 addr8[6];
2641  u32 i, addr32[2];
2642  clib_error_t *error;
2643 
/* Unpack the 6-byte MAC from the two 32-bit halves of the
   rx_ethernet_address0 register pair, little-endian byte order. */
2644  addr32[0] = r->rx_ethernet_address0[0][0];
2645  addr32[1] = r->rx_ethernet_address0[0][1];
2646  for (i = 0; i < 6; i++)
2647  addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
2648 
2650  (vnm, ixge_device_class.index, xd->device_index,
2651  /* ethernet address */ addr8,
2653  if (error)
2654  clib_error_report (error);
2655  }
2656 
/* Cache the software interface index of the newly registered hw if. */
2657  {
2658  vnet_sw_interface_t *sw =
2660  xd->vlib_sw_if_index = sw->sw_if_index;
2661  }
2662 
2663  ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
2664 
2666 
2667  ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
2668 
2669  /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
2670  r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
2671  ixge_rx_queue_to_interrupt (0)) << 0);
2672 
2673  r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
2674  ixge_tx_queue_to_interrupt (0)) << 8);
2675 
2676  /* No use in getting too many interrupts.
2677  Limit them to one every 3/4 ring size at line rate
2678  min sized packets.
2679  No need for this since kernel/vlib main loop provides adequate interrupt
2680  limiting scheme. */
2681  if (0)
2682  {
/* 10 Gb/s divided by minimum frame (64 B) plus preamble/IFG (20 B). */
2683  f64 line_rate_max_pps =
2684  10e9 / (8 * (64 + /* interframe padding */ 20));
2686  .75 * xm->n_descriptors[VLIB_RX] /
2687  line_rate_max_pps);
2688  }
2689 
2690  /* Accept all multicast and broadcast packets. Should really add them
2691  to the dst_ethernet_address register array. */
2692  r->filter_control |= (1 << 10) | (1 << 8);
2693 
2694  /* Enable frames up to size in mac frame size register. */
2695  r->xge_mac.control |= 1 << 2;
/* Max frame size lives in the upper 16 bits: 9216 payload + 14 header. */
2696  r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
2697 
2698  /* Enable all interrupts. */
2699  if (!IXGE_ALWAYS_POLL)
2701  }
2702 }
2703 
2704 static uword
2706 {
2707  vnet_main_t *vnm = vnet_get_main ();
2708  ixge_main_t *xm = &ixge_main;
2709  ixge_device_t *xd;
2710  uword event_type, *event_data = 0;
2711  f64 timeout, link_debounce_deadline;
2712 
2713  ixge_device_init (xm);
2714 
2715  /* Clear all counters. */
2716  vec_foreach (xd, xm->devices)
2717  {
2718  ixge_update_counters (xd);
2719  clib_memset (xd->counters, 0, sizeof (xd->counters));
2720  }
2721 
2722  timeout = 30.0;
2723  link_debounce_deadline = 1e70;
2724 
2725  while (1)
2726  {
2727  /* 36 bit stat counters could overflow in ~50 secs.
2728  We poll every 30 secs to be conservative. */
2730 
2731  event_type = vlib_process_get_events (vm, &event_data);
2732 
2733  switch (event_type)
2734  {
2735  case EVENT_SET_FLAGS:
2736  /* 1 ms */
2737  link_debounce_deadline = vlib_time_now (vm) + 1e-3;
2738  timeout = 1e-3;
2739  break;
2740 
2741  case ~0:
2742  /* No events found: timer expired. */
2743  if (vlib_time_now (vm) > link_debounce_deadline)
2744  {
2745  vec_foreach (xd, xm->devices)
2746  {
2747  ixge_regs_t *r = xd->regs;
2748  u32 v = r->xge_mac.link_status;
2749  uword is_up = (v & (1 << 30)) != 0;
2750 
2752  (vnm, xd->vlib_hw_if_index,
2753  is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
2754  }
2755  link_debounce_deadline = 1e70;
2756  timeout = 30.0;
2757  }
2758  break;
2759 
2760  default:
2761  ASSERT (0);
2762  }
2763 
2764  if (event_data)
2765  _vec_len (event_data) = 0;
2766 
2767  /* Query stats every 30 secs. */
2768  {
2769  f64 now = vlib_time_now (vm);
2770  if (now - xm->time_last_stats_update > 30)
2771  {
2772  xm->time_last_stats_update = now;
2773  vec_foreach (xd, xm->devices) ixge_update_counters (xd);
2774  }
2775  }
2776  }
2777 
2778  return 0;
2779 }
2780 
2782  .function = ixge_process,
2783  .type = VLIB_NODE_TYPE_PROCESS,
2784  .name = "ixge-process",
2785 };
2786 
2787 clib_error_t *
2789 {
2790  ixge_main_t *xm = &ixge_main;
2791  clib_error_t *error;
2792 
2793  xm->vlib_main = vm;
2795  sizeof (xm->tx_descriptor_template));
2797  sizeof (xm->tx_descriptor_template_mask));
2802  xm->tx_descriptor_template_mask.status0 = 0xffff;
2803  xm->tx_descriptor_template_mask.status1 = 0x00003fff;
2804 
2810 
2811  error = vlib_call_init_function (vm, pci_bus_init);
2812 
2813  return error;
2814 }
2815 
2817 
2818 
2819 static void
2821 {
2822  uword private_data = vlib_pci_get_private_data (vm, h);
2823 
2825 
2826  /* Let node know which device is interrupting. */
2827  {
2828  vlib_node_runtime_t *rt =
2830  rt->runtime_data[0] |= 1 << private_data;
2831  }
2832 }
2833 
2834 static clib_error_t *
2836 {
2837  ixge_main_t *xm = &ixge_main;
2838  clib_error_t *error = 0;
2839  void *r;
2840  ixge_device_t *xd;
2841  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, h);
2843 
2844  error = vlib_pci_map_region (vm, h, 0, &r);
2845  if (error)
2846  return error;
2847 
2848  vec_add2 (xm->devices, xd, 1);
2849 
2850  if (vec_len (xm->devices) == 1)
2851  {
2852  ixge_input_node.function = ixge_input;
2853  }
2854 
2855  xd->pci_dev_handle = h;
2856  xd->device_id = d->device_id;
2857  xd->regs = r;
2858  xd->device_index = xd - xm->devices;
2859  xd->pci_function = addr->function;
2860  xd->per_interface_next_index = ~0;
2861 
2863 
2864  /* Chip found so enable node. */
2865  {
2868  ? VLIB_NODE_STATE_POLLING
2869  : VLIB_NODE_STATE_INTERRUPT));
2870 
2871  //dev->private_data = xd->device_index;
2872  }
2873 
2874  if (vec_len (xm->devices) == 1)
2875  {
2878  }
2879 
2880  error = vlib_pci_bus_master_enable (vm, h);
2881 
2882  if (error)
2883  return error;
2884 
2885  return vlib_pci_intr_enable (vm, h);
2886 }
2887 
2888 /* *INDENT-OFF* */
2889 PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
2890  .init_function = ixge_pci_init,
2891  .interrupt_handler = ixge_pci_intr_handler,
2892  .supported_devices = {
2893 #define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
2895 #undef _
2896  { 0 },
2897  },
2898 };
2899 /* *INDENT-ON* */
2900 
2901 void
2903 {
2905 
2906  switch (next)
2907  {
2911  r->next_nodes[next] = name;
2912  break;
2913 
2914  default:
2915  clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
2916  break;
2917  }
2918 }
2919 
2920 /* *INDENT-OFF* */
/* Plugin registration: disabled by default because this driver is
   experimental and unsupported (see the warning at the top of the file). */
2921 VLIB_PLUGIN_REGISTER () = {
2922  .version = VPP_BUILD_VER,
2923  .default_disabled = 1,
2924  .description = "Intel 82599 Family Native Driver (experimental)",
2925 };
2926 #endif
2927 
2928 /* *INDENT-ON* */
2929 
2930 /*
2931  * fd.io coding-style-patch-verification: ON
2932  *
2933  * Local Variables:
2934  * eval: (c-set-style "gnu")
2935  * End:
2936  */
u32 mdio_address
Definition: ixge.h:1113
static void ixge_update_counters(ixge_device_t *xd)
Definition: ixge.c:2267
clib_error_t * pci_bus_init(vlib_main_t *vm)
Definition: pci.c:252
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:439
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP
Definition: ixge.h:107
#define IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
Definition: ixge.h:138
u32 process_node_index
Definition: ixge.h:1256
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT
Definition: ixge.h:105
typedef address
Definition: ip_types.api:30
static clib_error_t * ixge_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: ixge.c:321
#define IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR
Definition: ixge.h:119
static clib_error_t * vlib_pci_intr_enable(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.h:239
u32 flags
Definition: vhost_user.h:115
#define clib_min(x, y)
Definition: clib.h:295
static void ixge_software_firmware_sync_release(ixge_device_t *xd, u32 sw_mask)
Definition: ixge.c:111
#define CLIB_UNUSED(x)
Definition: clib.h:82
ixge_rx_to_hw_descriptor_t rx_to_hw
Definition: ixge.h:168
static void ixge_phy_init(ixge_device_t *xd)
Definition: ixge.c:376
static clib_error_t * vlib_pci_bus_master_enable(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.h:271
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:156
ixge_tx_descriptor_t tx_descriptor_template
Definition: ixge.h:1259
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread Waits for an event, or for the indicated number of seconds...
Definition: node_funcs.h:703
u32 enable_write_1_to_set
Definition: ixge.h:245
a
Definition: bitmap.h:538
u32 n_descriptor_bytes
Definition: ixge.h:30
static u32 ixge_flag_change(vnet_main_t *vnm, vnet_hw_interface_t *hw, u32 flags)
Definition: ixge.c:2595
#define IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
Definition: ixge.h:120
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:220
u8 runtime_data[0]
Function dependent node-runtime data.
Definition: node.h:522
u32 vlib_hw_if_index
Definition: ixge.h:1222
u32 n_descriptors[VLIB_N_RX_TX]
Definition: ixge.h:1248
u32 link_status_at_last_link_change
Definition: ixge.h:1231
static void vlib_set_next_frame_buffer(vlib_main_t *vm, vlib_node_runtime_t *node, u32 next_index, u32 buffer_index)
Definition: node_funcs.h:401
u32 head_index
Definition: ixge.h:42
vnet_main_t * vnet_get_main(void)
Definition: misc.c:47
static uword vlib_buffer_get_pa(vlib_main_t *vm, vlib_buffer_t *b)
Definition: buffer_funcs.h:421
static void ixge_semaphore_release(ixge_device_t *xd)
Definition: ixge.c:82
#define PREDICT_TRUE(x)
Definition: clib.h:112
static void * vlib_physmem_alloc(vlib_main_t *vm, uword n_bytes)
Definition: physmem_funcs.h:73
u8 is_start_of_packet
Definition: ixge.c:569
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:110
unsigned long u64
Definition: types.h:89
struct ixge_regs_t::@458 interrupt
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED
Definition: ixge.h:115
struct ixge_dma_queue_t::@481::@484 rx
u32 * descriptor_buffer_indices
Definition: ixge.h:1134
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
static void vlib_node_set_interrupt_pending(vlib_main_t *vm, u32 node_index)
Definition: node_funcs.h:197
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6
Definition: ixge.h:104
u32 queue_mapping[64]
Definition: ixge.h:266
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:255
vlib_buffer_t buffer
Definition: ixge.c:572
static u8 * format_ixge_link_status(u8 *s, va_list *args)
Definition: ixge.c:2313
static clib_error_t * vlib_physmem_last_error(struct vlib_main_t *vm)
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
u32 phy_command
Definition: ixge.h:368
u32 * rx_buffers_to_add
Definition: ixge.h:1264
u8 data[0]
Packet data.
Definition: buffer.h:181
#define XGE_PHY_CONTROL
Definition: ixge.c:52
static vlib_node_registration_t ixge_input_node
(constructor) VLIB_REGISTER_NODE (ixge_input_node)
Definition: ixge.c:56
PCI_REGISTER_DEVICE(ixge_pci_device_registration, static)
static uword ixge_rx_queue_no_wrap(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, u32 start_descriptor_index, u32 n_descriptors)
Definition: ixge.c:1341
ixge_device_t * devices
Definition: ixge.h:1245
static uword sfp_eeprom_is_valid(sfp_eeprom_t *e)
Definition: sfp.h:90
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:560
for(i=1;i<=collision_buckets;i++)
int i
ixge_descriptor_t before
Definition: ixge.c:561
#define IXGE_N_BYTES_IN_RX_BUFFER
Definition: ixge.c:2460
static void ixge_throttle_queue_interrupt(ixge_regs_t *r, u32 queue_interrupt_index, f64 inter_interrupt_interval_in_secs)
Definition: ixge.h:994
#define XGE_PHY_ID2
Definition: ixge.c:51
static u32 format_get_indent(u8 *s)
Definition: format.h:72
static void ixge_i2c_put_bits(i2c_bus_t *b, int scl, int sda)
Definition: ixge.c:171
clib_memset(h->entries, 0, sizeof(h->entries[0])*entries)
u32 per_interface_next_index
Definition: ixge.h:1208
static uword ixge_interrupt_is_rx_queue(uword i)
Definition: ixge.c:2103
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
u32 extended_control
Definition: ixge.h:192
u32 software_semaphore
Definition: ixge.h:890
static void ixge_i2c_get_bits(i2c_bus_t *b, int *scl, int *sda)
Definition: ixge.c:184
struct ixge_dma_queue_t::@481::@483 tx
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
static void ixge_write_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index, u32 v)
Definition: ixge.c:164
ixge_rx_next_t
Definition: ixge.h:1273
static u8 ixge_counter_flags[]
Definition: ixge.c:2258
u8 *( format_function_t)(u8 *s, va_list *args)
Definition: format.h:48
static vlib_buffer_known_state_t vlib_buffer_is_known(vlib_main_t *vm, u32 buffer_index)
Definition: buffer_funcs.h:462
u32 rx_ethernet_address0[16][2]
Definition: ixge.h:581
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:469
u32 tx_dma_control
Definition: ixge.h:500
static uword ixge_ring_add(ixge_dma_queue_t *q, u32 i0, u32 i1)
Definition: ixge.c:1007
vhost_vring_addr_t addr
Definition: vhost_user.h:121
u32 * tx_buffers_pending_free
Definition: ixge.h:1262
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
Definition: node_funcs.h:1122
unsigned char u8
Definition: types.h:56
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET
Definition: ixge.h:111
u32 filter_control
Definition: ixge.h:522
#define foreach_set_bit(var, mask, body)
Definition: bitops.h:166
double f64
Definition: types.h:142
u32 i2c_control
Definition: ixge.h:208
static vnet_sw_interface_t * vnet_get_hw_sw_interface(vnet_main_t *vnm, u32 hw_if_index)
u32 phy_data
Definition: ixge.h:369
static void ixge_rx_next_and_error_from_status_x1(ixge_device_t *xd, u32 s00, u32 s02, u8 *next0, u8 *error0, u32 *flags0)
Definition: ixge.c:632
#define vec_add(V, E, N)
Add N elements to end of vector V (no header, unspecified alignment)
Definition: vec.h:598
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:114
static u32 clean_block(u32 *b, u32 *t, u32 n_left)
Definition: ixge.c:1953
vlib_pci_dev_handle_t pci_dev_handle
Definition: ixge.h:1211
static uword vlib_process_suspend(vlib_main_t *vm, f64 dt)
Suspend a vlib cooperative multi-tasking thread for a period of time.
Definition: node_funcs.h:452
u32 ixge_read_write_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index, u32 v, u32 is_read)
Definition: ixge.c:120
u32 rx_split_control
Definition: ixge.h:51
static uword ixge_rx_queue_to_interrupt(uword i)
Definition: ixge.c:2121
vlib_rx_or_tx_t
Definition: defs.h:44
ixge_tx_descriptor_t descriptor
Definition: ixge.c:848
i2c_bus_t i2c_bus
Definition: ixge.h:1233
static u8 * format_ixge_rx_from_hw_descriptor(u8 *s, va_list *va)
Definition: ixge.c:445
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index The first 64 bytes of buffer contains most header informatio...
Definition: buffer_funcs.h:440
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:163
static clib_error_t * ixge_sfp_phy_init_from_eeprom(ixge_device_t *xd, u16 sfp_type)
Definition: ixge.c:228
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
Definition: node_funcs.h:546
static void * vlib_physmem_alloc_aligned(vlib_main_t *vm, uword n_bytes, uword alignment)
Definition: physmem_funcs.h:56
#define always_inline
Definition: clib.h:98
static void ixge_rx_trace(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, ixge_descriptor_t *before_descriptors, u32 *before_buffers, ixge_descriptor_t *after_descriptors, uword n_descriptors)
Definition: ixge.c:735
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:113
u32 dca_control
Definition: ixge.h:40
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
Definition: ixge.h:114
ixge_phy_t phys[2]
Definition: ixge.h:1228
#define IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR
Definition: ixge.h:121
ixge_dma_regs_t tx_dma[128]
Definition: ixge.h:616
u32 link_partner_ability
Definition: ixge.h:343
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN
Definition: ixge.h:112
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u8 * format_hex_bytes(u8 *s, va_list *va)
Definition: std-formats.c:84
vnet_device_class_t ixge_device_class
ixge_main_t ixge_main
Definition: ixge.c:55
static vlib_node_registration_t ixge_process_node
Definition: ixge.c:57
u32 queue_index
Definition: ixge.h:1131
#define vec_resize(V, N)
Resize a vector (no header, unspecified alignment) Add N elements to end of given vector V...
Definition: vec.h:242
unsigned int u32
Definition: types.h:88
#define vec_end(v)
End (last data address) of vector.
#define clib_error_create(args...)
Definition: error.h:96
#define vlib_call_init_function(vm, x)
Definition: init.h:260
#define VLIB_FRAME_SIZE
Definition: node.h:376
f64 time_last_stats_update
Definition: ixge.h:1266
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2
Definition: ixge.h:100
static clib_error_t * ixge_pci_init(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: ixge.c:2835
static void ixge_rx_next_and_error_from_status_x2(ixge_device_t *xd, u32 s00, u32 s02, u32 s10, u32 s12, u8 *next0, u8 *error0, u32 *flags0, u8 *next1, u8 *error1, u32 *flags1)
Definition: ixge.c:671
u8 * format_c_identifier(u8 *s, va_list *va)
Definition: std-formats.c:258
static uword ixge_process(vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
Definition: ixge.c:2705
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
vlib_pci_device_info_t * vlib_pci_get_device_info(vlib_main_t *vm, vlib_pci_addr_t *addr, clib_error_t **error)
Definition: pci.c:202
static void ixge_sfp_enable_disable_10g(ixge_device_t *xd, uword enable)
Definition: ixge.c:218
u8 id
Definition: sfp.h:56
uword private_data
Definition: i2c.h:44
static u8 * format_ixge_tx_descriptor(u8 *s, va_list *va)
Definition: ixge.c:516
format_function_t format_vnet_sw_interface_name
#define IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET(l)
Definition: ixge.h:145
static uword ixge_interface_tx(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
Definition: ixge.c:1199
u16 state
Input node state.
Definition: node.h:510
static void ixge_clear_hw_interface_counters(u32 instance)
Definition: ixge.c:2415
static void ixge_sfp_device_up_down(ixge_device_t *xd, uword is_up)
Definition: ixge.c:267
format_function_t format_vnet_buffer
Definition: buffer.h:441
#define foreach_ixge_counter
Definition: ixge.h:1016
static uword ixge_tx_no_wrap(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, u32 *buffers, u32 start_descriptor_index, u32 n_descriptors, ixge_tx_state_t *tx_state)
Definition: ixge.c:1035
u64 counters[IXGE_N_COUNTER]
Definition: ixge.h:1237
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
Definition: node_funcs.h:964
ixge_tx_descriptor_t tx
Definition: ixge.h:170
#define IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET
Definition: ixge.h:137
void vlib_pci_set_private_data(vlib_main_t *vm, vlib_pci_dev_handle_t h, uword private_data)
Definition: pci.c:155
#define IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED
Definition: ixge.h:133
static void ixge_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Definition: ixge.c:2428
unsigned short u16
Definition: types.h:57
#define XGE_PHY_ID1
Definition: ixge.c:50
static u8 * format_ixge_device_id(u8 *s, va_list *args)
Definition: ixge.c:2292
#define XGE_PHY_DEV_TYPE_PMA_PMD
Definition: ixge.c:48
#define ELOG_DATA(em, f)
Definition: elog.h:481
#define IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED
Definition: ixge.h:132
format_function_t format_sfp_eeprom
Definition: sfp.h:133
#define PREDICT_FALSE(x)
Definition: clib.h:111
u16 n_bytes_this_buffer
Definition: ixge.h:129
sfp_eeprom_t sfp_eeprom
Definition: ixge.h:1234
vnet_main_t vnet_main
Definition: misc.c:43
u32 control
Definition: ixge.h:62
uword vlib_pci_get_private_data(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.c:148
u32 descriptor_address[2]
Definition: ixge.h:29
static u16 ixge_read_eeprom(ixge_device_t *xd, u32 address)
Definition: ixge.c:196
format_function_t * format_buffer
Definition: node.h:356
VNET_DEVICE_CLASS(ixge_device_class)
u32 vlib_pci_dev_handle_t
Definition: pci.h:97
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:368
u32 software_firmware_sync
Definition: ixge.h:894
u8 name[64]
Definition: memclnt.api:152
static void ixge_sfp_phy_init(ixge_device_t *xd)
Definition: ixge.c:349
vlib_node_runtime_t * node
Definition: ixge.c:902
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE
Definition: ixge.h:110
u32 vlib_sw_if_index
Definition: ixge.h:1222
u32 head_index
Definition: ixge.h:1128
u32 auto_negotiation_control2
Definition: ixge.h:465
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
Definition: format.c:97
ixge_tx_descriptor_t * start_of_packet_descriptor
Definition: ixge.c:908
static ixge_dma_regs_t * get_dma_regs(ixge_device_t *xd, vlib_rx_or_tx_t rt, u32 qi)
Definition: ixge.c:310
static u8 * format_ixge_tx_dma_trace(u8 *s, va_list *va)
Definition: ixge.c:863
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4
Definition: ixge.h:102
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
static void vlib_buffer_free_no_next(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers, does not free the buffer chain for each buffer.
Definition: buffer_funcs.h:882
u32 n_descriptors
Definition: ixge.h:1125
u16 n_vectors
Definition: node.h:395
#define XGE_PHY_DEV_TYPE_PHY_XS
Definition: ixge.c:49
u32 status_write_1_to_set
Definition: ixge.h:232
Definition: i2c.h:33
static uword ixge_interrupt_tx_queue(uword i)
Definition: ixge.c:2134
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
vlib_main_t * vm
Definition: buffer.c:312
ixge_descriptor_t after
Definition: ixge.c:561
u32x4 as_u32x4
Definition: ixge.h:171
u8 * vlib_validate_buffer(vlib_main_t *vm, u32 bi, uword follow_buffer_next)
Definition: buffer.c:243
#define IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET(s)
Definition: ixge.h:108
#define clib_warning(format, args...)
Definition: error.h:59
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:89
void(* get_bits)(struct i2c_bus_t *b, int *scl, int *sda)
Definition: i2c.h:36
elog_main_t elog_main
Definition: main.h:172
#define ETHERNET_INTERFACE_FLAG_ACCEPT_ALL
Definition: ethernet.h:141
static u8 * format_ixge_device_name(u8 *s, va_list *args)
Definition: ixge.c:2244
#define IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS
Definition: ixge.h:136
#define ARRAY_LEN(x)
Definition: clib.h:62
u16 device_index
Definition: ixge.h:1216
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:465
#define ELOG_TYPE_DECLARE(f)
Definition: elog.h:439
static uword ixge_interrupt_rx_queue(uword i)
Definition: ixge.c:2127
u32 n_bytes_in_packet
Definition: ixge.c:906
static uword round_pow2(uword x, uword pow2)
Definition: clib.h:241
void vlib_i2c_init(i2c_bus_t *b)
Definition: i2c.c:150
u32 sdp_control
Definition: ixge.h:201
vlib_pci_addr_t * vlib_pci_get_addr(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.c:163
static u8 * format_ixge_rx_dma_trace(u8 *s, va_list *va)
Definition: ixge.c:576
u32 tail_index
Definition: ixge.h:1128
static char * ixge_error_strings[]
Definition: ixge.c:2211
#define EVENT_SET_FLAGS
Definition: ixge.c:42
signed int i32
Definition: types.h:77
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:514
#define ASSERT(truth)
#define IXGE_COUNTER_NOT_CLEAR_ON_READ
Definition: ixge.c:2256
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:178
ixge_pci_device_id_t device_id
Definition: ixge.h:1214
ixge_descriptor_t * descriptors
Definition: ixge.h:1122
static void ixge_tx_queue(ixge_main_t *xm, ixge_device_t *xd, u32 queue_index)
Definition: ixge.c:1996
u32 vlib_register_node(vlib_main_t *vm, vlib_node_registration_t *r)
Definition: node.c:520
u32 auto_negotiation_control
Definition: ixge.h:427
#define clib_error_report(e)
Definition: error.h:113
static void vlib_node_set_state(vlib_main_t *vm, u32 node_index, vlib_node_state_t new_state)
Set node dispatch state.
Definition: node_funcs.h:148
u32 control
Definition: ixge.h:179
ixge_regs_t * regs
Definition: ixge.h:1205
u32 id
Definition: ixge.h:1116
u32 rx_enable
Definition: ixge.h:308
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
static void ixge_semaphore_get(ixge_device_t *xd)
Definition: ixge.c:60
static void ixge_interrupt(ixge_main_t *xm, ixge_device_t *xd, u32 i)
Definition: ixge.c:1893
static uword ixge_tx_descriptor_matches_template(ixge_main_t *xm, ixge_tx_descriptor_t *d)
Definition: ixge.c:1017
static uword ixge_tx_queue_to_interrupt(uword i)
Definition: ixge.c:2115
struct ixge_regs_t::@460 xge_mac
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:57
void vlib_i2c_read_eeprom(i2c_bus_t *bus, u8 i2c_addr, u16 start_addr, u16 length, u8 *data)
Definition: i2c.c:201
void(* put_bits)(struct i2c_bus_t *b, int scl, int sda)
Definition: i2c.h:35
struct _vlib_node_registration vlib_node_registration_t
static void ixge_device_init(ixge_main_t *xm)
Definition: ixge.c:2616
struct ixge_dma_regs_t::@452::@455 tx
static void ixge_software_firmware_sync(ixge_device_t *xd, u32 sw_mask)
Definition: ixge.c:89
static u64 vlib_physmem_get_pa(vlib_main_t *vm, void *mem)
Definition: defs.h:47
ixge_tx_descriptor_t tx_descriptor_template_mask
Definition: ixge.h:1259
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED
Definition: ixge.h:113
void ixge_set_next_node(ixge_rx_next_t next, char *name)
Definition: ixge.c:2902
ixge_dma_queue_t * dma_queues[VLIB_N_RX_TX]
Definition: ixge.h:1224
VLIB_PLUGIN_REGISTER()
static uword ixge_ring_sub(ixge_dma_queue_t *q, u32 i0, u32 i1)
Definition: ixge.c:998
static void ixge_tx_trace(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, ixge_tx_state_t *tx_state, ixge_tx_descriptor_t *descriptors, u32 *buffers, uword n_descriptors)
Definition: ixge.c:912
u32 tail_index
Definition: ixge.h:53
u32 rx_max_frame_size
Definition: ixge.h:373
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
clib_error_t * ethernet_register_interface(vnet_main_t *vnm, u32 dev_class_index, u32 dev_instance, const u8 *address, u32 *hw_if_index_return, ethernet_flag_change_function_t flag_change)
Definition: interface.c:278
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, vnet_hw_interface_flags_t flags)
Definition: interface.c:504
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
Definition: buffer.h:451
static uword ixge_interrupt_is_tx_queue(uword i)
Definition: ixge.c:2109
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
#define IXGE_COUNTER_IS_64_BIT
Definition: ixge.c:2255
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:274
vlib_buffer_t buffer
Definition: ixge.c:859
static uword ixge_device_input(ixge_main_t *xm, ixge_device_t *xd, vlib_node_runtime_t *node)
Definition: ixge.c:2141
static uword ixge_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
Definition: ixge.c:2169
u32 n_descriptors_per_cache_line
Definition: ixge.h:1254
clib_error_t * vlib_pci_map_region(vlib_main_t *vm, vlib_pci_dev_handle_t h, u32 resource, void **result)
Definition: pci.c:1146
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT
Definition: ixge.h:103
vlib_main_t * vlib_main
Definition: ixge.h:1242
u16 n_packet_bytes_this_descriptor
Definition: ixge.h:96
static uword ixge_rx_queue(ixge_main_t *xm, ixge_device_t *xd, vlib_node_runtime_t *node, u32 queue_index)
Definition: ixge.c:1820
#define XGE_PHY_CONTROL_RESET
Definition: ixge.c:53
static uword vlib_in_process_context(vlib_main_t *vm)
Definition: node_funcs.h:415
#define vnet_buffer(b)
Definition: buffer.h:369
u32 is_start_of_packet
Definition: ixge.c:904
#define IXGE_HWBP_RACE_ELOG
Definition: ixge.c:43
static int vlib_i2c_bus_timed_out(i2c_bus_t *bus)
Definition: i2c.h:54
ixge_dma_regs_t rx_dma0[64]
Definition: ixge.h:281
u8 is_start_of_packet
Definition: ixge.c:856
#define IXGE_ALWAYS_POLL
Definition: ixge.c:40
u32 core_analog_config
Definition: ixge.h:949
static void ixge_pci_intr_handler(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: ixge.c:2820
#define vec_foreach(var, vec)
Vector iterator.
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP
Definition: ixge.h:106
unsigned long long u32x4
Definition: ixge.c:28
clib_error_t * ixge_init(vlib_main_t *vm)
Definition: ixge.c:2788
u16 flags
Copy of main node flags.
Definition: node.h:508
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:115
u32 id
Definition: udp.api:45
static void ixge_sfp_enable_disable_laser(ixge_device_t *xd, uword enable)
Definition: ixge.c:208
u16 pci_function
Definition: ixge.h:1219
format_function_t format_vlib_pci_link_speed
Definition: pci.h:325
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:172
u32 status_write_1_to_clear
Definition: ixge.h:230
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:301
#define IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS
Definition: ixge.h:135
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
static u8 * format_ixge_device(u8 *s, va_list *args)
Definition: ixge.c:2335
u32 phy_index
Definition: ixge.h:1227
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:612
u32 eeprom_read
Definition: ixge.h:881
static u32 ixge_read_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index)
Definition: ixge.c:157
u32 n_bytes_in_rx_buffer
Definition: ixge.h:1252
u32 link_status
Definition: ixge.h:340
static clib_error_t * ixge_dma_init(ixge_device_t *xd, vlib_rx_or_tx_t rt, u32 queue_index)
Definition: ixge.c:2463
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
u64 counters_last_clear[IXGE_N_COUNTER]
Definition: ixge.h:1237
ixge_dma_regs_t rx_dma1[64]
Definition: ixge.h:747
ixge_rx_from_hw_descriptor_t rx_from_hw
Definition: ixge.h:169
#define VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE
Definition: node.h:304
#define foreach_ixge_error
Definition: ixge.c:616
Definition: defs.h:46
#define IXGE_TX_DESCRIPTOR_STATUS1_DONE
Definition: ixge.h:139
#define IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR
Definition: ixge.h:118
ixge_error_t
Definition: ixge.c:623