/*-
 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * version: DPDK.L.1.2.3-3
 */
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_logs.h"
#include "ixgbe/ixgbe_api.h"
#include "ixgbe/ixgbe_vf.h"
#include "ixgbe/ixgbe_common.h"
#include "ixgbe_ethdev.h"
/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
		uint16_t nb_tx_q);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static void ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
		void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
		uint16_t nb_tx_q);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
/*
 * Define VF stats macros for registers that are not "cleared on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
	u32 latest = IXGBE_READ_REG(hw, reg);                   \
	cur += latest - last;                                   \
	last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
	u64 latest = ((new_msb << 32) | new_lsb);                \
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
	last = latest;                                           \
}
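/*
 * Worked example of the 36-bit wrap handling above: the VF octet counters
 * are 36 bits wide and wrap silently, so if the previous reading was
 * 0xFFFFFFF00 and the counter has since wrapped around to 0x100, then
 * (2^36 + 0x100 - 0xFFFFFFF00) & (2^36 - 1) = 0x200, the true delta.
 */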
/*
 * The set of PCI devices this driver supports.
 */
static struct rte_pci_id pci_id_ixgbe_map[] = {

#undef RTE_LIBRTE_IGB_PMD
#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{ .vendor_id = 0, /* sentinel */ },
};
/*
 * The set of PCI devices this driver supports (for 82599 VF).
 */
static struct rte_pci_id pci_id_ixgbevf_map[] = {
	{
		.vendor_id = PCI_VENDOR_ID_INTEL,
		.device_id = IXGBE_DEV_ID_82599_VF,
		.subsystem_vendor_id = PCI_ANY_ID,
		.subsystem_device_id = PCI_ANY_ID,
	},
	{ .vendor_id = 0, /* sentinel */ },
};
static struct eth_dev_ops ixgbe_eth_dev_ops = {
	.dev_configure = ixgbe_dev_configure,
	.dev_start = ixgbe_dev_start,
	.dev_stop = ixgbe_dev_stop,
	.dev_close = ixgbe_dev_close,
	.promiscuous_enable = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update = ixgbe_dev_link_update,
	.stats_get = ixgbe_dev_stats_get,
	.stats_reset = ixgbe_dev_stats_reset,
	.dev_infos_get = ixgbe_dev_info_get,
	.vlan_filter_set = ixgbe_vlan_filter_set,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.dev_led_on = ixgbe_dev_led_on,
	.dev_led_off = ixgbe_dev_led_off,
	.flow_ctrl_set = ixgbe_flow_ctrl_set,
	.mac_addr_add = ixgbe_add_rar,
	.mac_addr_remove = ixgbe_remove_rar,
	.fdir_add_signature_filter = ixgbe_fdir_add_signature_filter,
	.fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
	.fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
	.fdir_infos_get = ixgbe_fdir_info_get,
	.fdir_add_perfect_filter = ixgbe_fdir_add_perfect_filter,
	.fdir_update_perfect_filter = ixgbe_fdir_update_perfect_filter,
	.fdir_remove_perfect_filter = ixgbe_fdir_remove_perfect_filter,
	.fdir_set_masks = ixgbe_fdir_set_masks,
};
/*
 * dev_ops for virtual function; only the bare necessities for basic VF
 * operation have been implemented.
 */
static struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.dev_configure = ixgbevf_dev_configure,
	.dev_start = ixgbevf_dev_start,
	.dev_stop = ixgbevf_dev_stop,
	.link_update = ixgbe_dev_link_update,
	.stats_get = ixgbevf_dev_stats_get,
	.stats_reset = ixgbevf_dev_stats_reset,
	.dev_close = ixgbevf_dev_stop,
	.dev_infos_get = ixgbe_dev_info_get,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
};
/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
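/*
 * Note: the helper above and the one below assume struct rte_eth_link is
 * laid out to fit in a single 64-bit word, which is what lets one
 * rte_atomic64_cmpset() copy the whole link status atomically; the cmpset
 * returns 0 (failure) only if another thread raced the update.
 */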
/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
/*
 * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		return 1;
	default:
		return 0;
	}
}
/*
 * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
 */
static inline void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(hw);
}
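/*
 * The else branch above covers 82599-class MACs, which expose more than
 * 32 interrupt vectors and therefore also need the extended EIMC_EX
 * registers cleared.
 */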
/*
 * This function resets queue statistics mapping registers.
 * From the Niantic datasheet, "Initialization of Statistics" section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
	uint32_t i;

	for (i = 0; i != 16; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
	}
}
/*
 * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	uint32_t ctrl_ext;
	uint16_t csum;
	int diag, i;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work. Only check we don't need a
	 * different RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
		return 0;
	}
	pci_dev = eth_dev->pci_dev;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource.addr;

	/* Initialize the shared code */
	diag = ixgbe_init_shared_code(hw);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.low_water = IXGBE_FC_LO;
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		hw->fc.high_water[i] = IXGBE_FC_HI;
	hw->fc.send_xon = 1;
	ixgbe_disable_intr(hw);

	/* Make sure we have a good EEPROM before we read from it */
	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
		return -EIO;
	}

	diag = ixgbe_init_hw(hw);

	/*
	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
	 * also called. See ixgbe_identify_phy_82599(). The reason for the
	 * failure is not known, and it only occurs when virtualisation
	 * features are disabled in the BIOS. A delay of 100ms was found to be
	 * enough by trial-and-error, and is doubled to be safe.
	 */
	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
		rte_delay_ms(200);
		diag = ixgbe_init_hw(hw);
	}

	if (diag == IXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			"LOM. Please be aware there may be issues associated "
			"with your hardware.\nIf you are experiencing problems "
			"please contact your Intel or hardware representative "
			"who provided you with this hardware.\n");
	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
	if (diag) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
		return -EIO;
	}
	/* pick up the PCI bus settings for reporting later */
	ixgbe_get_bus_info(hw);

	/* reset mappings for queue statistics hw counters */
	ixgbe_reset_qstat_mappings(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
			hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
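	/*
	 * Per the datasheet, the DRV_LOAD bit written above tells firmware
	 * and manageability agents that a software driver now owns the port.
	 */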
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG,
			"MAC: %d, PHY: %d, SFP+: %d\n",
			(int) hw->mac.type, (int) hw->phy.type,
			(int) hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
			(int) hw->mac.type, (int) hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	rte_intr_callback_register(&(pci_dev->intr_handle),
			ixgbe_dev_interrupt_handler, (void *)eth_dev);

	return 0;
}
/*
 * Virtual Function device init
 */
static int
eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;

	PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");

	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
	pci_dev = eth_dev->pci_dev;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource.addr;

	/* Initialize the shared code */
	diag = ixgbe_init_shared_code(hw);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	ixgbevf_intr_disable(hw);
	hw->mac.num_rar_entries = hw->mac.max_rx_queues;
	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
			hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* reset the hardware with the new settings */
	diag = hw->mac.ops.start_hw(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
		return -EIO;
	}

	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id, "ixgbe_mac_82599_vf");

	return 0;
}
static struct eth_driver rte_ixgbe_pmd = {
	{
		.name = "rte_ixgbe_pmd",
		.id_table = pci_id_ixgbe_map,
		.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
	},
	.eth_dev_init = eth_ixgbe_dev_init,
	.dev_private_size = sizeof(struct ixgbe_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_ixgbevf_pmd = {
	{
		.name = "rte_ixgbevf_pmd",
		.id_table = pci_id_ixgbevf_map,
		.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
	},
	.eth_dev_init = eth_ixgbevf_dev_init,
	.dev_private_size = sizeof(struct ixgbe_adapter),
};
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
 */
int
rte_ixgbe_pmd_init(void)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_ixgbe_pmd);
	return 0;
}
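/*
 * Typical call sequence from an application, as a sketch only (the calls
 * outside this file follow the DPDK 1.2 API of the day):
 *
 *	if (rte_eal_init(argc, argv) < 0)
 *		rte_panic("Cannot init EAL\n");
 *	if (rte_ixgbe_pmd_init() != 0)
 *		rte_panic("Cannot init ixgbe PMD\n");
 *	if (rte_eal_pci_probe() < 0)
 *		rte_panic("Cannot probe PCI\n");
 */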
/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
 */
int
rte_ixgbevf_pmd_init(void)
{
	DEBUGFUNC("rte_ixgbevf_pmd_init");

	rte_eth_driver_register(&rte_ixgbevf_pmd);
	return 0;
}
static void
ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;
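	/*
	 * The VFTA is an array of 128 32-bit registers: bits 11:5 of the
	 * VLAN ID select the register, bits 4:0 select the bit within it.
	 */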
	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;
}
static void
ixgbe_vlan_hw_support_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vlnctrl;
	uint32_t rxdctl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~IXGBE_VLNCTRL_VFE;

	if (hw->mac.type == ixgbe_mac_82598EB)
		vlnctrl &= ~IXGBE_VLNCTRL_VME;
	else {
		/* On 82599 the VLAN enable is per queue in RXDCTL */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			rxdctl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}
static void
ixgbe_vlan_hw_support_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vlnctrl;
	uint32_t rxdctl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
	vlnctrl |= IXGBE_VLNCTRL_VFE;

	if (hw->mac.type == ixgbe_mac_82598EB)
		vlnctrl |= IXGBE_VLNCTRL_VME;
	else {
		/* On 82599 the VLAN enable is per queue in RXDCTL */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			rxdctl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
}
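/*
 * Note: the shadow VFTA kept in private data is what makes the restore
 * loop above possible; it preserves the filter table across hardware
 * resets that would otherwise wipe it.
 */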
static int
ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int diag;

	PMD_INIT_FUNC_TRACE();

	/* Allocate the array of pointers to RX queue structures */
	diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
				"pointers to RX queues failed", dev->data->port_id,
				nb_rx_q);
		return diag;
	}

	/* Allocate the array of pointers to TX queue structures */
	diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
				"pointers to TX queues failed", dev->data->port_id,
				nb_tx_q);
		return diag;
	}

	/* set flag to update link status after init */
	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;

	return 0;
}
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ixgbe_dev_start(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err, link_up = 0, negotiate = 0;
	uint32_t speed = 0;

	PMD_INIT_FUNC_TRACE();

	/* IXGBE devices don't support half duplex */
	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
			(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
		PMD_INIT_LOG(ERR, "Invalid link_duplex (%u) for port %u\n",
				dev->data->dev_conf.link_duplex,
				dev->data->port_id);
		return -EINVAL;
	}

	/* stop adapter */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);

	/* reinitialize adapter; this calls reset and start */
	ixgbe_init_hw(hw);
	/* initialize transmission unit */
	ixgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ixgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
		goto error;
	}

	ixgbe_dev_rxtx_start(dev);

	if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.ops.setup_sfp(hw);
		if (err)
			goto error;
	}

	/* Turn on the laser */
	if (hw->phy.multispeed_fiber)
		ixgbe_enable_tx_laser(hw);

	err = ixgbe_check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;
	switch (dev->data->dev_conf.link_speed) {
	case ETH_LINK_SPEED_AUTONEG:
		speed = (hw->mac.type != ixgbe_mac_82598EB) ?
				IXGBE_LINK_SPEED_82599_AUTONEG :
				IXGBE_LINK_SPEED_82598_AUTONEG;
		break;
	case ETH_LINK_SPEED_100:
		/*
		 * Invalid for 82598, but the error will be detected by
		 * ixgbe_setup_link().
		 */
		speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	case ETH_LINK_SPEED_1000:
		speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case ETH_LINK_SPEED_10000:
		speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	default:
		PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
				dev->data->dev_conf.link_speed, dev->data->port_id);
		return -EINVAL;
	}
	err = ixgbe_setup_link(hw, speed, negotiate, link_up);
	if (err)
		goto error;

	/* check if lsc interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.lsc != 0) {
		err = ixgbe_dev_interrupt_setup(dev);
		if (err)
			goto error;
	}

	/*
	 * If VLAN filtering is enabled, set up VLAN tag offload and filtering
	 * and restore the VFTA from the local shadow copy.
	 */
	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
		ixgbe_vlan_hw_support_enable(dev);
	else
		ixgbe_vlan_hw_support_disable(dev);

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		err = ixgbe_fdir_configure(dev);
		if (err)
			goto error;
	}

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
	return -EIO;
}
/*
 * Stop device: disable RX and TX functions to allow for reconfiguring.
 */
static void
ixgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	ixgbe_disable_intr(hw);

	/* reset the NIC */
	ixgbe_reset_hw(hw);
	hw->adapter_stopped = FALSE;

	/* stop adapter */
	ixgbe_stop_adapter(hw);

	/* Turn off the laser */
	if (hw->phy.multispeed_fiber)
		ixgbe_disable_tx_laser(hw);

	ixgbe_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_ixgbe_dev_atomic_write_link_status(dev, &link);
}
/*
 * Reset and stop the device.
 */
static void
ixgbe_dev_close(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	ixgbe_reset_hw(hw);

	hw->adapter_stopped = 1;
	ixgbe_stop_adapter(hw);

	ixgbe_disable_pcie_master(hw);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
/*
 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c.
 */
static void
ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_stats *hw_stats =
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	uint32_t bprc, lxon, lxoff, total;
	uint64_t total_missed_rx, total_qbrc, total_qprc;
	unsigned i;

	total_missed_rx = 0;
	total_qbrc = 0;
	total_qprc = 0;

	hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++) {
		uint32_t mp;
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		hw_stats->mpc[i] += mp;
		/* Running comprehensive total for stats display */
		total_missed_rx += hw_stats->mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			hw_stats->rnbc[i] +=
				IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		hw_stats->pxontxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hw_stats->pxonrxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
		hw_stats->pxofftxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		hw_stats->pxoffrxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		hw_stats->pxon2offc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	}
	for (i = 0; i < 16; i++) {
		hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
		hw_stats->qbrc[i] +=
			((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
		hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
		hw_stats->qbtc[i] +=
			((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
		hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));

		total_qprc += hw_stats->qprc[i];
		total_qbrc += hw_stats->qbrc[i];
	}
	hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Note that gprc counts missed packets */
	hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);

	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
			((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
			((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
			((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}
	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	hw_stats->bprc += bprc;
	hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		hw_stats->mprc -= bprc;

	hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	hw_stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	hw_stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	hw_stats->gptc -= total;
	hw_stats->mptc -= total;
	hw_stats->ptc64 -= total;
	hw_stats->gotc -= total * ETHER_MIN_LEN;
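	/*
	 * The adjustments above remove XON/XOFF pause frames from the good
	 * packet/octet counters: the hardware counts them as transmitted
	 * traffic, and each pause frame is a minimum-size Ethernet frame.
	 */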
	hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}
	if (stats == NULL)
		return;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = total_qprc;
	stats->ibytes = total_qbrc;
	stats->opackets = hw_stats->gptc;
	stats->obytes = hw_stats->gotc;
	stats->imcasts = hw_stats->mprc;

	/* Rx Errors */
	stats->ierrors = total_missed_rx + hw_stats->crcerrs +
		hw_stats->rlec;

	/* Flow Director Stats registers */
	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	stats->fdirmatch = hw_stats->fdirmatch;
	stats->fdirmiss = hw_stats->fdirmiss;
}
static void
ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ixgbe_hw_stats *stats =
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	ixgbe_dev_stats_get(dev, NULL);

	/* Reset software totals */
	memset(stats, 0, sizeof(*stats));
}
static void
ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Good Rx packets, include VF loopback */
	UPDATE_VF_STAT(IXGBE_VFGPRC,
		hw_stats->last_vfgprc, hw_stats->vfgprc);

	/* Good Rx octets, include VF loopback */
	UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
		hw_stats->last_vfgorc, hw_stats->vfgorc);

	/* Good Tx packets, include VF loopback */
	UPDATE_VF_STAT(IXGBE_VFGPTC,
		hw_stats->last_vfgptc, hw_stats->vfgptc);

	/* Good Tx octets, include VF loopback */
	UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
		hw_stats->last_vfgotc, hw_stats->vfgotc);

	/* Rx Multicast packets */
	UPDATE_VF_STAT(IXGBE_VFMPRC,
		hw_stats->last_vfmprc, hw_stats->vfmprc);

	if (stats == NULL)
		return;

	memset(stats, 0, sizeof(*stats));
	stats->ipackets = hw_stats->vfgprc;
	stats->ibytes = hw_stats->vfgorc;
	stats->opackets = hw_stats->vfgptc;
	stats->obytes = hw_stats->vfgotc;
	stats->imcasts = hw_stats->vfmprc;
}
static void
ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Sync HW registers to the last stats */
	ixgbevf_dev_stats_get(dev, NULL);

	/* reset HW current stats */
	hw_stats->vfgprc = 0;
	hw_stats->vfgorc = 0;
	hw_stats->vfgptc = 0;
	hw_stats->vfgotc = 0;
	hw_stats->vfmprc = 0;
}
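/*
 * Note that the last_* snapshot fields are deliberately left untouched by
 * the reset above: the sync call already aligned them with the hardware
 * counters, so subsequent reads accumulate deltas from this point only.
 */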
static void
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->max_rx_queues = hw->mac.max_rx_queues;
	dev_info->max_tx_queues = hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
}
/* return 0 means link status changed, -1 means not changed */
static int
ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	ixgbe_link_speed link_speed;
	int link_up;
	int diag;

	link.link_status = 0;
	link.link_speed = 0;
	link.link_duplex = 0;
	memset(&old, 0, sizeof(old));
	rte_ixgbe_dev_atomic_read_link_status(dev, &old);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
	else
		diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
	if (diag != 0) {
		link.link_speed = ETH_LINK_SPEED_100;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		rte_ixgbe_dev_atomic_write_link_status(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	if (link_up == 0) {
		rte_ixgbe_dev_atomic_write_link_status(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}
	link.link_status = 1;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case IXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_speed = ETH_LINK_SPEED_100;
		break;

	case IXGBE_LINK_SPEED_100_FULL:
		link.link_speed = ETH_LINK_SPEED_100;
		break;

	case IXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_LINK_SPEED_1000;
		break;

	case IXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = ETH_LINK_SPEED_10000;
		break;
	}
	rte_ixgbe_dev_atomic_write_link_status(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
static void
ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl &= (~IXGBE_FCTRL_UPE);
	if (dev->data->all_multicast == 1)
		fctrl |= IXGBE_FCTRL_MPE;
	else
		fctrl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only, during NIC initialization.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ixgbe_dev_link_status_print(dev);
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
	IXGBE_WRITE_FLUSH(hw);
	rte_intr_enable(&(dev->pci_dev->intr_handle));

	return 0;
}
/*
 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC);
	IXGBE_WRITE_FLUSH(hw);

	/* read-on-clear nic registers here */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	PMD_INIT_LOG(INFO, "eicr %x", eicr);
	if (eicr & IXGBE_EICR_LSC) {
		/* set flag for async link update */
		intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	}

	return 0;
}
/*
 * It gets and then prints the link status.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   void
 */
static void
ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_ixgbe_dev_atomic_read_link_status(dev, &link);
	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
				(int)(dev->data->port_id),
				(unsigned)link.link_speed,
				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
			dev->pci_dev->addr.domain,
			dev->pci_dev->addr.bus,
			dev->pci_dev->addr.devid,
			dev->pci_dev->addr.function);
}
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
		return -1;
	}
	ixgbe_dev_link_update(dev, 0);

	return 0;
}
/*
 * Interrupt handler which shall be registered as an alarm callback for
 * delayed handling of a specific interrupt, to wait for the NIC state to
 * become stable. The interrupt state is not stable for ixgbe right after
 * the link goes down, so it needs to wait 4 seconds to get a stable status.
 *
 * @param param
 *   The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *   void
 */
static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	IXGBE_READ_REG(hw, IXGBE_EICR);
	ixgbe_dev_interrupt_action(dev);
	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		rte_intr_enable(&(dev->pci_dev->intr_handle));
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
		IXGBE_WRITE_FLUSH(hw);
		ixgbe_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
	}
}
/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *   Pointer to interrupt handle.
 * @param param
 *   The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *   void
 */
static void
ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param)
{
	int64_t timeout;
	struct rte_eth_link link;
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	/* get the link status before link update, for predicting later */
	memset(&link, 0, sizeof(link));
	rte_ixgbe_dev_atomic_read_link_status(dev, &link);
	ixgbe_dev_interrupt_get_status(dev);
	ixgbe_dev_interrupt_action(dev);

	if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
		return;

	/* link is likely coming up */
	if (!link.link_status)
		/* handle it 1 sec later, wait for it to be stable */
		timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
	/* link is likely going down */
	else
		/* handle it 4 sec later, wait for it to be stable */
		timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;

	ixgbe_dev_link_status_print(dev);
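	/* rte_eal_alarm_set() takes microseconds; timeout above is in ms. */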
	if (rte_eal_alarm_set(timeout * 1000,
			ixgbe_dev_interrupt_delayed_handler, param) < 0)
		PMD_INIT_LOG(ERR, "Error setting alarm");
}
static int
ixgbe_dev_led_on(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
}

static int
ixgbe_dev_led_off(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
}
static int
ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ixgbe_hw *hw;
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
		ixgbe_fc_none,
		ixgbe_fc_rx_pause,
		ixgbe_fc_tx_pause,
		ixgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x\n", rx_buf_size);
	/*
	 * At least reserve one Ethernet frame for the watermark;
	 * high_water/low_water are in kilobytes for ixgbe.
	 */
	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
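	/*
	 * Example with the default 512KB (0x80000 byte) packet buffer:
	 * (0x80000 - 1518) >> 10 caps the high watermark at 510KB.
	 */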
	if ((fc_conf->high_water > max_high_water) ||
			(fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x\n", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water[0] = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = ixgbe_fc_enable(hw, 0);
	/* Not negotiated is not an error case */
	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
		return 0;

	PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x\n", err);
	return -EIO;
}
static void
ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t enable_addr = 1;

	ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
}

static void
ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ixgbe_clear_rar(hw, index);
}
/*
 * Virtual Function operations
 */
static void
ixgbevf_intr_disable(struct ixgbe_hw *hw)
{
	PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");

	/* Clear the interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	IXGBE_WRITE_FLUSH(hw);
}
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
{
	int diag;
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	PMD_INIT_FUNC_TRACE();

	/* Allocate the array of pointers to RX queue structures */
	diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
				"pointers to RX queues failed", dev->data->port_id,
				nb_rx_q);
		return diag;
	}

	/* Allocate the array of pointers to TX queue structures */
	diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
				"pointers to TX queues failed", dev->data->port_id,
				nb_tx_q);
		return diag;
	}

	if (!conf->rxmode.hw_strip_crc) {
		/*
		 * The VF has no ability to enable/disable HW CRC stripping;
		 * keep the behavior the same as on the host PF.
		 */
		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
		conf->rxmode.hw_strip_crc = 1;
	}

	return 0;
}
static int
ixgbevf_dev_start(struct rte_eth_dev *dev)
{
	int err;

	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");

	ixgbevf_dev_tx_init(dev);
	err = ixgbevf_dev_rx_init(dev);
	if (err) {
		ixgbe_dev_clear_queues(dev);
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
		return err;
	}

	ixgbevf_dev_rxtx_start(dev);

	return 0;
}
static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");

	hw->adapter_stopped = 0;
	ixgbe_stop_adapter(hw);
	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}