/*-
 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_logs.h"
#include "ixgbe/ixgbe_api.h"
#include "ixgbe/ixgbe_vf.h"
#include "ixgbe/ixgbe_common.h"
#include "ixgbe_ethdev.h"
/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI	0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO	0x40
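/*
 * Illustrative arithmetic (editorial note, not from the original source):
 * with the values above, XOFF pause frames start being sent once the Rx
 * packet buffer fills past 0x80 * 1024 = 128 KB, and XON frames resume the
 * link partner once it drains below 0x40 * 1024 = 64 KB.
 */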
/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE	0x680

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT	4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT	1000 /* ms */
static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
		uint16_t nb_tx_q);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static void ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
		void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
		uint16_t nb_tx_q);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
/*
 * Define VF stats macros for registers that are not "clear on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)				\
{								\
	u32 latest = IXGBE_READ_REG(hw, reg);			\
	cur += latest - last;					\
	last = latest;						\
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)		\
{								\
	u64 new_lsb = IXGBE_READ_REG(hw, lsb);			\
	u64 new_msb = IXGBE_READ_REG(hw, msb);			\
	u64 latest = ((new_msb << 32) | new_lsb);		\
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
	last = latest;						\
}
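/*
 * Worked example (editorial, not from the original source): the VF counters
 * are only 36 bits wide, so `latest` can wrap below `last`. Adding 2^36
 * (0x1000000000) before subtracting and masking with 2^36 - 1 (0xFFFFFFFFF)
 * recovers the true delta. E.g. last = 0xFFFFFFFF0 and latest = 0x10 gives
 * (0x1000000000 + 0x10 - 0xFFFFFFFF0) & 0xFFFFFFFFF = 0x20: 16 units up to
 * the wrap point plus 16 units after it.
 */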
/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id pci_id_ixgbe_map[] = {

#undef RTE_LIBRTE_IGB_PMD
#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{ .vendor_id = 0, /* sentinel */ },
};
/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static struct rte_pci_id pci_id_ixgbevf_map[] = {
	{
		.vendor_id = PCI_VENDOR_ID_INTEL,
		.device_id = IXGBE_DEV_ID_82599_VF,
		.subsystem_vendor_id = PCI_ANY_ID,
		.subsystem_device_id = PCI_ANY_ID,
	},
	{ .vendor_id = 0, /* sentinel */ },
};
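/*
 * Editorial note: at probe time the EAL walks ID tables like the two above
 * entry by entry, matching the vendor/device IDs of discovered PCI devices,
 * and stops when it reaches the all-zero sentinel entry.
 */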
static struct eth_dev_ops ixgbe_eth_dev_ops = {
	.dev_configure = ixgbe_dev_configure,
	.dev_start = ixgbe_dev_start,
	.dev_stop = ixgbe_dev_stop,
	.dev_close = ixgbe_dev_close,
	.promiscuous_enable = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update = ixgbe_dev_link_update,
	.stats_get = ixgbe_dev_stats_get,
	.stats_reset = ixgbe_dev_stats_reset,
	.dev_infos_get = ixgbe_dev_info_get,
	.vlan_filter_set = ixgbe_vlan_filter_set,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.dev_led_on = ixgbe_dev_led_on,
	.dev_led_off = ixgbe_dev_led_off,
	.flow_ctrl_set = ixgbe_flow_ctrl_set,
	.mac_addr_add = ixgbe_add_rar,
	.mac_addr_remove = ixgbe_remove_rar,
	.fdir_add_signature_filter = ixgbe_fdir_add_signature_filter,
	.fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
	.fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
	.fdir_infos_get = ixgbe_fdir_info_get,
	.fdir_add_perfect_filter = ixgbe_fdir_add_perfect_filter,
	.fdir_update_perfect_filter = ixgbe_fdir_update_perfect_filter,
	.fdir_remove_perfect_filter = ixgbe_fdir_remove_perfect_filter,
	.fdir_set_masks = ixgbe_fdir_set_masks,
};
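/*
 * Editorial note: the generic rte_ethdev API dispatches through this table;
 * e.g. rte_eth_dev_start(port_id) looks up the port's rte_eth_dev and calls
 * dev->dev_ops->dev_start(dev), which for ixgbe ports is ixgbe_dev_start().
 */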
/*
 * dev_ops for virtual functions; only the bare necessities for basic VF
 * operation are implemented.
 */
static struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.dev_configure = ixgbevf_dev_configure,
	.dev_start = ixgbevf_dev_start,
	.dev_stop = ixgbevf_dev_stop,
	.link_update = ixgbe_dev_link_update,
	.stats_get = ixgbevf_dev_stats_get,
	.stats_reset = ixgbevf_dev_stats_reset,
	.dev_close = ixgbevf_dev_stop,
	.dev_infos_get = ixgbe_dev_info_get,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
};
/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
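/*
 * Editorial note: the two helpers above rely on struct rte_eth_link fitting
 * in a single 64-bit word, which is what makes the (uint64_t *) casts valid
 * and lets one rte_atomic64_cmpset() replace the whole link record at once.
 * The compare-and-set fails (returns 0) only if another thread modified
 * dev_link between the read of *dst and the swap, in which case the caller
 * sees -1 and may retry.
 */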
/*
 * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		return 1;
	default:
		return 0;
	}
}
/*
 * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(hw);
}
/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
	uint32_t i;

	for (i = 0; i != 16; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
	}
}
/*
 * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	uint32_t ctrl_ext;
	uint16_t csum;
	int diag, i;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
		return 0;
	}
	pci_dev = eth_dev->pci_dev;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource.addr;

	/* Initialize the shared code */
	diag = ixgbe_init_shared_code(hw);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.low_water = IXGBE_FC_LO;
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		hw->fc.high_water[i] = IXGBE_FC_HI;

	ixgbe_disable_intr(hw);

	/* Make sure we have a good EEPROM before we read from it */
	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
		return -EIO;
	}

	diag = ixgbe_init_hw(hw);

	/*
	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
	 * also called. See ixgbe_identify_phy_82599(). The reason for the
	 * failure is not known, and it only occurs when virtualisation
	 * features are disabled in the BIOS. A delay of 100ms was found to be
	 * enough by trial-and-error, and is doubled to be safe.
	 */
	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
		rte_delay_ms(200);
		diag = ixgbe_init_hw(hw);
	}

	if (diag == IXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\n If you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
	if (diag) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
		return -EIO;
	}

	/* pick up the PCI bus settings for reporting later */
	ixgbe_get_bus_info(hw);

	/* reset mappings for queue statistics hw counters */
	ixgbe_reset_qstat_mappings(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
			hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG,
			     "MAC: %d, PHY: %d, SFP+: %d\n",
			     (int) hw->mac.type, (int) hw->phy.type,
			     (int) hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
			     (int) hw->mac.type, (int) hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&(pci_dev->intr_handle),
		ixgbe_dev_interrupt_handler, (void *)eth_dev);

	return 0;
}
/*
 * Virtual Function device init
 */
static int
eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;

	PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");

	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
	pci_dev = eth_dev->pci_dev;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource.addr;

	/* Initialize the shared code */
	diag = ixgbe_init_shared_code(hw);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	ixgbevf_intr_disable(hw);

	hw->mac.num_rar_entries = hw->mac.max_rx_queues;
	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
			hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* reset the hardware with the new settings */
	diag = hw->mac.ops.start_hw(hw);
	if (diag) {
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
		return -EIO;
	}

	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");

	return 0;
}
static struct eth_driver rte_ixgbe_pmd = {
	{
		.name = "rte_ixgbe_pmd",
		.id_table = pci_id_ixgbe_map,
		.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
	},
	.eth_dev_init = eth_ixgbe_dev_init,
	.dev_private_size = sizeof(struct ixgbe_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_ixgbevf_pmd = {
	{
		.name = "rte_ixgbevf_pmd",
		.id_table = pci_id_ixgbevf_map,
		.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
	},
	.eth_dev_init = eth_ixgbevf_dev_init,
	.dev_private_size = sizeof(struct ixgbe_adapter),
};
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
 */
int
rte_ixgbe_pmd_init(void)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_ixgbe_pmd);
	return 0;
}

/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
 */
int
rte_ixgbevf_pmd_init(void)
{
	DEBUGFUNC("rte_ixgbevf_pmd_init");

	rte_eth_driver_register(&rte_ixgbevf_pmd);
	return 0;
}
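/*
 * Illustrative usage (editorial, not part of this file): an application
 * would typically register the PMDs right after EAL init and before the
 * PCI probe, e.g.:
 *
 *	if (rte_eal_init(argc, argv) < 0)
 *		rte_panic("Cannot init EAL\n");
 *	if (rte_ixgbe_pmd_init() != 0 || rte_ixgbevf_pmd_init() != 0)
 *		rte_panic("Cannot init ixgbe PMDs\n");
 *	if (rte_eal_pci_probe() < 0)
 *		rte_panic("Cannot probe PCI\n");
 */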
static void
ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;
}
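/*
 * Worked example (editorial): the 4096-entry VLAN filter table is spread
 * across 128 32-bit VFTA registers. For vlan_id = 100, vid_idx =
 * (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4, so bit 4
 * of VFTA[3] controls whether VLAN 100 passes the filter.
 */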
static void
ixgbe_vlan_hw_support_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vlnctrl;
	uint32_t rxdctl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~IXGBE_VLNCTRL_VFE;

	if (hw->mac.type == ixgbe_mac_82598EB)
		vlnctrl &= ~IXGBE_VLNCTRL_VME;
	else {
		/* On 82599 the VLAN enable is per/queue in RXDCTL */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			rxdctl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}
static void
ixgbe_vlan_hw_support_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vlnctrl;
	uint32_t rxdctl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
	vlnctrl |= IXGBE_VLNCTRL_VFE;

	if (hw->mac.type == ixgbe_mac_82598EB)
		vlnctrl |= IXGBE_VLNCTRL_VME;
	else {
		/* On 82599 the VLAN enable is per/queue in RXDCTL */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			rxdctl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
}
static int
ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int diag;

	PMD_INIT_FUNC_TRACE();

	/* Allocate the array of pointers to RX queue structures */
	diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d"
			     " pointers to RX queues failed", dev->data->port_id,
			     nb_rx_q);
		return diag;
	}

	/* Allocate the array of pointers to TX queue structures */
	diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d"
			     " pointers to TX queues failed", dev->data->port_id,
			     nb_tx_q);
		return diag;
	}

	/* set flag to update link status after init */
	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;

	return 0;
}
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ixgbe_dev_start(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err, link_up = 0, negotiate = 0;
	uint32_t speed = 0;

	PMD_INIT_FUNC_TRACE();

	/* IXGBE devices don't support half duplex */
	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
			(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
		PMD_INIT_LOG(ERR, "Invalid link_duplex (%u) for port %u\n",
			     dev->data->dev_conf.link_duplex,
			     dev->data->port_id);
		return -EINVAL;
	}

	/* stop adapter */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);

	/* reinitialize adapter
	 * this calls reset and start */
	ixgbe_init_hw(hw);

	/* initialize transmission unit */
	ixgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ixgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
		goto error;
	}
	ixgbe_dev_rxtx_start(dev);

	if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.ops.setup_sfp(hw);
		if (err)
			goto error;
	}

	/* Turn on the laser */
	if (hw->phy.multispeed_fiber)
		ixgbe_enable_tx_laser(hw);

	err = ixgbe_check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	switch (dev->data->dev_conf.link_speed) {
	case ETH_LINK_SPEED_AUTONEG:
		speed = (hw->mac.type != ixgbe_mac_82598EB) ?
				IXGBE_LINK_SPEED_82599_AUTONEG :
				IXGBE_LINK_SPEED_82598_AUTONEG;
		break;
	case ETH_LINK_SPEED_100:
		/*
		 * Invalid for 82598 but error will be detected by
		 * ixgbe_setup_link()
		 */
		speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	case ETH_LINK_SPEED_1000:
		speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case ETH_LINK_SPEED_10000:
		speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	default:
		PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
			     dev->data->dev_conf.link_speed, dev->data->port_id);
		return -EINVAL;
	}

	err = ixgbe_setup_link(hw, speed, negotiate, link_up);
	if (err)
		goto error;

	/* check if lsc interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.lsc != 0) {
		err = ixgbe_dev_interrupt_setup(dev);
		if (err)
			goto error;
	}

	/*
	 * If VLAN filtering is enabled, set up VLAN tag offload and filtering
	 * and restore the VFTA; otherwise disable VLAN hardware support.
	 */
	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
		ixgbe_vlan_hw_support_enable(dev);
	else
		ixgbe_vlan_hw_support_disable(dev);

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		err = ixgbe_fdir_configure(dev);
		if (err)
			goto error;
	}

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
	return -EIO;
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
ixgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	ixgbe_disable_intr(hw);

	/* reset the NIC */
	ixgbe_reset_hw(hw);
	hw->adapter_stopped = FALSE;

	/* stop adapter */
	ixgbe_stop_adapter(hw);

	/* Turn off the laser */
	if (hw->phy.multispeed_fiber)
		ixgbe_disable_tx_laser(hw);

	ixgbe_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_ixgbe_dev_atomic_write_link_status(dev, &link);
}
/*
 * Reset and stop device.
 */
static void
ixgbe_dev_close(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	ixgbe_reset_hw(hw);

	ixgbe_dev_stop(dev);
	hw->adapter_stopped = 1;

	ixgbe_disable_pcie_master(hw);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
/*
 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
 */
static void
ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_stats *hw_stats =
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	uint32_t bprc, lxon, lxoff, total;
	uint64_t total_missed_rx, total_qbrc, total_qprc;
	unsigned i;

	total_missed_rx = 0;
	total_qbrc = 0;
	total_qprc = 0;

	hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

	for (i = 0; i < 8; i++) {
		uint32_t mp;
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		hw_stats->mpc[i] += mp;
		/* Running comprehensive total for stats display */
		total_missed_rx += hw_stats->mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			hw_stats->rnbc[i] +=
				IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		hw_stats->pxontxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hw_stats->pxonrxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
		hw_stats->pxofftxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		hw_stats->pxoffrxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		hw_stats->pxon2offc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	}
	for (i = 0; i < 16; i++) {
		hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
		hw_stats->qbrc[i] +=
			((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
		hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
		hw_stats->qbtc[i] +=
			((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
		hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));

		total_qprc += hw_stats->qprc[i];
		total_qbrc += hw_stats->qbrc[i];
	}
	hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Note that gprc counts missed packets */
	hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);

	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
			((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
			((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
			((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	hw_stats->bprc += bprc;
	hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		hw_stats->mprc -= bprc;

	hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	hw_stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	hw_stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	hw_stats->gptc -= total;
	hw_stats->mptc -= total;
	hw_stats->ptc64 -= total;
	hw_stats->gotc -= total * ETHER_MIN_LEN;

	hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	if (stats == NULL)
		return;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = total_qprc;
	stats->ibytes = total_qbrc;
	stats->opackets = hw_stats->gptc;
	stats->obytes = hw_stats->gotc;
	stats->imcasts = hw_stats->mprc;

	/* Rx Errors */
	stats->ierrors = total_missed_rx + hw_stats->crcerrs +
		hw_stats->rlec;

	/* Flow Director Stats registers */
	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	stats->fdirmatch = hw_stats->fdirmatch;
	stats->fdirmiss = hw_stats->fdirmiss;
}
static void
ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ixgbe_hw_stats *stats =
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	ixgbe_dev_stats_get(dev, NULL);

	/* Reset software totals */
	memset(stats, 0, sizeof(*stats));
}
static void
ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Good Rx packets, include VF loopback */
	UPDATE_VF_STAT(IXGBE_VFGPRC,
	    hw_stats->last_vfgprc, hw_stats->vfgprc);

	/* Good Rx octets, include VF loopback */
	UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    hw_stats->last_vfgorc, hw_stats->vfgorc);

	/* Good Tx packets, include VF loopback */
	UPDATE_VF_STAT(IXGBE_VFGPTC,
	    hw_stats->last_vfgptc, hw_stats->vfgptc);

	/* Good Tx octets, include VF loopback */
	UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    hw_stats->last_vfgotc, hw_stats->vfgotc);

	/* Rx Multicast packets */
	UPDATE_VF_STAT(IXGBE_VFMPRC,
	    hw_stats->last_vfmprc, hw_stats->vfmprc);

	if (stats == NULL)
		return;

	memset(stats, 0, sizeof(*stats));
	stats->ipackets = hw_stats->vfgprc;
	stats->ibytes = hw_stats->vfgorc;
	stats->opackets = hw_stats->vfgptc;
	stats->obytes = hw_stats->vfgotc;
	stats->imcasts = hw_stats->vfmprc;
}
static void
ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Sync HW registers to the last stats */
	ixgbevf_dev_stats_get(dev, NULL);

	/* reset HW current stats */
	hw_stats->vfgprc = 0;
	hw_stats->vfgorc = 0;
	hw_stats->vfgptc = 0;
	hw_stats->vfgotc = 0;
	hw_stats->vfmprc = 0;
}
static void
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->max_rx_queues = hw->mac.max_rx_queues;
	dev_info->max_tx_queues = hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
}
/* return 0 means link status changed, -1 means not changed */
static int
ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	ixgbe_link_speed link_speed;
	int link_up;
	int diag;

	link.link_status = 0;
	link.link_speed = 0;
	link.link_duplex = 0;
	memset(&old, 0, sizeof(old));
	rte_ixgbe_dev_atomic_read_link_status(dev, &old);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
	else
		diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
	if (diag != 0) {
		link.link_speed = ETH_LINK_SPEED_100;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		rte_ixgbe_dev_atomic_write_link_status(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	if (link_up == 0) {
		rte_ixgbe_dev_atomic_write_link_status(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}
	link.link_status = 1;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case IXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_speed = ETH_LINK_SPEED_100;
		break;

	case IXGBE_LINK_SPEED_100_FULL:
		link.link_speed = ETH_LINK_SPEED_100;
		break;

	case IXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_LINK_SPEED_1000;
		break;

	case IXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = ETH_LINK_SPEED_10000;
		break;
	}
	rte_ixgbe_dev_atomic_write_link_status(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
static void
ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl &= (~IXGBE_FCTRL_UPE);
	if (dev->data->all_multicast == 1)
		fctrl |= IXGBE_FCTRL_MPE;
	else
		fctrl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ixgbe_dev_link_status_print(dev);
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
	IXGBE_WRITE_FLUSH(hw);
	rte_intr_enable(&(dev->pci_dev->intr_handle));

	return 0;
}

/*
 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC);
	IXGBE_WRITE_FLUSH(hw);

	/* read-on-clear nic registers here */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	PMD_INIT_LOG(INFO, "eicr %x", eicr);
	if (eicr & IXGBE_EICR_LSC) {
		/* set flag for async link update */
		intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	}

	return 0;
}
/*
 * It gets and then prints the link status.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 */
static void
ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_ixgbe_dev_atomic_read_link_status(dev, &link);
	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			(int)(dev->data->port_id),
			(unsigned)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
			(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
		dev->pci_dev->addr.domain,
		dev->pci_dev->addr.bus,
		dev->pci_dev->addr.devid,
		dev->pci_dev->addr.function);
}
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
		return -1;
	}
	ixgbe_dev_link_update(dev, 0);

	return 0;
}

/*
 * Interrupt handler which shall be registered for alarm callback for delayed
 * handling specific interrupt to wait for the stable NIC state. As the
 * NIC interrupt state is not stable for ixgbe after link is just down,
 * it needs to wait 4 seconds to get the stable status.
 *
 * @param param
 *   The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *   void
 */
static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	IXGBE_READ_REG(hw, IXGBE_EICR);
	ixgbe_dev_interrupt_action(dev);
	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		rte_intr_enable(&(dev->pci_dev->intr_handle));
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
		IXGBE_WRITE_FLUSH(hw);
		ixgbe_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
	}
}
/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *   Pointer to interrupt handle.
 * @param param
 *   The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *   void
 */
static void
ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param)
{
	int64_t timeout;
	struct rte_eth_link link;
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	/* get the link status before link update, for predicting later */
	memset(&link, 0, sizeof(link));
	rte_ixgbe_dev_atomic_read_link_status(dev, &link);
	ixgbe_dev_interrupt_get_status(dev);
	ixgbe_dev_interrupt_action(dev);

	if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
		return;

	/* likely to up */
	if (!link.link_status)
		/* handle it 1 sec later, wait it being stable */
		timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
	/* likely to down */
	else
		/* handle it 4 sec later, wait it being stable */
		timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;

	ixgbe_dev_link_status_print(dev);
	if (rte_eal_alarm_set(timeout * 1000,
			ixgbe_dev_interrupt_delayed_handler, param) < 0)
		PMD_INIT_LOG(ERR, "Error setting alarm");
}
static int
ixgbe_dev_led_on(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
}

static int
ixgbe_dev_led_off(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
}
static int
ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ixgbe_hw *hw;
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
		ixgbe_fc_none,
		ixgbe_fc_rx_pause,
		ixgbe_fc_tx_pause,
		ixgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);

	/*
	 * At least reserve one Ethernet frame for watermark;
	 * high_water/low_water are in units of KB for ixgbe.
	 */
	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
	if ((fc_conf->high_water > max_high_water) ||
		(fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x\n", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water[0] = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = ixgbe_fc_enable(hw, 0);
	/* Not negotiated is not an error case */
	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
		return 0;

	PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err);
	return -EIO;
}
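/*
 * Illustrative arithmetic (editorial, assuming IXGBE_RXPBSIZE_SHIFT is 10,
 * i.e. a bytes-to-KB conversion): with a 512 KB Rx packet buffer
 * (rx_buf_size = 0x80000) and ETHER_MAX_LEN = 1518, max_high_water =
 * (0x80000 - 1518) >> 10 = 510 KB, so at least one full Ethernet frame of
 * headroom always remains above the permitted high_water mark.
 */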
static void
ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t enable_addr = 1;

	ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
}

static void
ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ixgbe_clear_rar(hw, index);
}
/*
 * Virtual Function operations
 */
static void
ixgbevf_intr_disable(struct ixgbe_hw *hw)
{
	PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	IXGBE_WRITE_FLUSH(hw);
}
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
{
	int diag;
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	PMD_INIT_FUNC_TRACE();

	/* Allocate the array of pointers to RX queue structures */
	diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d"
			     " pointers to RX queues failed", dev->data->port_id,
			     nb_rx_q);
		return diag;
	}

	/* Allocate the array of pointers to TX queue structures */
	diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d"
			     " pointers to TX queues failed", dev->data->port_id,
			     nb_tx_q);
		return diag;
	}

	if (!conf->rxmode.hw_strip_crc) {
		/*
		 * VF has no ability to enable/disable HW CRC stripping.
		 * Keep the persistent behavior the same as Host PF.
		 */
		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
		conf->rxmode.hw_strip_crc = 1;
	}

	return 0;
}
static int
ixgbevf_dev_start(struct rte_eth_dev *dev)
{
	int err = 0;

	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");

	ixgbevf_dev_tx_init(dev);
	err = ixgbevf_dev_rx_init(dev);
	if (err) {
		ixgbe_dev_clear_queues(dev);
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
		return err;
	}

	ixgbevf_dev_rxtx_start(dev);

	return 0;
}

static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");

	hw->adapter_stopped = 0;
	ixgbe_stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}