4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/queue.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
47 #include <rte_interrupts.h>
49 #include <rte_debug.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_tailq.h>
57 #include <rte_alarm.h>
58 #include <rte_ether.h>
59 #include <rte_ethdev.h>
60 #include <rte_atomic.h>
61 #include <rte_malloc.h>
63 #include "ixgbe_logs.h"
64 #include "ixgbe/ixgbe_api.h"
65 #include "ixgbe/ixgbe_vf.h"
66 #include "ixgbe/ixgbe_common.h"
67 #include "ixgbe_ethdev.h"
70 * High threshold controlling when to start sending XOFF frames. Must be at
71 * least 8 bytes less than receive packet buffer size. This value is in units
74 #define IXGBE_FC_HI 0x80
77 * Low threshold controlling when to start sending XON frames. This value is
78 * in units of 1024 bytes.
80 #define IXGBE_FC_LO 0x40
82 /* Timer value included in XOFF frames. */
83 #define IXGBE_FC_PAUSE 0x680
/* Polling timeouts used when waiting for link state transitions. */
85 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
86 #define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
/* Number of per-queue statistics counters; derived from the qprc array of the
 * enclosing ixgbe_hw_stats, so it must be expanded where `hw_stats` is in scope. */
88 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
/*
 * Forward declarations of the ethdev callbacks implemented below.
 * PF (physical function) callbacks first, then the VF set.
 * NOTE(review): this excerpt has gaps — some prototypes are missing their
 * trailing parameter lines (e.g. queue_stats_mapping_set, vlan_filter_set).
 */
90 static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
91 struct rte_eth_dev *eth_dev);
92 static int ixgbe_dev_configure(struct rte_eth_dev *dev);
93 static int ixgbe_dev_start(struct rte_eth_dev *dev);
94 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
95 static void ixgbe_dev_close(struct rte_eth_dev *dev);
96 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
97 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
98 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
99 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
100 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
101 int wait_to_complete);
102 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
103 struct rte_eth_stats *stats);
104 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
105 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
109 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
110 struct rte_eth_dev_info *dev_info);
111 static void ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
114 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
115 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
116 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
117 struct rte_eth_fc_conf *fc_conf);
118 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
119 static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
120 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
121 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
122 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
124 static void ixgbe_dev_interrupt_delayed_handler(void *param);
125 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
126 uint32_t index, uint32_t pool);
127 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
129 /* For Virtual Function support */
130 static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
131 struct rte_eth_dev *eth_dev);
132 static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
133 static int ixgbevf_dev_start(struct rte_eth_dev *dev);
134 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
135 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
136 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
137 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
140 * Define VF Stats MACRO for Non "cleared on read" register
/* Accumulate a 32-bit VF counter: read the register, add the delta since the
 * last snapshot into `cur`. NOTE(review): the macro bodies are truncated in
 * this excerpt — the do/while wrapper and the `last = latest` update lines
 * are not visible; confirm against the full file. */
142 #define UPDATE_VF_STAT(reg, last, cur) \
144 u32 latest = IXGBE_READ_REG(hw, reg); \
145 cur += latest - last; \
/* Same for a 36-bit counter split across LSB/MSB registers; the masked
 * addition handles wrap-around of the 36-bit value. */
149 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \
151 u64 new_lsb = IXGBE_READ_REG(hw, lsb); \
152 u64 new_msb = IXGBE_READ_REG(hw, msb); \
153 u64 latest = ((new_msb << 32) | new_lsb); \
154 cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
159 * The set of PCI devices this driver supports
/* PCI device IDs supported by the PF driver; the entries are expanded from
 * rte_pci_dev_ids.h via the RTE_PCI_DEV_ID_DECL macro. */
161 static struct rte_pci_id pci_id_ixgbe_map[] = {
163 #undef RTE_LIBRTE_IGB_PMD
164 #define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
165 #include "rte_pci_dev_ids.h"
167 { .vendor_id = 0, /* sentinel */ },
172 * The set of PCI devices this driver supports (for 82599 VF)
/* PCI device IDs supported by the VF driver: only the 82599 virtual function. */
174 static struct rte_pci_id pci_id_ixgbevf_map[] = {
176 .vendor_id = PCI_VENDOR_ID_INTEL,
177 .device_id = IXGBE_DEV_ID_82599_VF,
178 .subsystem_vendor_id = PCI_ANY_ID,
179 .subsystem_device_id = PCI_ANY_ID,
181 { .vendor_id = 0, /* sentinel */ },
/* Ethdev operations table for the physical function: full feature set
 * including flow control, RAR (MAC filter) management and Flow Director. */
184 static struct eth_dev_ops ixgbe_eth_dev_ops = {
185 .dev_configure = ixgbe_dev_configure,
186 .dev_start = ixgbe_dev_start,
187 .dev_stop = ixgbe_dev_stop,
188 .dev_close = ixgbe_dev_close,
189 .promiscuous_enable = ixgbe_dev_promiscuous_enable,
190 .promiscuous_disable = ixgbe_dev_promiscuous_disable,
191 .allmulticast_enable = ixgbe_dev_allmulticast_enable,
192 .allmulticast_disable = ixgbe_dev_allmulticast_disable,
193 .link_update = ixgbe_dev_link_update,
194 .stats_get = ixgbe_dev_stats_get,
195 .stats_reset = ixgbe_dev_stats_reset,
196 .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
197 .dev_infos_get = ixgbe_dev_info_get,
198 .vlan_filter_set = ixgbe_vlan_filter_set,
199 .rx_queue_setup = ixgbe_dev_rx_queue_setup,
200 .rx_queue_release = ixgbe_dev_rx_queue_release,
201 .tx_queue_setup = ixgbe_dev_tx_queue_setup,
202 .tx_queue_release = ixgbe_dev_tx_queue_release,
203 .dev_led_on = ixgbe_dev_led_on,
204 .dev_led_off = ixgbe_dev_led_off,
205 .flow_ctrl_set = ixgbe_flow_ctrl_set,
206 .mac_addr_add = ixgbe_add_rar,
207 .mac_addr_remove = ixgbe_remove_rar,
208 .fdir_add_signature_filter = ixgbe_fdir_add_signature_filter,
209 .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
210 .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
211 .fdir_infos_get = ixgbe_fdir_info_get,
212 .fdir_add_perfect_filter = ixgbe_fdir_add_perfect_filter,
213 .fdir_update_perfect_filter = ixgbe_fdir_update_perfect_filter,
214 .fdir_remove_perfect_filter = ixgbe_fdir_remove_perfect_filter,
215 .fdir_set_masks = ixgbe_fdir_set_masks,
219 * dev_ops for virtual function, bare necessities for basic vf
220 * operation have been implemented
222 static struct eth_dev_ops ixgbevf_eth_dev_ops = {
224 .dev_configure = ixgbevf_dev_configure,
225 .dev_start = ixgbevf_dev_start,
226 .dev_stop = ixgbevf_dev_stop,
227 .link_update = ixgbe_dev_link_update,
228 .stats_get = ixgbevf_dev_stats_get,
229 .stats_reset = ixgbevf_dev_stats_reset,
/* dev_close deliberately reuses the stop routine — the VF has no separate
 * close path here. */
230 .dev_close = ixgbevf_dev_stop,
232 .dev_infos_get = ixgbe_dev_info_get,
233 .rx_queue_setup = ixgbe_dev_rx_queue_setup,
234 .rx_queue_release = ixgbe_dev_rx_queue_release,
235 .tx_queue_setup = ixgbe_dev_tx_queue_setup,
236 .tx_queue_release = ixgbe_dev_tx_queue_release,
240 * Atomically reads the link status information from global
241 * structure rte_eth_dev.
244 * - Pointer to the structure rte_eth_dev to read from.
245 * - Pointer to the buffer to be saved with the link status.
248 * - On success, zero.
249 * - On failure, negative value.
252 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
253 struct rte_eth_link *link)
255 struct rte_eth_link *dst = link;
256 struct rte_eth_link *src = &(dev->data->dev_link);
/* Copy dev_link into *link via a 64-bit compare-and-set so the snapshot is
 * atomic with respect to concurrent writers. NOTE(review): the return
 * statements of this function are not visible in this excerpt. */
258 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
259 *(uint64_t *)src) == 0)
266 * Atomically writes the link status information into global
267 * structure rte_eth_dev.
270 * - Pointer to the structure rte_eth_dev to read from.
271 * - Pointer to the buffer to be saved with the link status.
274 * - On success, zero.
275 * - On failure, negative value.
278 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
279 struct rte_eth_link *link)
281 struct rte_eth_link *dst = &(dev->data->dev_link);
282 struct rte_eth_link *src = link;
/* Mirror of the atomic read: publish *link into dev->data->dev_link with a
 * single 64-bit compare-and-set. NOTE(review): return statements are not
 * visible in this excerpt. */
284 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
285 *(uint64_t *)src) == 0)
292 * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h.
/* Returns whether the PHY is an SFP module, by matching the known SFP PHY
 * types. NOTE(review): the return statements for the matched/default cases
 * are not visible in this excerpt. */
295 ixgbe_is_sfp(struct ixgbe_hw *hw)
297 switch (hw->phy.type) {
298 case ixgbe_phy_sfp_avago:
299 case ixgbe_phy_sfp_ftl:
300 case ixgbe_phy_sfp_intel:
301 case ixgbe_phy_sfp_unknown:
302 case ixgbe_phy_sfp_passive_tyco:
303 case ixgbe_phy_sfp_passive_unknown:
311 * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
314 ixgbe_disable_intr(struct ixgbe_hw *hw)
316 PMD_INIT_FUNC_TRACE();
/* 82598 has a single interrupt mask clear register; later MACs (82599/X540)
 * additionally expose two extended EIMC registers for the upper cause bits. */
318 if (hw->mac.type == ixgbe_mac_82598EB) {
319 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
321 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
322 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
323 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
/* Flush posted writes so the masks take effect before returning. */
325 IXGBE_WRITE_FLUSH(hw);
329 * This function resets queue statistics mapping registers.
330 * From Niantic datasheet, Initialization of Statistics section:
331 * "...if software requires the queue counters, the RQSMR and TQSM registers
332 * must be re-programmed following a device reset.
/* Zero all RQSMR/TQSM queue-statistics mapping registers; per the Niantic
 * datasheet these must be re-programmed after a device reset. */
335 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
339 for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
340 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
341 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
/*
 * Map a queue's hardware packet/byte counters to a statistics index by
 * programming the 8-bit field for that queue inside the appropriate
 * RQSMR (RX) or TQSM (TX) register. Only supported on 82599 and X540.
 * NOTE(review): several lines (parameter list, returns, if/else around the
 * rx/tx branches) are missing from this excerpt.
 */
347 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
352 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
353 #define NB_QMAP_FIELDS_PER_QSM_REG 4
354 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
356 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
357 struct ixgbe_stat_mapping_registers *stat_mappings =
358 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
359 uint32_t qsmr_mask = 0;
360 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
364 if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
367 PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
368 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
/* Four 8-bit map fields per 32-bit register: pick register n and field offset. */
370 n = queue_id / NB_QMAP_FIELDS_PER_QSM_REG;
371 if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
372 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
375 offset = queue_id % NB_QMAP_FIELDS_PER_QSM_REG;
377 /* Now clear any previous stat_idx set */
378 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
380 stat_mappings->tqsm[n] &= ~clearing_mask;
382 stat_mappings->rqsmr[n] &= ~clearing_mask;
/* Install the new stat index into the shadow copy. */
384 q_map = (uint32_t)stat_idx;
385 q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
386 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
388 stat_mappings->tqsm[n] |= qsmr_mask;
390 stat_mappings->rqsmr[n] |= qsmr_mask;
392 PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
394 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx,
395 is_rx ? "RQSMR" : "TQSM",n, is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
397 /* Now write the mapping in the appropriate register */
399 PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n",
400 stat_mappings->rqsmr[n], n);
401 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
404 PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n",
405 stat_mappings->tqsm[n], n);
406 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
/* Re-program RQSMR/TQSM from the software shadow table, used to restore the
 * queue-statistics mappings after a device reset. */
412 ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
414 struct ixgbe_stat_mapping_registers *stat_mappings =
415 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
416 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
419 /* write whatever was in stat mapping table to the NIC */
420 for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
422 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
425 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
430 * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
431 * It returns 0 on success.
/*
 * Per-device initialization for the PF, based on ixgbe_attach() in
 * ixgbe/ixgbe.c: installs the ops table and burst handlers, initializes the
 * shared code and hardware, allocates the MAC address table, and registers
 * the interrupt callback. Returns 0 on success.
 * NOTE(review): several lines (returns, else branches, closing braces) are
 * missing from this excerpt.
 */
434 eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
435 struct rte_eth_dev *eth_dev)
437 struct rte_pci_device *pci_dev;
438 struct ixgbe_hw *hw =
439 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
440 struct ixgbe_vfta * shadow_vfta =
441 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
446 PMD_INIT_FUNC_TRACE();
448 eth_dev->dev_ops = &ixgbe_eth_dev_ops;
449 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
450 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
452 /* for secondary processes, we don't initialise any further as primary
453 * has already done this work. Only check we don't need a different
455 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
456 if (eth_dev->data->scattered_rx)
457 eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
460 pci_dev = eth_dev->pci_dev;
462 /* Vendor and Device ID need to be set before init of shared code */
463 hw->device_id = pci_dev->id.device_id;
464 hw->vendor_id = pci_dev->id.vendor_id;
465 hw->hw_addr = (void *)pci_dev->mem_resource.addr;
467 /* Initialize the shared code */
468 diag = ixgbe_init_shared_code(hw);
469 if (diag != IXGBE_SUCCESS) {
470 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
474 /* Get Hardware Flow Control setting */
475 hw->fc.requested_mode = ixgbe_fc_full;
476 hw->fc.current_mode = ixgbe_fc_full;
477 hw->fc.pause_time = IXGBE_FC_PAUSE;
478 hw->fc.low_water = IXGBE_FC_LO;
479 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
480 hw->fc.high_water[i] = IXGBE_FC_HI;
483 ixgbe_disable_intr(hw);
485 /* Make sure we have a good EEPROM before we read from it */
486 diag = ixgbe_validate_eeprom_checksum(hw, &csum);
487 if (diag != IXGBE_SUCCESS) {
488 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
492 diag = ixgbe_init_hw(hw);
495 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
496 * is called too soon after the kernel driver unbinding/binding occurs.
497 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
498 * but for non-copper devies, ixgbe_identify_sfp_module_generic() is
499 * also called. See ixgbe_identify_phy_82599(). The reason for the
500 * failure is not known, and only occurs when virtualisation features
501 * are disabled in the bios. A delay of 100ms was found to be enough by
502 * trial-and-error, and is doubled to be safe.
504 if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
506 diag = ixgbe_init_hw(hw);
509 if (diag == IXGBE_ERR_EEPROM_VERSION) {
510 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
511 "LOM. Please be aware there may be issues associated "
512 "with your hardware.\n If you are experiencing problems "
513 "please contact your Intel or hardware representative "
514 "who provided you with this hardware.\n");
515 } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
516 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
518 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
522 /* pick up the PCI bus settings for reporting later */
523 ixgbe_get_bus_info(hw);
525 /* reset mappings for queue statistics hw counters*/
526 ixgbe_reset_qstat_mappings(hw);
528 /* Allocate memory for storing MAC addresses */
529 eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
530 hw->mac.num_rar_entries, 0);
531 if (eth_dev->data->mac_addrs == NULL) {
533 "Failed to allocate %d bytes needed to store MAC addresses",
534 ETHER_ADDR_LEN * hw->mac.num_rar_entries);
537 /* Copy the permanent MAC address */
538 ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
539 ð_dev->data->mac_addrs[0]);
541 /* initialize the vfta */
542 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
544 /* let hardware know driver is loaded */
545 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
546 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
547 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
549 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
551 "MAC: %d, PHY: %d, SFP+: %d\n",
552 (int) hw->mac.type, (int) hw->phy.type,
553 (int) hw->phy.sfp_type);
555 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
556 (int) hw->mac.type, (int) hw->phy.type);
558 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
559 eth_dev->data->port_id, pci_dev->id.vendor_id,
560 pci_dev->id.device_id);
562 rte_intr_callback_register(&(pci_dev->intr_handle),
563 ixgbe_dev_interrupt_handler, (void *)eth_dev);
569 * Virtual Function device init
/*
 * Per-device initialization for the 82599 VF: installs the VF ops table,
 * initializes shared code and mailbox, disables VF interrupts, resets and
 * starts the hardware, and allocates the MAC address table.
 * NOTE(review): returns and some branches are missing from this excerpt.
 */
572 eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
573 struct rte_eth_dev *eth_dev)
575 struct rte_pci_device *pci_dev;
576 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
579 PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");
581 eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
582 pci_dev = eth_dev->pci_dev;
/* IDs and BAR address must be set before shared-code init. */
584 hw->device_id = pci_dev->id.device_id;
585 hw->vendor_id = pci_dev->id.vendor_id;
586 hw->hw_addr = (void *)pci_dev->mem_resource.addr;
588 /* Initialize the shared code */
589 diag = ixgbe_init_shared_code(hw);
590 if (diag != IXGBE_SUCCESS) {
591 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
595 /* init_mailbox_params */
596 hw->mbx.ops.init_params(hw);
598 /* Disable the interrupts for VF */
599 ixgbevf_intr_disable(hw);
601 hw->mac.num_rar_entries = hw->mac.max_rx_queues;
602 diag = hw->mac.ops.reset_hw(hw);
604 /* Allocate memory for storing MAC addresses */
605 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
606 hw->mac.num_rar_entries, 0);
607 if (eth_dev->data->mac_addrs == NULL) {
609 "Failed to allocate %d bytes needed to store MAC addresses",
610 ETHER_ADDR_LEN * hw->mac.num_rar_entries);
613 /* Copy the permanent MAC address */
614 ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
615 ð_dev->data->mac_addrs[0]);
617 /* reset the hardware with the new settings */
618 diag = hw->mac.ops.start_hw(hw);
624 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
628 PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
629 eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
630 "ixgbe_mac_82599_vf");
/* PF driver registration record: PCI ID table, init hook and private-data
 * size; requires the igb_uio kernel module for BAR mapping. */
635 static struct eth_driver rte_ixgbe_pmd = {
637 .name = "rte_ixgbe_pmd",
638 .id_table = pci_id_ixgbe_map,
639 .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
641 .eth_dev_init = eth_ixgbe_dev_init,
642 .dev_private_size = sizeof(struct ixgbe_adapter),
646 * virtual function driver struct
/* VF driver registration record, mirroring rte_ixgbe_pmd for the 82599 VF. */
648 static struct eth_driver rte_ixgbevf_pmd = {
650 .name = "rte_ixgbevf_pmd",
651 .id_table = pci_id_ixgbevf_map,
652 .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
654 .eth_dev_init = eth_ixgbevf_dev_init,
655 .dev_private_size = sizeof(struct ixgbe_adapter),
659 * Driver initialization routine.
660 * Invoked once at EAL init time.
661 * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
/* Register the PF poll-mode driver with the ethdev layer at EAL init time.
 * NOTE(review): the return statement is not visible in this excerpt. */
664 rte_ixgbe_pmd_init(void)
666 PMD_INIT_FUNC_TRACE();
668 rte_eth_driver_register(&rte_ixgbe_pmd);
673 * VF Driver initialization routine.
674 * Invoked once at EAL init time.
675 * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
/* Register the VF poll-mode driver with the ethdev layer at EAL init time.
 * Uses PMD_INIT_FUNC_TRACE() for consistency with rte_ixgbe_pmd_init() and
 * the rest of this file (previously DEBUGFUNC).
 * NOTE(review): the return statement is not visible in this excerpt. */
678 rte_ixgbevf_pmd_init(void)
680 PMD_INIT_FUNC_TRACE();
682 rte_eth_driver_register(&rte_ixgbevf_pmd);
/* Enable/disable filtering of one VLAN ID: set or clear its bit in the
 * hardware VFTA (128 x 32-bit registers) and mirror the value in the
 * software shadow copy. NOTE(review): the bit set/clear branch on `on` is
 * not visible in this excerpt. */
687 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
689 struct ixgbe_hw *hw =
690 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
691 struct ixgbe_vfta * shadow_vfta =
692 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
/* vlan_id[11:5] selects the VFTA register, vlan_id[4:0] the bit within it. */
697 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
698 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
699 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
704 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
706 /* update local VFTA copy */
707 shadow_vfta->vfta[vid_idx] = vfta;
/* Disable hardware VLAN support: turn off the VLAN filter table and VLAN
 * tag stripping (global VME bit on 82598, per-queue RXDCTL.VME otherwise). */
711 ixgbe_vlan_hw_support_disable(struct rte_eth_dev *dev)
713 struct ixgbe_hw *hw =
714 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
719 PMD_INIT_FUNC_TRACE();
721 /* Filter Table Disable */
722 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
723 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
725 if (hw->mac.type == ixgbe_mac_82598EB)
726 vlnctrl &= ~IXGBE_VLNCTRL_VME;
728 /* On 82599 the VLAN enable is per/queue in RXDCTL */
729 for (i = 0; i < dev->data->nb_rx_queues; i++) {
730 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
731 rxdctl &= ~IXGBE_RXDCTL_VME;
732 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
735 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* Enable hardware VLAN support: turn on the VLAN filter table and VLAN tag
 * stripping, then restore the full VFTA from the software shadow copy. */
739 ixgbe_vlan_hw_support_enable(struct rte_eth_dev *dev)
741 struct ixgbe_hw *hw =
742 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
743 struct ixgbe_vfta * shadow_vfta =
744 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
749 PMD_INIT_FUNC_TRACE();
751 /* Filter Table Enable */
752 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
753 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
754 vlnctrl |= IXGBE_VLNCTRL_VFE;
756 if (hw->mac.type == ixgbe_mac_82598EB)
757 vlnctrl |= IXGBE_VLNCTRL_VME;
759 /* On 82599 the VLAN enable is per/queue in RXDCTL */
760 for (i = 0; i < dev->data->nb_rx_queues; i++) {
761 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
762 rxdctl |= IXGBE_RXDCTL_VME;
763 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
766 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
768 /* write whatever is in local vfta copy */
769 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
770 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
/* Configure callback for the PF. Flags that a link-status update is needed
 * after initialization. NOTE(review): the visible prototype shows queue-count
 * parameters and the body is heavily truncated in this excerpt — confirm
 * against the full file before relying on this signature. */
774 ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
776 struct ixgbe_interrupt *intr =
777 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
780 PMD_INIT_FUNC_TRACE();
786 /* set flag to update link status after init */
787 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
793 * Configure device link speed and setup link.
794 * It returns 0 on success.
/*
 * Start the device: stop/reset the adapter, initialize TX and RX units,
 * start the queues, set up the link at the configured speed, optionally
 * enable the link-status-change interrupt, apply VLAN filtering and Flow
 * Director configuration, and restore queue-statistics mappings.
 * Returns 0 on success. NOTE(review): many lines (error gotos, returns,
 * ixgbe_pf_reset_hw-style calls) are missing from this excerpt.
 */
797 ixgbe_dev_start(struct rte_eth_dev *dev)
799 struct ixgbe_hw *hw =
800 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
801 int err, link_up = 0, negotiate = 0;
804 PMD_INIT_FUNC_TRACE();
806 /* IXGBE devices don't support half duplex */
807 if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
808 (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
809 PMD_INIT_LOG(ERR, "Invalid link_duplex (%u) for port %u\n",
810 dev->data->dev_conf.link_duplex,
/* Stop the adapter before reconfiguring; the stop routine sets
 * adapter_stopped itself. */
816 hw->adapter_stopped = FALSE;
817 ixgbe_stop_adapter(hw);
819 /* reinitialize adapter
820 * this calls reset and start */
823 /* initialize transmission unit */
824 ixgbe_dev_tx_init(dev);
826 /* This can fail when allocating mbufs for descriptor rings */
827 err = ixgbe_dev_rx_init(dev);
829 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
833 ixgbe_dev_rxtx_start(dev);
835 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
836 err = hw->mac.ops.setup_sfp(hw);
841 /* Turn on the laser */
842 if (hw->phy.multispeed_fiber)
843 ixgbe_enable_tx_laser(hw);
845 err = ixgbe_check_link(hw, &speed, &link_up, 0);
848 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
/* Translate the requested ethdev link speed into an ixgbe speed mask. */
852 switch(dev->data->dev_conf.link_speed) {
853 case ETH_LINK_SPEED_AUTONEG:
854 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
855 IXGBE_LINK_SPEED_82599_AUTONEG :
856 IXGBE_LINK_SPEED_82598_AUTONEG;
858 case ETH_LINK_SPEED_100:
860 * Invalid for 82598 but error will be detected by
863 speed = IXGBE_LINK_SPEED_100_FULL;
865 case ETH_LINK_SPEED_1000:
866 speed = IXGBE_LINK_SPEED_1GB_FULL;
868 case ETH_LINK_SPEED_10000:
869 speed = IXGBE_LINK_SPEED_10GB_FULL;
872 PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
873 dev->data->dev_conf.link_speed, dev->data->port_id);
877 err = ixgbe_setup_link(hw, speed, negotiate, link_up);
881 /* check if lsc interrupt is enabled */
882 if (dev->data->dev_conf.intr_conf.lsc != 0) {
883 err = ixgbe_dev_interrupt_setup(dev);
889 * If VLAN filtering is enabled, set up VLAN tag offload and filtering
892 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
893 ixgbe_vlan_hw_support_enable(dev);
895 ixgbe_vlan_hw_support_disable(dev);
897 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
898 err = ixgbe_fdir_configure(dev);
903 ixgbe_restore_statistics_mapping(dev);
/* Error path: clean up queues before returning the failure code. */
908 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
909 ixgbe_dev_clear_queues(dev);
914 * Stop device: disable rx and tx functions to allow for reconfiguring.
/* Stop the device: mask interrupts, stop the adapter, turn off the TX laser
 * on multispeed fiber, release queue resources and clear the recorded link
 * status so a subsequent start reconfigures cleanly. */
917 ixgbe_dev_stop(struct rte_eth_dev *dev)
919 struct rte_eth_link link;
920 struct ixgbe_hw *hw =
921 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
923 PMD_INIT_FUNC_TRACE();
925 /* disable interrupts */
926 ixgbe_disable_intr(hw);
/* Reset to FALSE first; ixgbe_stop_adapter() sets adapter_stopped itself. */
930 hw->adapter_stopped = FALSE;
933 ixgbe_stop_adapter(hw);
935 /* Turn off the laser */
936 if (hw->phy.multispeed_fiber)
937 ixgbe_disable_tx_laser(hw);
939 ixgbe_dev_clear_queues(dev);
941 /* Clear recorded link status */
942 memset(&link, 0, sizeof(link));
943 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
947 * Reset and stop device.
/* Close the device: mark the adapter stopped, quiesce PCIe master accesses,
 * and reprogram RAR[0] with the hardware MAC in case the user changed it.
 * NOTE(review): lines between the trace and the stopped flag (likely a reset
 * call and a stop call) are missing from this excerpt. */
950 ixgbe_dev_close(struct rte_eth_dev *dev)
952 struct ixgbe_hw *hw =
953 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
955 PMD_INIT_FUNC_TRACE();
961 hw->adapter_stopped = 1;
963 ixgbe_disable_pcie_master(hw);
965 /* reprogram the RAR[0] in case user changed it. */
966 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
970 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
/*
 * Read all hardware statistics registers (most are clear-on-read),
 * accumulate them into the driver's ixgbe_hw_stats shadow, and fill the
 * caller's rte_eth_stats. Based on ixgbe_update_stats_counters() in
 * ixgbe/ixgbe.c. Called with stats == NULL by ixgbe_dev_stats_reset() to
 * drain the clear-on-read registers — presumably a NULL guard exists in the
 * lines missing from this excerpt; verify against the full file.
 */
973 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
975 struct ixgbe_hw *hw =
976 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
977 struct ixgbe_hw_stats *hw_stats =
978 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
979 uint32_t bprc, lxon, lxoff, total;
980 uint64_t total_missed_rx, total_qbrc, total_qprc;
987 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
988 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
989 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
990 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
/* Per-traffic-class counters (8 TCs): missed packets and priority
 * XON/XOFF flow-control frames. */
992 for (i = 0; i < 8; i++) {
994 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
995 /* global total per queue */
996 hw_stats->mpc[i] += mp;
997 /* Running comprehensive total for stats display */
998 total_missed_rx += hw_stats->mpc[i];
999 if (hw->mac.type == ixgbe_mac_82598EB)
1000 hw_stats->rnbc[i] +=
1001 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1002 hw_stats->pxontxc[i] +=
1003 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1004 hw_stats->pxonrxc[i] +=
1005 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1006 hw_stats->pxofftxc[i] +=
1007 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1008 hw_stats->pxoffrxc[i] +=
1009 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1010 hw_stats->pxon2offc[i] +=
1011 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
/* Per-queue packet/byte counters; byte counts are 36-bit split L/H. */
1013 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
1014 hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1015 hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1016 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
1017 hw_stats->qbrc[i] +=
1018 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
1019 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
1020 hw_stats->qbtc[i] +=
1021 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
1022 hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1024 total_qprc += hw_stats->qprc[i];
1025 total_qbrc += hw_stats->qbrc[i];
1027 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1028 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1029 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1031 /* Note that gprc counts missed packets */
1032 hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
/* 82599/X540 have full 64-bit good-octet/total-octet counters; 82598 only
 * has the high register. */
1034 if (hw->mac.type != ixgbe_mac_82598EB) {
1035 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1036 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1037 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1038 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1039 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1040 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1041 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1042 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1044 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1045 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1046 /* 82598 only has a counter in the high register */
1047 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1048 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1049 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1053 * Workaround: mprc hardware is incorrectly counting
1054 * broadcasts, so for now we subtract those.
1056 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1057 hw_stats->bprc += bprc;
1058 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1059 if (hw->mac.type == ixgbe_mac_82598EB)
1060 hw_stats->mprc -= bprc;
1062 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1063 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1064 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1065 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1066 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1067 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1069 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1070 hw_stats->lxontxc += lxon;
1071 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1072 hw_stats->lxofftxc += lxoff;
1073 total = lxon + lxoff;
/* Flow-control frames are counted as transmitted packets by the hardware;
 * subtract them from the good-TX counters. */
1075 hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1076 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1077 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1078 hw_stats->gptc -= total;
1079 hw_stats->mptc -= total;
1080 hw_stats->ptc64 -= total;
1081 hw_stats->gotc -= total * ETHER_MIN_LEN;
1083 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1084 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1085 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1086 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1087 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1088 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1089 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1090 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1091 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1092 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1093 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1094 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1095 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1096 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1097 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1098 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1099 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1100 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1101 /* Only read FCOE on 82599 */
1102 if (hw->mac.type != ixgbe_mac_82598EB) {
1103 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1104 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1105 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1106 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1107 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1113 /* Fill out the rte_eth_stats statistics structure */
1114 stats->ipackets = total_qprc;
1115 stats->ibytes = total_qbrc;
1116 stats->opackets = hw_stats->gptc;
1117 stats->obytes = hw_stats->gotc;
1118 stats->imcasts = hw_stats->mprc;
1120 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
1121 stats->q_ipackets[i] = hw_stats->qprc[i];
1122 stats->q_opackets[i] = hw_stats->qptc[i];
1123 stats->q_ibytes[i] = hw_stats->qbrc[i];
1124 stats->q_obytes[i] = hw_stats->qbtc[i];
1125 stats->q_errors[i] = hw_stats->qprdc[i];
1129 stats->ierrors = total_missed_rx + hw_stats->crcerrs +
1134 /* Flow Director Stats registers */
1135 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1136 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1137 stats->fdirmatch = hw_stats->fdirmatch;
1138 stats->fdirmiss = hw_stats->fdirmiss;
/*
 * Reset PF statistics: latch (and thereby clear) the read-on-clear HW
 * counters by calling the stats getter, then zero the accumulated
 * software totals.
 * NOTE(review): passes NULL to ixgbe_dev_stats_get() — relies on that
 * function tolerating a NULL stats pointer; confirm the guard exists.
 */
1142 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
1144 struct ixgbe_hw_stats *stats =
1145 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1147 /* HW registers are cleared on read */
1148 ixgbe_dev_stats_get(dev, NULL);
1150 /* Reset software totals */
1151 memset(stats, 0, sizeof(*stats));
1155 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1157 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1158 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
1159 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1161 /* Good Rx packet, include VF loopback */
1162 UPDATE_VF_STAT(IXGBE_VFGPRC,
1163 hw_stats->last_vfgprc, hw_stats->vfgprc);
1165 /* Good Rx octets, include VF loopback */
1166 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1167 hw_stats->last_vfgorc, hw_stats->vfgorc);
1169 /* Good Tx packet, include VF loopback */
1170 UPDATE_VF_STAT(IXGBE_VFGPTC,
1171 hw_stats->last_vfgptc, hw_stats->vfgptc);
1173 /* Good Tx octets, include VF loopback */
1174 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1175 hw_stats->last_vfgotc, hw_stats->vfgotc);
1177 /* Rx Multicst Packet */
1178 UPDATE_VF_STAT(IXGBE_VFMPRC,
1179 hw_stats->last_vfmprc, hw_stats->vfmprc);
1184 memset(stats, 0, sizeof(*stats));
1185 stats->ipackets = hw_stats->vfgprc;
1186 stats->ibytes = hw_stats->vfgorc;
1187 stats->opackets = hw_stats->vfgptc;
1188 stats->obytes = hw_stats->vfgotc;
1189 stats->imcasts = hw_stats->vfmprc;
/*
 * Reset VF statistics: first sync the "last_*" shadow values by calling
 * the stats getter with a NULL output pointer, then zero the running
 * totals. The last_* shadows are intentionally left untouched so the
 * next delta computation against the HW counters stays correct.
 */
1193 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
1195 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
1196 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1198 /* Sync HW register to the last stats */
1199 ixgbevf_dev_stats_get(dev, NULL);
1201 /* reset HW current stats*/
1202 hw_stats->vfgprc = 0;
1203 hw_stats->vfgorc = 0;
1204 hw_stats->vfgptc = 0;
1205 hw_stats->vfgotc = 0;
1206 hw_stats->vfmprc = 0;
/*
 * Report device capabilities (queue counts, buffer/frame size limits,
 * number of MAC address slots) to the caller-supplied dev_info.
 */
1211 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1213 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1215 dev_info->max_rx_queues = hw->mac.max_rx_queues;
1216 dev_info->max_tx_queues = hw->mac.max_tx_queues;
1217 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
1218 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
1219 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1222 /* return 0 means link status changed, -1 means not changed */
1224 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1226 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1227 struct rte_eth_link link, old;
1228 ixgbe_link_speed link_speed;
/* Start from a "link down" default and snapshot the previously stored
 * link state so the return value can report whether it changed. */
1232 link.link_status = 0;
1233 link.link_speed = 0;
1234 link.link_duplex = 0;
1235 memset(&old, 0, sizeof(old));
1236 rte_ixgbe_dev_atomic_read_link_status(dev, &old);
1238 /* check if it needs to wait to complete, if lsc interrupt is enabled */
1239 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1240 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
/* otherwise: blocking check — wait for link to complete */
1242 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
/* On check failure report a conservative 100M/half-duplex state.
 * NOTE(review): surrounding branch/return lines are missing from this
 * extract — verify the error and link-down paths against the full file. */
1244 link.link_speed = ETH_LINK_SPEED_100;
1245 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1246 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
1247 if (link.link_status == old.link_status)
1253 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
1254 if (link.link_status == old.link_status)
/* Link is up: translate the HW-reported speed to ethdev constants */
1258 link.link_status = 1;
1259 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1261 switch (link_speed) {
1263 case IXGBE_LINK_SPEED_UNKNOWN:
1264 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1265 link.link_speed = ETH_LINK_SPEED_100;
1268 case IXGBE_LINK_SPEED_100_FULL:
1269 link.link_speed = ETH_LINK_SPEED_100;
1272 case IXGBE_LINK_SPEED_1GB_FULL:
1273 link.link_speed = ETH_LINK_SPEED_1000;
1276 case IXGBE_LINK_SPEED_10GB_FULL:
1277 link.link_speed = ETH_LINK_SPEED_10000;
1280 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
1282 if (link.link_status == old.link_status)
/*
 * Enable promiscuous mode: set both unicast (UPE) and multicast (MPE)
 * promiscuous bits in the Filter Control register.
 */
1289 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1291 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1294 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1295 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1296 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
/*
 * Disable promiscuous mode: always clear the unicast promiscuous bit,
 * but keep the multicast promiscuous bit set if the application still
 * has all-multicast mode enabled.
 */
1300 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1302 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1305 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1306 fctrl &= (~IXGBE_FCTRL_UPE);
1307 if (dev->data->all_multicast == 1)
1308 fctrl |= IXGBE_FCTRL_MPE;
/* else: all-multicast off as well, so clear MPE too */
1310 fctrl &= (~IXGBE_FCTRL_MPE);
1311 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
/* Enable all-multicast mode by setting MPE in the Filter Control register. */
1315 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1317 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1320 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1321 fctrl |= IXGBE_FCTRL_MPE;
1322 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
/*
 * Disable all-multicast mode. If promiscuous mode is active, MPE must
 * stay set (promiscuous implies receiving all multicast), so bail out.
 */
1326 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1328 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1331 if (dev->data->promiscuous == 1)
1332 return; /* must remain in all_multicast mode */
1334 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1335 fctrl &= (~IXGBE_FCTRL_MPE);
1336 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1340 * It clears the interrupt causes and enables the interrupt.
1341 * It will be called once only during nic initialized.
1344 * Pointer to struct rte_eth_dev.
1347 * - On success, zero.
1348 * - On failure, a negative value.
1351 ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev)
1353 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Print the current link state, then unmask the link-state-change
 * (LSC) interrupt and enable the PCI interrupt line. */
1355 ixgbe_dev_link_status_print(dev);
1356 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
1357 IXGBE_WRITE_FLUSH(hw);
1358 rte_intr_enable(&(dev->pci_dev->intr_handle));
1364 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
1367 * Pointer to struct rte_eth_dev.
1370 * - On success, zero.
1371 * - On failure, a negative value.
1374 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1377 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1378 struct ixgbe_interrupt *intr =
1379 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
/* Mask the LSC interrupt while its cause is being read and handled */
1381 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC);
1382 IXGBE_WRITE_FLUSH(hw);
1384 /* read-on-clear nic registers here */
1385 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1386 PMD_INIT_LOG(INFO, "eicr %x", eicr);
1387 if (eicr & IXGBE_EICR_LSC) {
1388 /* set flag for async link update */
1389 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1396 * It gets and then prints the link status.
1399 * Pointer to struct rte_eth_dev.
1402 * - On success, zero.
1403 * - On failure, a negative value.
1406 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
1408 struct rte_eth_link link;
1410 memset(&link, 0, sizeof(link));
1411 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
1412 if (link.link_status) {
1413 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1414 (int)(dev->data->port_id),
1415 (unsigned)link.link_speed,
1416 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1417 "full-duplex" : "half-duplex");
/* else: link is down */
1419 PMD_INIT_LOG(INFO, " Port %d: Link Down",
1420 (int)(dev->data->port_id));
/* NOTE(review): PCI address is printed with decimal conversions; PCI
 * BDF notation is conventionally hexadecimal (%04x:%02x:%02x.%x) —
 * consider fixing the format string. */
1422 PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
1423 dev->pci_dev->addr.domain,
1424 dev->pci_dev->addr.bus,
1425 dev->pci_dev->addr.devid,
1426 dev->pci_dev->addr.function);
1430 * It executes link_update after knowing an interrupt occurred.
1433 * Pointer to struct rte_eth_dev.
1436 * - On success, zero.
1437 * - On failure, a negative value.
1440 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
1442 struct ixgbe_interrupt *intr =
1443 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
/* Only act when a link-state-change was flagged by get_status() */
1445 if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
/* no-wait link refresh */
1448 ixgbe_dev_link_update(dev, 0);
1454 * Interrupt handler which shall be registered for alarm callback for delayed
1455 * handling specific interrupt to wait for the stable nic state. As the
1456 * NIC interrupt state is not stable for ixgbe after link is just down,
1457 * it needs to wait 4 seconds to get the stable status.
1460 * Pointer to interrupt handle.
1462 * The address of parameter (struct rte_eth_dev *) registered before.
1468 ixgbe_dev_interrupt_delayed_handler(void *param)
1470 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1471 struct ixgbe_interrupt *intr =
1472 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1473 struct ixgbe_hw *hw =
1474 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* EICR is read-on-clear: this read discards stale interrupt causes */
1476 IXGBE_READ_REG(hw, IXGBE_EICR);
1477 ixgbe_dev_interrupt_action(dev);
1478 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
/* Re-arm the LSC interrupt, report the new state, and notify
 * registered LSC callbacks. */
1479 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
1480 rte_intr_enable(&(dev->pci_dev->intr_handle));
1481 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
1482 IXGBE_WRITE_FLUSH(hw);
1483 ixgbe_dev_link_status_print(dev);
1484 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
1489 * Interrupt handler triggered by NIC for handling
1490 * specific interrupt.
1493 * Pointer to interrupt handle.
1495 * The address of parameter (struct rte_eth_dev *) registered before.
1501 ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param)
1504 struct rte_eth_link link;
1505 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1506 struct ixgbe_interrupt *intr =
1507 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1509 /* get the link status before link update, for predicting later */
1510 memset(&link, 0, sizeof(link));
1511 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
1512 ixgbe_dev_interrupt_get_status(dev);
1513 ixgbe_dev_interrupt_action(dev);
/* Nothing more to do unless a link-state change is still pending */
1515 if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
/* Link was down before the interrupt, so it is likely coming up */
1519 if (!link.link_status)
1520 /* handle it 1 sec later, wait it being stable */
1521 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
1522 /* likely to down */
1524 /* handle it 4 sec later, wait it being stable */
1525 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
1527 ixgbe_dev_link_status_print(dev);
/* timeout is in the same unit scaled by 1000 into the alarm's
 * microsecond argument; schedule the delayed handler */
1528 if (rte_eal_alarm_set(timeout * 1000,
1529 ixgbe_dev_interrupt_delayed_handler, param) < 0)
1530 PMD_INIT_LOG(ERR, "Error setting alarm");
/* Turn LED 0 on; returns 0 on success, -ENOTSUP if the HW call fails. */
1534 ixgbe_dev_led_on(struct rte_eth_dev *dev)
1536 struct ixgbe_hw *hw;
1538 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1539 return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
/* Turn LED 0 off; returns 0 on success, -ENOTSUP if the HW call fails. */
1543 ixgbe_dev_led_off(struct rte_eth_dev *dev)
1545 struct ixgbe_hw *hw;
1547 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1548 return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
/*
 * Configure IEEE 802.3x flow control from the generic fc_conf request:
 * validate the watermarks against the Rx packet-buffer size, program
 * the hw->fc fields, and enable flow control in hardware.
 */
1552 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1554 struct ixgbe_hw *hw;
1556 uint32_t rx_buf_size;
1557 uint32_t max_high_water;
/* Maps rte_eth_fc_mode values to the ixgbe HW flow-control modes */
1558 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
1565 PMD_INIT_FUNC_TRACE();
1567 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1568 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
1569 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
1572 * At least reserve one Ethernet frame for watermark
1573 * high_water/low_water in kilo bytes for ixgbe
1575 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
1576 if ((fc_conf->high_water > max_high_water) ||
1577 (fc_conf->high_water < fc_conf->low_water)) {
1578 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
1579 PMD_INIT_LOG(ERR, "High_water must <= 0x%x\n", max_high_water);
/* NOTE(review): fc_conf->mode indexes rte_fcmode_2_ixgbe_fcmode[]
 * without a bounds check — confirm the ethdev layer validates mode. */
1583 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
1584 hw->fc.pause_time = fc_conf->pause_time;
1585 hw->fc.high_water[0] = fc_conf->high_water;
1586 hw->fc.low_water = fc_conf->low_water;
1587 hw->fc.send_xon = fc_conf->send_xon;
1589 err = ixgbe_fc_enable(hw, 0);
1590 /* Not negotiated is not an error case */
1591 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
1595 PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err);
/*
 * Install a MAC address into Receive Address Register slot 'index',
 * associated with the given VMDq 'pool', and mark the entry valid.
 */
1600 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1601 uint32_t index, uint32_t pool)
1603 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1604 uint32_t enable_addr = 1;
1606 ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
/* Clear the Receive Address Register entry at 'index'. */
1610 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
1612 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1614 ixgbe_clear_rar(hw, index);
1618 * Virtual Function operations
/* Disable all VF interrupts by writing the clear mask to VTEIMC. */
1621 ixgbevf_intr_disable(struct ixgbe_hw *hw)
1623 PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");
1625 /* Clear interrupt mask to stop from interrupts being generated */
1626 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
1628 IXGBE_WRITE_FLUSH(hw);
/*
 * Validate the VF configuration. A VF cannot control HW CRC stripping,
 * so a request to keep the CRC is overridden (with a log message) to
 * match the PF behavior.
 */
1632 ixgbevf_dev_configure(struct rte_eth_dev *dev)
1634 struct rte_eth_conf* conf = &dev->data->dev_conf;
1637 if (!conf->rxmode.hw_strip_crc) {
1639 * VF has no ability to enable/disable HW CRC
1640 * Keep the persistent behavior the same as Host PF
1642 PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
1643 conf->rxmode.hw_strip_crc = 1;
/*
 * Start the VF device: initialize Tx, then Rx; on Rx init failure the
 * queues are cleared and an error is reported. On success the Rx/Tx
 * queues are started.
 */
1650 ixgbevf_dev_start(struct rte_eth_dev *dev)
1653 PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
1655 ixgbevf_dev_tx_init(dev);
1656 err = ixgbevf_dev_rx_init(dev);
/* error path: tear down whatever queue state was set up */
1658 ixgbe_dev_clear_queues(dev);
1659 PMD_INIT_LOG(ERR,"Unable to initialize RX hardware\n");
1662 ixgbevf_dev_rxtx_start(dev);
1668 ixgbevf_dev_stop(struct rte_eth_dev *dev)
1670 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1672 PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
1675 hw->adapter_stopped = 0;
1676 ixgbe_stop_adapter(hw);
1677 /* reprogram the RAR[0] in case user changed it. */
1678 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);