1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
10 #include <rte_alarm.h>
12 #include "ngbe_logs.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
17 static int ngbe_dev_close(struct rte_eth_dev *dev);
18 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
19 int wait_to_complete);
20 static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
21 static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
22 static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
25 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
26 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
27 static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
28 static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
29 static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
30 static void ngbe_dev_interrupt_handler(void *param);
31 static void ngbe_dev_interrupt_delayed_handler(void *param);
32 static void ngbe_configure_msix(struct rte_eth_dev *dev);
/* Per-queue VLAN hardware-strip bitmap helpers: queue (q) maps to bit
 * (q % bits-per-word) of word (q / bits-per-word) in (h)->bitmap[].
 * SET/CLEAR update the bit; GET reads it into (r).
 * NOTE(review): closing "} while (0)" lines are not visible in this view.
 */
34 #define NGBE_SET_HWSTRIP(h, q) do {\
35 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
36 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
37 (h)->bitmap[idx] |= 1 << bit;\
40 #define NGBE_CLEAR_HWSTRIP(h, q) do {\
41 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
42 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
43 (h)->bitmap[idx] &= ~(1 << bit);\
46 #define NGBE_GET_HWSTRIP(h, q, r) do {\
47 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
48 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
49 (r) = (h)->bitmap[idx] >> bit & 1;\
53 * The set of PCI devices this driver supports
/* PCI IDs (WangXun WX1860-family EM devices) claimed by this PMD;
 * the all-zero vendor_id entry is the table terminator.
 */
55 static const struct rte_pci_id pci_id_ngbe_map[] = {
56 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
57 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
58 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
59 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
60 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
61 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
62 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
63 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
64 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
65 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
66 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
67 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
68 { .vendor_id = 0, /* sentinel */ },
/* Rx descriptor ring limits reported through dev_info_get(). */
71 static const struct rte_eth_desc_lim rx_desc_lim = {
72 .nb_max = NGBE_RING_DESC_MAX,
73 .nb_min = NGBE_RING_DESC_MIN,
74 .nb_align = NGBE_RXD_ALIGN,
/* Tx descriptor ring limits, including max segments per packet/MTU. */
77 static const struct rte_eth_desc_lim tx_desc_lim = {
78 .nb_max = NGBE_RING_DESC_MAX,
79 .nb_min = NGBE_RING_DESC_MIN,
80 .nb_align = NGBE_TXD_ALIGN,
81 .nb_seg_max = NGBE_TX_MAX_SEG,
82 .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
85 static const struct eth_dev_ops ngbe_eth_dev_ops;
/* Reset the PF MAC through the base driver, then set the Reset Done bit
 * in PORTCTL so PF/VF mailbox operations can proceed afterwards.
 */
88 ngbe_pf_reset_hw(struct ngbe_hw *hw)
93 status = hw->mac.reset_hw(hw);
95 ctrl_ext = rd32(hw, NGBE_PORTCTL);
96 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
97 ctrl_ext |= NGBE_PORTCTL_RSTDONE;
98 wr32(hw, NGBE_PORTCTL, ctrl_ext);
/* NOTE(review): missing-SFP appears to be treated specially here; the
 * branch body is not visible in this view — confirm against full source.
 */
101 if (status == NGBE_ERR_SFP_NOT_PRESENT)
/* Re-arm device interrupts from the cached masks: misc causes via
 * IENMISC, queue vectors via the interrupt-mask-clear register.
 */
107 ngbe_enable_intr(struct rte_eth_dev *dev)
109 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
110 struct ngbe_hw *hw = ngbe_dev_hw(dev);
112 wr32(hw, NGBE_IENMISC, intr->mask_misc);
113 wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
/* Mask (disable) all queue interrupts by setting the full IMS mask. */
118 ngbe_disable_intr(struct ngbe_hw *hw)
120 PMD_INIT_FUNC_TRACE();
122 wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
127 * Ensure that all locks are released before first NVM or PHY access
130 ngbe_swfw_lock_reset(struct ngbe_hw *hw)
135 * These ones are more tricky since they are common to all ports; but
136 * swfw_sync retries last long enough (1s) to be almost sure that if
137 * lock can not be taken it is due to an improper lock of the
140 mask = NGBE_MNGSEM_SWPHY |
143 if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
144 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
/* Release unconditionally so a stale semaphore left by a crashed
 * owner is forced free before the first NVM/PHY access.
 */
146 hw->mac.release_swfw_sync(hw, mask);
/* Per-port initialization: install dev ops and Rx/Tx burst functions,
 * map the BAR, reserve the interrupt status block, bring up the base
 * driver (shared code, EEPROM, MAC), allocate MAC address tables, and
 * register/enable the PCI interrupt handler.
 * Secondary processes only (re)select the Rx/Tx burst functions and
 * return early — the primary has done the rest.
 */
150 eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
152 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
153 struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
154 struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
155 struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
156 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
157 const struct rte_memzone *mz;
161 PMD_INIT_FUNC_TRACE();
163 eth_dev->dev_ops = &ngbe_eth_dev_ops;
164 eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
165 eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
166 eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
169 * For secondary processes, we don't initialise any further as primary
170 * has already done this work. Only check we don't need a different
171 * Rx and Tx function.
173 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
174 struct ngbe_tx_queue *txq;
175 /* Tx queue function in primary, set by last queue initialized
176 * Tx queue may not initialized by primary process
178 if (eth_dev->data->tx_queues) {
179 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
180 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
181 ngbe_set_tx_function(eth_dev, txq);
183 /* Use default Tx function if we get here */
185 "No Tx queues configured yet. Using default Tx function.");
188 ngbe_set_rx_function(eth_dev);
193 rte_eth_copy_pci_info(eth_dev, pci_dev);
194 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
196 /* Vendor and Device ID need to be set before init of shared code */
197 hw->device_id = pci_dev->id.device_id;
198 hw->vendor_id = pci_dev->id.vendor_id;
199 hw->sub_system_id = pci_dev->id.subsystem_device_id;
200 ngbe_map_device_id(hw);
201 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
203 /* Reserve memory for interrupt status block */
204 mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
205 NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
209 hw->isb_dma = TMZ_PADDR(mz);
210 hw->isb_mem = TMZ_VADDR(mz);
212 /* Initialize the shared code (base driver) */
213 err = ngbe_init_shared_code(hw);
215 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
219 /* Unlock any pending hardware semaphore */
220 ngbe_swfw_lock_reset(hw);
222 err = hw->rom.init_params(hw);
224 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
228 /* Make sure we have a good EEPROM before we read from it */
229 err = hw->rom.validate_checksum(hw, NULL);
231 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
235 err = hw->mac.init_hw(hw);
237 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
241 /* Reset the hw statistics */
242 ngbe_dev_stats_reset(eth_dev);
244 /* disable interrupt */
245 ngbe_disable_intr(hw);
247 /* Allocate memory for storing MAC addresses */
248 eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
249 hw->mac.num_rar_entries, 0);
250 if (eth_dev->data->mac_addrs == NULL) {
252 "Failed to allocate %u bytes needed to store MAC addresses",
253 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
257 /* Copy the permanent MAC address */
258 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
259 ð_dev->data->mac_addrs[0]);
261 /* Allocate memory for storing hash filter MAC addresses */
262 eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
263 RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
264 if (eth_dev->data->hash_mac_addrs == NULL) {
266 "Failed to allocate %d bytes needed to store MAC addresses",
267 RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
/* Roll back the earlier RAR table allocation on failure */
268 rte_free(eth_dev->data->mac_addrs);
269 eth_dev->data->mac_addrs = NULL;
273 /* initialize the vfta */
274 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
276 /* initialize the hw strip bitmap*/
277 memset(hwstrip, 0, sizeof(*hwstrip));
279 ctrl_ext = rd32(hw, NGBE_PORTCTL);
280 /* let hardware know driver is loaded */
281 ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
282 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
283 ctrl_ext |= NGBE_PORTCTL_RSTDONE;
284 wr32(hw, NGBE_PORTCTL, ctrl_ext);
287 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
288 (int)hw->mac.type, (int)hw->phy.type);
290 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
291 eth_dev->data->port_id, pci_dev->id.vendor_id,
292 pci_dev->id.device_id);
294 rte_intr_callback_register(intr_handle,
295 ngbe_dev_interrupt_handler, eth_dev);
297 /* enable uio/vfio intr/eventfd mapping */
298 rte_intr_enable(intr_handle);
300 /* enable support intr */
301 ngbe_enable_intr(eth_dev);
/* Undo eth_ngbe_dev_init(): only the primary process tears the port
 * down, via the common close path.
 */
307 eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
309 PMD_INIT_FUNC_TRACE();
311 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
314 ngbe_dev_close(eth_dev);
/* PCI probe hook: create an ethdev with ngbe_adapter private data and
 * run eth_ngbe_dev_init() on it.
 */
320 eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
321 struct rte_pci_device *pci_dev)
323 return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
324 sizeof(struct ngbe_adapter),
325 eth_dev_pci_specific_init, pci_dev,
326 eth_ngbe_dev_init, NULL);
/* PCI remove hook: look up the ethdev for this PCI device and destroy
 * it through eth_ngbe_dev_uninit().
 */
329 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
331 struct rte_eth_dev *ethdev;
333 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
337 return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
/* PCI driver registration glue; advertises BAR mapping requirement and
 * link-status-change interrupt support.
 */
340 static struct rte_pci_driver rte_ngbe_pmd = {
341 .id_table = pci_id_ngbe_map,
342 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
343 RTE_PCI_DRV_INTR_LSC,
344 .probe = eth_ngbe_pci_probe,
345 .remove = eth_ngbe_pci_remove,
/* Add/remove one VLAN ID in the hardware VLAN filter table (VFTA) and
 * mirror the change into the shadow copy. The VFTA is an array of
 * 32-bit words: word index = vlan_id / 32, bit = vlan_id % 32.
 */
349 ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
351 struct ngbe_hw *hw = ngbe_dev_hw(dev);
352 struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
357 vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
358 vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
359 vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
364 wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
366 /* update local VFTA copy */
367 shadow_vfta->vfta[vid_idx] = vfta;
/* Enable/disable hardware VLAN stripping on one Rx queue. The RXCFG
 * VLAN bit can only be changed safely with the ring disabled, so when
 * the setting actually changes on a running ring the queue is stopped,
 * its base-address/config registers restored, and then restarted.
 */
373 ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
375 struct ngbe_hw *hw = ngbe_dev_hw(dev);
376 struct ngbe_rx_queue *rxq;
378 uint32_t rxcfg, rxbal, rxbah;
381 ngbe_vlan_hw_strip_enable(dev, queue);
383 ngbe_vlan_hw_strip_disable(dev, queue);
385 rxq = dev->data->rx_queues[queue];
386 rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
387 rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
388 rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
389 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
/* restart only if the ring is enabled and the bit actually flips */
390 restart = (rxcfg & NGBE_RXCFG_ENA) &&
391 !(rxcfg & NGBE_RXCFG_VLAN);
392 rxcfg |= NGBE_RXCFG_VLAN;
394 restart = (rxcfg & NGBE_RXCFG_ENA) &&
395 (rxcfg & NGBE_RXCFG_VLAN);
396 rxcfg &= ~NGBE_RXCFG_VLAN;
398 rxcfg &= ~NGBE_RXCFG_ENA;
401 /* set vlan strip for ring */
402 ngbe_dev_rx_queue_stop(dev, queue);
403 wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
404 wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
405 wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
406 ngbe_dev_rx_queue_start(dev, queue);
/* Program the TPID (EtherType) used for inner/outer VLAN matching.
 * Which registers take the TPID depends on whether VLAN-extend and
 * QinQ are currently enabled in PORTCTL.
 */
411 ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
412 enum rte_vlan_type vlan_type,
415 struct ngbe_hw *hw = ngbe_dev_hw(dev);
417 uint32_t portctrl, vlan_ext, qinq;
419 portctrl = rd32(hw, NGBE_PORTCTL);
421 vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
422 qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
424 case RTE_ETH_VLAN_TYPE_INNER:
426 wr32m(hw, NGBE_VLANCTL,
427 NGBE_VLANCTL_TPID_MASK,
428 NGBE_VLANCTL_TPID(tpid));
429 wr32m(hw, NGBE_DMATXCTRL,
430 NGBE_DMATXCTRL_TPID_MASK,
431 NGBE_DMATXCTRL_TPID(tpid));
435 "Inner type is not supported by single VLAN");
439 wr32m(hw, NGBE_TAGTPID(0),
440 NGBE_TAGTPID_LSB_MASK,
441 NGBE_TAGTPID_LSB(tpid));
444 case RTE_ETH_VLAN_TYPE_OUTER:
446 /* Only the high 16-bits is valid */
447 wr32m(hw, NGBE_EXTAG,
448 NGBE_EXTAG_VLAN_MASK,
449 NGBE_EXTAG_VLAN(tpid));
451 wr32m(hw, NGBE_VLANCTL,
452 NGBE_VLANCTL_TPID_MASK,
453 NGBE_VLANCTL_TPID(tpid));
454 wr32m(hw, NGBE_DMATXCTRL,
455 NGBE_DMATXCTRL_TPID_MASK,
456 NGBE_DMATXCTRL_TPID(tpid));
460 wr32m(hw, NGBE_TAGTPID(0),
461 NGBE_TAGTPID_MSB_MASK,
462 NGBE_TAGTPID_MSB(tpid));
466 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
/* Turn off the hardware VLAN filter table (clear VFE in VLANCTL). */
474 ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
476 struct ngbe_hw *hw = ngbe_dev_hw(dev);
479 PMD_INIT_FUNC_TRACE();
481 /* Filter Table Disable */
482 vlnctrl = rd32(hw, NGBE_VLANCTL);
483 vlnctrl &= ~NGBE_VLANCTL_VFE;
484 wr32(hw, NGBE_VLANCTL, vlnctrl);
/* Turn on the hardware VLAN filter and replay the shadow VFTA into the
 * device, so filtering state survives resets.
 */
488 ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
490 struct ngbe_hw *hw = ngbe_dev_hw(dev);
491 struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
495 PMD_INIT_FUNC_TRACE();
497 /* Filter Table Enable */
498 vlnctrl = rd32(hw, NGBE_VLANCTL);
499 vlnctrl &= ~NGBE_VLANCTL_CFIENA;
500 vlnctrl |= NGBE_VLANCTL_VFE;
501 wr32(hw, NGBE_VLANCTL, vlnctrl);
503 /* write whatever is in local vfta copy */
504 for (i = 0; i < NGBE_VFTA_SIZE; i++)
505 wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
/* Record the per-queue VLAN-strip state in the software bitmap and, if
 * the queue object exists, update its mbuf vlan_flags and offloads so
 * the Rx path reports stripped/unstripped VLANs correctly.
 */
509 ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
511 struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
512 struct ngbe_rx_queue *rxq;
514 if (queue >= NGBE_MAX_RX_QUEUE_NUM)
518 NGBE_SET_HWSTRIP(hwstrip, queue);
520 NGBE_CLEAR_HWSTRIP(hwstrip, queue);
522 if (queue >= dev->data->nb_rx_queues)
525 rxq = dev->data->rx_queues[queue];
528 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
529 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
531 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
532 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
/* Clear the VLAN-strip bit in the queue's RXCFG and record it. */
537 ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
539 struct ngbe_hw *hw = ngbe_dev_hw(dev);
542 PMD_INIT_FUNC_TRACE();
544 ctrl = rd32(hw, NGBE_RXCFG(queue));
545 ctrl &= ~NGBE_RXCFG_VLAN;
546 wr32(hw, NGBE_RXCFG(queue), ctrl);
548 /* record those setting for HW strip per queue */
549 ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
/* Set the VLAN-strip bit in the queue's RXCFG and record it. */
553 ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
555 struct ngbe_hw *hw = ngbe_dev_hw(dev);
558 PMD_INIT_FUNC_TRACE();
560 ctrl = rd32(hw, NGBE_RXCFG(queue));
561 ctrl |= NGBE_RXCFG_VLAN;
562 wr32(hw, NGBE_RXCFG(queue), ctrl);
564 /* record those setting for HW strip per queue */
565 ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
/* Disable extended (double) VLAN: clear VLANEXT and QINQ in PORTCTL. */
569 ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
571 struct ngbe_hw *hw = ngbe_dev_hw(dev);
574 PMD_INIT_FUNC_TRACE();
576 ctrl = rd32(hw, NGBE_PORTCTL);
577 ctrl &= ~NGBE_PORTCTL_VLANEXT;
578 ctrl &= ~NGBE_PORTCTL_QINQ;
579 wr32(hw, NGBE_PORTCTL, ctrl);
/* Enable extended (double) VLAN: set VLANEXT and QINQ in PORTCTL. */
583 ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
585 struct ngbe_hw *hw = ngbe_dev_hw(dev);
588 PMD_INIT_FUNC_TRACE();
590 ctrl = rd32(hw, NGBE_PORTCTL);
591 ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
592 wr32(hw, NGBE_PORTCTL, ctrl);
/* Disable QinQ stripping: clear only the QINQ bit, keep VLANEXT. */
596 ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
598 struct ngbe_hw *hw = ngbe_dev_hw(dev);
601 PMD_INIT_FUNC_TRACE();
603 ctrl = rd32(hw, NGBE_PORTCTL);
604 ctrl &= ~NGBE_PORTCTL_QINQ;
605 wr32(hw, NGBE_PORTCTL, ctrl);
/* Enable QinQ stripping; QINQ requires VLANEXT, so both bits are set. */
609 ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
611 struct ngbe_hw *hw = ngbe_dev_hw(dev);
614 PMD_INIT_FUNC_TRACE();
616 ctrl = rd32(hw, NGBE_PORTCTL);
617 ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
618 wr32(hw, NGBE_PORTCTL, ctrl);
/* Push each Rx queue's VLAN_STRIP offload flag down into hardware. */
622 ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
624 struct ngbe_rx_queue *rxq;
627 PMD_INIT_FUNC_TRACE();
629 for (i = 0; i < dev->data->nb_rx_queues; i++) {
630 rxq = dev->data->rx_queues[i];
632 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
633 ngbe_vlan_hw_strip_enable(dev, i);
635 ngbe_vlan_hw_strip_disable(dev, i);
/* When VLAN_STRIP is part of the mask, propagate the port-level strip
 * offload setting to every Rx queue's offloads field (software state
 * only; ngbe_vlan_hw_strip_config() applies it to hardware).
 */
640 ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
643 struct rte_eth_rxmode *rxmode;
644 struct ngbe_rx_queue *rxq;
646 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
647 rxmode = &dev->data->dev_conf.rxmode;
648 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
649 for (i = 0; i < dev->data->nb_rx_queues; i++) {
650 rxq = dev->data->rx_queues[i];
651 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
654 for (i = 0; i < dev->data->nb_rx_queues; i++) {
655 rxq = dev->data->rx_queues[i];
656 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
/* Apply the VLAN offloads selected in dev_conf.rxmode for every
 * category present in 'mask': strip, filter, extend, QinQ strip.
 */
662 ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
664 struct rte_eth_rxmode *rxmode;
665 rxmode = &dev->data->dev_conf.rxmode;
667 if (mask & RTE_ETH_VLAN_STRIP_MASK)
668 ngbe_vlan_hw_strip_config(dev);
670 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
671 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
672 ngbe_vlan_hw_filter_enable(dev);
674 ngbe_vlan_hw_filter_disable(dev);
677 if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
678 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
679 ngbe_vlan_hw_extend_enable(dev);
681 ngbe_vlan_hw_extend_disable(dev);
684 if (mask & RTE_ETH_QINQ_STRIP_MASK) {
685 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
686 ngbe_qinq_hw_strip_enable(dev);
688 ngbe_qinq_hw_strip_disable(dev);
/* ethdev vlan_offload_set callback: sync per-queue software state first,
 * then program hardware.
 */
695 ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
697 ngbe_config_vlan_strip_on_all_queues(dev, mask);
699 ngbe_vlan_offload_config(dev, mask);
/* ethdev configure callback: flag a deferred link update and reset the
 * bulk-alloc assumption, which Rx queue setup may later veto.
 */
705 ngbe_dev_configure(struct rte_eth_dev *dev)
707 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
708 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
710 PMD_INIT_FUNC_TRACE();
712 /* set flag to update link status after init */
713 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
716 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
717 * allocation Rx preconditions we will reset it.
719 adapter->rx_bulk_alloc_allowed = true;
/* Configure the GPIO-based PHY interrupt: direction, enable, level
 * trigger, and a polarity that depends on the PHY type (yt8521s_sfi
 * uses the opposite active level), then arm the GPIO misc-cause bit.
 */
725 ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
727 struct ngbe_hw *hw = ngbe_dev_hw(dev);
728 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
730 wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
731 wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
732 wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
733 if (hw->phy.type == ngbe_phy_yt8521s_sfi)
734 wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
736 wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));
738 intr->mask_misc |= NGBE_ICRMISC_GPIO;
742 * Configure device link speed and setup link.
743 * It returns 0 on success.
746 ngbe_dev_start(struct rte_eth_dev *dev)
748 struct ngbe_hw *hw = ngbe_dev_hw(dev);
749 struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
750 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
751 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
752 uint32_t intr_vector = 0;
754 bool link_up = false, negotiate = false;
756 uint32_t allowed_speeds = 0;
759 uint32_t *link_speeds;
761 PMD_INIT_FUNC_TRACE();
763 /* disable uio/vfio intr/eventfd mapping */
764 rte_intr_disable(intr_handle);
767 hw->adapter_stopped = 0;
770 /* reinitialize adapter, this calls reset and start */
771 hw->nb_rx_queues = dev->data->nb_rx_queues;
772 hw->nb_tx_queues = dev->data->nb_tx_queues;
773 status = ngbe_pf_reset_hw(hw);
776 hw->mac.start_hw(hw);
777 hw->mac.get_link_status = true;
779 ngbe_dev_phy_intr_setup(dev);
781 /* check and configure queue intr-vector mapping */
782 if ((rte_intr_cap_multiple(intr_handle) ||
783 !RTE_ETH_DEV_SRIOV(dev).active) &&
784 dev->data->dev_conf.intr_conf.rxq != 0) {
785 intr_vector = dev->data->nb_rx_queues;
786 if (rte_intr_efd_enable(intr_handle, intr_vector))
790 if (rte_intr_dp_is_en(intr_handle)) {
791 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
792 dev->data->nb_rx_queues)) {
794 "Failed to allocate %d rx_queues intr_vec",
795 dev->data->nb_rx_queues);
800 /* configure MSI-X for sleep until Rx interrupt */
801 ngbe_configure_msix(dev);
803 /* initialize transmission unit */
804 ngbe_dev_tx_init(dev);
806 /* This can fail when allocating mbufs for descriptor rings */
807 err = ngbe_dev_rx_init(dev);
809 PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
813 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
814 RTE_ETH_VLAN_EXTEND_MASK;
815 err = ngbe_vlan_offload_config(dev, mask);
817 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
821 ngbe_configure_port(dev);
823 err = ngbe_dev_rxtx_start(dev);
825 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
829 err = hw->mac.check_link(hw, &speed, &link_up, 0);
832 dev->data->dev_link.link_status = link_up;
834 link_speeds = &dev->data->dev_conf.link_speeds;
835 if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
838 err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
/* Translate base-driver speed capabilities into ethdev speed flags
 * so the user's requested link_speeds can be validated below.
 */
843 if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
844 allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
845 if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
846 allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
847 if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
848 allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
850 if (*link_speeds & ~allowed_speeds) {
851 PMD_INIT_LOG(ERR, "Invalid link setting");
856 if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
857 speed = hw->mac.default_speeds;
859 if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
860 speed |= NGBE_LINK_SPEED_1GB_FULL;
861 if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
862 speed |= NGBE_LINK_SPEED_100M_FULL;
863 if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
864 speed |= NGBE_LINK_SPEED_10M_FULL;
868 err = hw->mac.setup_link(hw, speed, link_up);
/* With a dedicated misc vector available, wire up LSC/macsec/misc
 * interrupts; otherwise fall back to polling and drop the callback.
 */
872 if (rte_intr_allow_others(intr_handle)) {
873 ngbe_dev_misc_interrupt_setup(dev);
874 /* check if lsc interrupt is enabled */
875 if (dev->data->dev_conf.intr_conf.lsc != 0)
876 ngbe_dev_lsc_interrupt_setup(dev, TRUE);
878 ngbe_dev_lsc_interrupt_setup(dev, FALSE);
879 ngbe_dev_macsec_interrupt_setup(dev);
880 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
882 rte_intr_callback_unregister(intr_handle,
883 ngbe_dev_interrupt_handler, dev);
884 if (dev->data->dev_conf.intr_conf.lsc != 0)
886 "LSC won't enable because of no intr multiplex");
889 /* check if rxq interrupt is enabled */
890 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
891 rte_intr_dp_is_en(intr_handle))
892 ngbe_dev_rxq_interrupt_setup(dev);
894 /* enable UIO/VFIO intr/eventfd mapping */
895 rte_intr_enable(intr_handle);
897 /* resume enabled intr since HW reset */
898 ngbe_enable_intr(dev);
900 if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
901 (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
902 /* gpio0 is used to power on/off control*/
903 wr32(hw, NGBE_GPIODATA, 0);
907 * Update link status right before return, because it may
908 * start link configuration process in a separate thread.
910 ngbe_dev_link_update(dev, 0);
/* Snapshot counters so subsequent stats reads are relative to start */
912 ngbe_read_stats_registers(hw, hw_stats);
913 hw->offset_loaded = 1;
918 PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
919 ngbe_dev_clear_queues(dev);
924 * Stop device: disable rx and tx functions to allow for reconfiguring.
927 ngbe_dev_stop(struct rte_eth_dev *dev)
929 struct rte_eth_link link;
930 struct ngbe_hw *hw = ngbe_dev_hw(dev);
931 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
932 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
/* idempotent: already stopped means nothing to do */
934 if (hw->adapter_stopped)
937 PMD_INIT_FUNC_TRACE();
939 if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
940 (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
941 /* gpio0 is used to power on/off control*/
942 wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
945 /* disable interrupts */
946 ngbe_disable_intr(hw);
949 ngbe_pf_reset_hw(hw);
950 hw->adapter_stopped = 0;
955 ngbe_dev_clear_queues(dev);
957 /* Clear stored conf */
958 dev->data->scattered_rx = 0;
960 /* Clear recorded link status */
961 memset(&link, 0, sizeof(link));
962 rte_eth_linkstatus_set(dev, &link);
964 if (!rte_intr_allow_others(intr_handle))
965 /* resume to the default handler */
966 rte_intr_callback_register(intr_handle,
967 ngbe_dev_interrupt_handler,
970 /* Clean datapath event and queue/vec mapping */
971 rte_intr_efd_disable(intr_handle);
972 rte_intr_vec_list_free(intr_handle);
974 hw->adapter_stopped = true;
975 dev->data->dev_started = 0;
981 * Reset and stop device.
984 ngbe_dev_close(struct rte_eth_dev *dev)
986 struct ngbe_hw *hw = ngbe_dev_hw(dev);
987 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
988 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
992 PMD_INIT_FUNC_TRACE();
994 ngbe_pf_reset_hw(hw);
998 ngbe_dev_free_queues(dev);
1000 /* reprogram the RAR[0] in case user changed it. */
1001 ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1003 /* Unlock any pending hardware semaphore */
1004 ngbe_swfw_lock_reset(hw);
1006 /* disable uio intr before callback unregister */
1007 rte_intr_disable(intr_handle);
/* Retry unregister while the handler may still be executing: -EAGAIN
 * means "busy, try again"; success or -ENOENT ends the loop early.
 */
1010 ret = rte_intr_callback_unregister(intr_handle,
1011 ngbe_dev_interrupt_handler, dev);
1012 if (ret >= 0 || ret == -ENOENT) {
1014 } else if (ret != -EAGAIN) {
1016 "intr callback unregister failed: %d",
1020 } while (retries++ < (10 + NGBE_LINK_UP_TIME));
1022 rte_free(dev->data->mac_addrs);
1023 dev->data->mac_addrs = NULL;
1025 rte_free(dev->data->hash_mac_addrs);
1026 dev->data->hash_mac_addrs = NULL;
/* ethdev reset callback: full uninit followed by re-init of the port. */
1035 ngbe_dev_reset(struct rte_eth_dev *dev)
1039 ret = eth_ngbe_dev_uninit(dev);
1043 ret = eth_ngbe_dev_init(dev, NULL);
/* Per-queue counter update helpers. Hardware counters are 32-bit (or
 * 36-bit split across two registers) and wrap; these macros compute the
 * delta since 'last_counter', compensating for one wraparound, and
 * skip the delta on the first read after reset (!hw->offset_loaded).
 */
1048 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
1050 uint32_t current_counter = rd32(hw, reg); \
1051 if (current_counter < last_counter) \
1052 current_counter += 0x100000000LL; \
1053 if (!hw->offset_loaded) \
1054 last_counter = current_counter; \
1055 counter = current_counter - last_counter; \
1056 counter &= 0xFFFFFFFFLL; \
1059 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1061 uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
1062 uint64_t current_counter_msb = rd32(hw, reg_msb); \
1063 uint64_t current_counter = (current_counter_msb << 32) | \
1064 current_counter_lsb; \
1065 if (current_counter < last_counter) \
1066 current_counter += 0x1000000000LL; \
1067 if (!hw->offset_loaded) \
1068 last_counter = current_counter; \
1069 counter = current_counter - last_counter; \
1070 counter &= 0xFFFFFFFFFLL; \
/* Accumulate all hardware statistics into 'hw_stats': per-queue Rx/Tx
 * counters (wrap-compensated via the UPDATE_QP_COUNTER_* macros), then
 * packet-buffer, DMA, MAC, management, and MACsec counters. Most MAC
 * counters are read-to-clear, hence the += accumulation.
 */
1074 ngbe_read_stats_registers(struct ngbe_hw *hw,
1075 struct ngbe_hw_stats *hw_stats)
1080 for (i = 0; i < hw->nb_rx_queues; i++) {
1081 UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
1082 hw->qp_last[i].rx_qp_packets,
1083 hw_stats->qp[i].rx_qp_packets);
1084 UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
1085 hw->qp_last[i].rx_qp_bytes,
1086 hw_stats->qp[i].rx_qp_bytes);
1087 UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
1088 hw->qp_last[i].rx_qp_mc_packets,
1089 hw_stats->qp[i].rx_qp_mc_packets);
1090 UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
1091 hw->qp_last[i].rx_qp_bc_packets,
1092 hw_stats->qp[i].rx_qp_bc_packets);
1095 for (i = 0; i < hw->nb_tx_queues; i++) {
1096 UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
1097 hw->qp_last[i].tx_qp_packets,
1098 hw_stats->qp[i].tx_qp_packets);
1099 UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
1100 hw->qp_last[i].tx_qp_bytes,
1101 hw_stats->qp[i].tx_qp_bytes);
1102 UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
1103 hw->qp_last[i].tx_qp_mc_packets,
1104 hw_stats->qp[i].tx_qp_mc_packets);
1105 UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
1106 hw->qp_last[i].tx_qp_bc_packets,
1107 hw_stats->qp[i].tx_qp_bc_packets);
1111 hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
1112 hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
1113 hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
1114 hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
1115 hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
1116 hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
1118 hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
1119 hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
1122 hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
1123 hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
1124 hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
1125 hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
1126 hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
1127 hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
1128 hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
1129 hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
1132 hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
1133 hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
1134 hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
1136 hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
1137 hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
1138 hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
/* NOTE(review): broadcast-packet counters read the *OCTL (octet-named)
 * registers, and tx uses rd32 while rx uses rd64 — confirm against the
 * register map that these are the intended broadcast-count registers.
 */
1140 hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
1141 hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
1143 hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
1144 hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
1145 hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
1146 hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
1147 hw_stats->rx_size_512_to_1023_packets +=
1148 rd64(hw, NGBE_MACRX512TO1023L);
1149 hw_stats->rx_size_1024_to_max_packets +=
1150 rd64(hw, NGBE_MACRX1024TOMAXL);
1151 hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
1152 hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
1153 hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
1154 hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
1155 hw_stats->tx_size_512_to_1023_packets +=
1156 rd64(hw, NGBE_MACTX512TO1023L);
1157 hw_stats->tx_size_1024_to_max_packets +=
1158 rd64(hw, NGBE_MACTX1024TOMAXL);
1160 hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
1161 hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
1162 hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
/* Management counters are absolute values, not accumulated */
1165 hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
1166 hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
1167 hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
1168 hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
1171 hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
1172 hw_stats->tx_macsec_pkts_encrypted +=
1173 rd32(hw, NGBE_LSECTX_ENCPKT);
1174 hw_stats->tx_macsec_pkts_protected +=
1175 rd32(hw, NGBE_LSECTX_PROTPKT);
1176 hw_stats->tx_macsec_octets_encrypted +=
1177 rd32(hw, NGBE_LSECTX_ENCOCT);
1178 hw_stats->tx_macsec_octets_protected +=
1179 rd32(hw, NGBE_LSECTX_PROTOCT);
1180 hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
1181 hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
1182 hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
1183 hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
1184 hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
1185 hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
1186 hw_stats->rx_macsec_sc_pkts_unchecked +=
1187 rd32(hw, NGBE_LSECRX_UNCHKPKT);
1188 hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
1189 hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
1190 for (i = 0; i < 2; i++) {
1191 hw_stats->rx_macsec_sa_pkts_ok +=
1192 rd32(hw, NGBE_LSECRX_OKPKT(i));
1193 hw_stats->rx_macsec_sa_pkts_invalid +=
1194 rd32(hw, NGBE_LSECRX_INVPKT(i));
1195 hw_stats->rx_macsec_sa_pkts_notvalid +=
1196 rd32(hw, NGBE_LSECRX_BADPKT(i));
1198 for (i = 0; i < 4; i++) {
1199 hw_stats->rx_macsec_sa_pkts_unusedsa +=
1200 rd32(hw, NGBE_LSECRX_INVSAPKT(i));
1201 hw_stats->rx_macsec_sa_pkts_notusingsa +=
1202 rd32(hw, NGBE_LSECRX_BADSAPKT(i));
1204 hw_stats->rx_total_missed_packets =
1205 hw_stats->rx_up_dropped;
/* ethdev stats_get callback: refresh hardware counters, then fill the
 * rte_eth_stats structure, distributing per-queue counters to the
 * stat-counter slots selected by the queue-stat mapping registers.
 * NOTE(review): ngbe_dev_stats_reset() calls this with stats == NULL;
 * the NULL guard is not visible in this view — confirm it exists.
 */
1209 ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1211 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1212 struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1213 struct ngbe_stat_mappings *stat_mappings =
1214 NGBE_DEV_STAT_MAPPINGS(dev);
1217 ngbe_read_stats_registers(hw, hw_stats);
1222 /* Fill out the rte_eth_stats statistics structure */
1223 stats->ipackets = hw_stats->rx_packets;
1224 stats->ibytes = hw_stats->rx_bytes;
1225 stats->opackets = hw_stats->tx_packets;
1226 stats->obytes = hw_stats->tx_bytes;
1228 memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1229 memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1230 memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1231 memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1232 memset(&stats->q_errors, 0, sizeof(stats->q_errors));
1233 for (i = 0; i < NGBE_MAX_QP; i++) {
/* Each QSM register packs 8-bit mapping fields for several queues */
1234 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1235 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1238 q_map = (stat_mappings->rqsm[n] >> offset)
1239 & QMAP_FIELD_RESERVED_BITS_MASK;
1240 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1241 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1242 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1243 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1245 q_map = (stat_mappings->tqsm[n] >> offset)
1246 & QMAP_FIELD_RESERVED_BITS_MASK;
1247 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1248 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1249 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1250 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1254 stats->imissed = hw_stats->rx_total_missed_packets +
1255 hw_stats->rx_dma_drop;
1256 stats->ierrors = hw_stats->rx_crc_errors +
1257 hw_stats->rx_mac_short_packet_dropped +
1258 hw_stats->rx_length_errors +
1259 hw_stats->rx_undersize_errors +
1260 hw_stats->rx_oversize_errors +
1261 hw_stats->rx_illegal_byte_errors +
1262 hw_stats->rx_error_bytes +
1263 hw_stats->rx_fragment_errors;
1271 ngbe_dev_stats_reset(struct rte_eth_dev *dev)
1273 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1274 struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1276 /* HW registers are cleared on read */
1277 hw->offset_loaded = 0;
1278 ngbe_dev_stats_get(dev, NULL);
1279 hw->offset_loaded = 1;
1281 /* Reset software totals */
1282 memset(hw_stats, 0, sizeof(*hw_stats));
1288 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1290 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1292 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1293 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1294 dev_info->min_rx_bufsize = 1024;
1295 dev_info->max_rx_pktlen = 15872;
1296 dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1297 dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1298 dev_info->rx_queue_offload_capa);
1299 dev_info->tx_queue_offload_capa = 0;
1300 dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1302 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1304 .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1305 .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1306 .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1308 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1313 dev_info->default_txconf = (struct rte_eth_txconf) {
1315 .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1316 .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1317 .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1319 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1323 dev_info->rx_desc_lim = rx_desc_lim;
1324 dev_info->tx_desc_lim = tx_desc_lim;
1326 dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1327 RTE_ETH_LINK_SPEED_10M;
1329 /* Driver-preferred Rx/Tx parameters */
1330 dev_info->default_rxportconf.burst_size = 32;
1331 dev_info->default_txportconf.burst_size = 32;
1332 dev_info->default_rxportconf.nb_queues = 1;
1333 dev_info->default_txportconf.nb_queues = 1;
1334 dev_info->default_rxportconf.ring_size = 256;
1335 dev_info->default_txportconf.ring_size = 256;
1341 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1343 if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1344 dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1345 dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1346 dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1347 return ngbe_get_supported_ptypes();
1352 /* return 0 means link status changed, -1 means not changed */
1354 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1355 int wait_to_complete)
1357 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1358 struct rte_eth_link link;
1359 u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1361 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1366 memset(&link, 0, sizeof(link));
1367 link.link_status = RTE_ETH_LINK_DOWN;
1368 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1369 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1370 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1371 ~RTE_ETH_LINK_SPEED_AUTONEG);
1373 hw->mac.get_link_status = true;
1375 if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1376 return rte_eth_linkstatus_set(dev, &link);
1378 /* check if it needs to wait to complete, if lsc interrupt is enabled */
1379 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1382 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1384 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1385 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1386 return rte_eth_linkstatus_set(dev, &link);
1390 return rte_eth_linkstatus_set(dev, &link);
1392 intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1393 link.link_status = RTE_ETH_LINK_UP;
1394 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1396 switch (link_speed) {
1398 case NGBE_LINK_SPEED_UNKNOWN:
1399 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1402 case NGBE_LINK_SPEED_10M_FULL:
1403 link.link_speed = RTE_ETH_SPEED_NUM_10M;
1407 case NGBE_LINK_SPEED_100M_FULL:
1408 link.link_speed = RTE_ETH_SPEED_NUM_100M;
1412 case NGBE_LINK_SPEED_1GB_FULL:
1413 link.link_speed = RTE_ETH_SPEED_NUM_1G;
1419 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1420 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1421 NGBE_LINK_SPEED_100M_FULL |
1422 NGBE_LINK_SPEED_10M_FULL)) {
1423 wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1424 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1428 return rte_eth_linkstatus_set(dev, &link);
/* ethdev link_update callback: delegate to the shared implementation. */
static int
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return ngbe_dev_link_update_share(dev, wait_to_complete);
}
1438 * It clears the interrupt causes and enables the interrupt.
1439 * It will be called once only during NIC initialized.
1442 * Pointer to struct rte_eth_dev.
1444 * Enable or Disable.
1447 * - On success, zero.
1448 * - On failure, a negative value.
1451 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1453 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1455 ngbe_dev_link_status_print(dev);
1457 intr->mask_misc |= NGBE_ICRMISC_PHY;
1458 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1460 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1461 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
1468 * It clears the interrupt causes and enables the interrupt.
1469 * It will be called once only during NIC initialized.
1472 * Pointer to struct rte_eth_dev.
1475 * - On success, zero.
1476 * - On failure, a negative value.
1479 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1481 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1484 mask = NGBE_ICR_MASK;
1485 mask &= (1ULL << NGBE_MISC_VEC_ID);
1487 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1493 * It clears the interrupt causes and enables the interrupt.
1494 * It will be called once only during NIC initialized.
1497 * Pointer to struct rte_eth_dev.
1500 * - On success, zero.
1501 * - On failure, a negative value.
1504 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1506 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1509 mask = NGBE_ICR_MASK;
1510 mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
1517 * It clears the interrupt causes and enables the interrupt.
1518 * It will be called once only during NIC initialized.
1521 * Pointer to struct rte_eth_dev.
1524 * - On success, zero.
1525 * - On failure, a negative value.
1528 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1530 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1532 intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
1538 * It reads ICR and sets flag for the link_update.
1541 * Pointer to struct rte_eth_dev.
1544 * - On success, zero.
1545 * - On failure, a negative value.
1548 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1551 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1552 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1554 /* clear all cause mask */
1555 ngbe_disable_intr(hw);
1557 /* read-on-clear nic registers here */
1558 eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
1559 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
1563 /* set flag for async link update */
1564 if (eicr & NGBE_ICRMISC_PHY)
1565 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1567 if (eicr & NGBE_ICRMISC_VFMBX)
1568 intr->flags |= NGBE_FLAG_MAILBOX;
1570 if (eicr & NGBE_ICRMISC_LNKSEC)
1571 intr->flags |= NGBE_FLAG_MACSEC;
1573 if (eicr & NGBE_ICRMISC_GPIO)
1574 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1580 * It gets and then prints the link status.
1583 * Pointer to struct rte_eth_dev.
1586 * - On success, zero.
1587 * - On failure, a negative value.
1590 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
1592 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1593 struct rte_eth_link link;
1595 rte_eth_linkstatus_get(dev, &link);
1597 if (link.link_status == RTE_ETH_LINK_UP) {
1598 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1599 (int)(dev->data->port_id),
1600 (unsigned int)link.link_speed,
1601 link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
1602 "full-duplex" : "half-duplex");
1604 PMD_INIT_LOG(INFO, " Port %d: Link Down",
1605 (int)(dev->data->port_id));
1607 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1608 pci_dev->addr.domain,
1610 pci_dev->addr.devid,
1611 pci_dev->addr.function);
1615 * It executes link_update after knowing an interrupt occurred.
1618 * Pointer to struct rte_eth_dev.
1621 * - On success, zero.
1622 * - On failure, a negative value.
1625 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
1627 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1630 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
1632 if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
1633 struct rte_eth_link link;
1635 /*get the link status before link update, for predicting later*/
1636 rte_eth_linkstatus_get(dev, &link);
1638 ngbe_dev_link_update(dev, 0);
1641 if (link.link_status != RTE_ETH_LINK_UP)
1642 /* handle it 1 sec later, wait it being stable */
1643 timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
1644 /* likely to down */
1646 /* handle it 4 sec later, wait it being stable */
1647 timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
1649 ngbe_dev_link_status_print(dev);
1650 if (rte_eal_alarm_set(timeout * 1000,
1651 ngbe_dev_interrupt_delayed_handler,
1653 PMD_DRV_LOG(ERR, "Error setting alarm");
1655 /* remember original mask */
1656 intr->mask_misc_orig = intr->mask_misc;
1657 /* only disable lsc interrupt */
1658 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1660 intr->mask_orig = intr->mask;
1661 /* only disable all misc interrupts */
1662 intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
1666 PMD_DRV_LOG(DEBUG, "enable intr immediately");
1667 ngbe_enable_intr(dev);
1673 * Interrupt handler which shall be registered for alarm callback for delayed
1674 * handling specific interrupt to wait for the stable nic state. As the
1675 * NIC interrupt state is not stable for ngbe after link is just down,
1676 * it needs to wait 4 seconds to get the stable status.
1679 * The address of parameter (struct rte_eth_dev *) registered before.
1682 ngbe_dev_interrupt_delayed_handler(void *param)
1684 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1685 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1686 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1689 ngbe_disable_intr(hw);
1691 eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
1693 if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
1694 ngbe_dev_link_update(dev, 0);
1695 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
1696 ngbe_dev_link_status_print(dev);
1697 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
1701 if (intr->flags & NGBE_FLAG_MACSEC) {
1702 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
1704 intr->flags &= ~NGBE_FLAG_MACSEC;
1707 /* restore original mask */
1708 intr->mask_misc = intr->mask_misc_orig;
1709 intr->mask_misc_orig = 0;
1710 intr->mask = intr->mask_orig;
1711 intr->mask_orig = 0;
1713 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
1714 ngbe_enable_intr(dev);
/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ngbe_dev_interrupt_get_status(dev);
	ngbe_dev_interrupt_action(dev);
}
1734 * Set the IVAR registers, mapping interrupt causes to vectors
1736 * pointer to ngbe_hw struct
1738 * 0 for Rx, 1 for Tx, -1 for other causes
1740 * queue to map the corresponding interrupt to
1742 * the vector to map to the corresponding queue
1745 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
1746 uint8_t queue, uint8_t msix_vector)
1750 if (direction == -1) {
1752 msix_vector |= NGBE_IVARMISC_VLD;
1754 tmp = rd32(hw, NGBE_IVARMISC);
1755 tmp &= ~(0xFF << idx);
1756 tmp |= (msix_vector << idx);
1757 wr32(hw, NGBE_IVARMISC, tmp);
1759 /* rx or tx causes */
1760 /* Workround for ICR lost */
1761 idx = ((16 * (queue & 1)) + (8 * direction));
1762 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
1763 tmp &= ~(0xFF << idx);
1764 tmp |= (msix_vector << idx);
1765 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
1770 * Sets up the hardware to properly generate MSI-X interrupts
1772 * board private structure
1775 ngbe_configure_msix(struct rte_eth_dev *dev)
1777 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1778 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1779 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1780 uint32_t queue_id, base = NGBE_MISC_VEC_ID;
1781 uint32_t vec = NGBE_MISC_VEC_ID;
1785 * Won't configure MSI-X register if no mapping is done
1786 * between intr vector and event fd
1787 * but if MSI-X has been enabled already, need to configure
1788 * auto clean, auto mask and throttling.
1790 gpie = rd32(hw, NGBE_GPIE);
1791 if (!rte_intr_dp_is_en(intr_handle) &&
1792 !(gpie & NGBE_GPIE_MSIX))
1795 if (rte_intr_allow_others(intr_handle)) {
1796 base = NGBE_RX_VEC_START;
1800 /* setup GPIE for MSI-X mode */
1801 gpie = rd32(hw, NGBE_GPIE);
1802 gpie |= NGBE_GPIE_MSIX;
1803 wr32(hw, NGBE_GPIE, gpie);
1805 /* Populate the IVAR table and set the ITR values to the
1806 * corresponding register.
1808 if (rte_intr_dp_is_en(intr_handle)) {
1809 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
1811 /* by default, 1:1 mapping */
1812 ngbe_set_ivar_map(hw, 0, queue_id, vec);
1813 rte_intr_vec_list_index_set(intr_handle,
1815 if (vec < base + rte_intr_nb_efd_get(intr_handle)
1820 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
1822 wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
1823 NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
1827 static const struct eth_dev_ops ngbe_eth_dev_ops = {
1828 .dev_configure = ngbe_dev_configure,
1829 .dev_infos_get = ngbe_dev_info_get,
1830 .dev_start = ngbe_dev_start,
1831 .dev_stop = ngbe_dev_stop,
1832 .dev_close = ngbe_dev_close,
1833 .dev_reset = ngbe_dev_reset,
1834 .link_update = ngbe_dev_link_update,
1835 .stats_get = ngbe_dev_stats_get,
1836 .stats_reset = ngbe_dev_stats_reset,
1837 .dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get,
1838 .vlan_filter_set = ngbe_vlan_filter_set,
1839 .vlan_tpid_set = ngbe_vlan_tpid_set,
1840 .vlan_offload_set = ngbe_vlan_offload_set,
1841 .vlan_strip_queue_set = ngbe_vlan_strip_queue_set,
1842 .rx_queue_start = ngbe_dev_rx_queue_start,
1843 .rx_queue_stop = ngbe_dev_rx_queue_stop,
1844 .tx_queue_start = ngbe_dev_tx_queue_start,
1845 .tx_queue_stop = ngbe_dev_tx_queue_stop,
1846 .rx_queue_setup = ngbe_dev_rx_queue_setup,
1847 .rx_queue_release = ngbe_dev_rx_queue_release,
1848 .tx_queue_setup = ngbe_dev_tx_queue_setup,
1849 .tx_queue_release = ngbe_dev_tx_queue_release,
1850 .rx_burst_mode_get = ngbe_rx_burst_mode_get,
1851 .tx_burst_mode_get = ngbe_tx_burst_mode_get,
1854 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
1855 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
1856 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
1858 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
1859 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
1861 #ifdef RTE_ETHDEV_DEBUG_RX
1862 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
1864 #ifdef RTE_ETHDEV_DEBUG_TX
1865 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);