/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] |= 1 << bit;\
} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] &= ~(1 << bit);\
} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)

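/*
 * Example of the bitmap math above: with 32-bit bitmap words
 * (sizeof(bitmap[0]) * NBBY == 32), queue 35 maps to idx = 35 / 32 = 1
 * and bit = 35 % 32 = 3, i.e. bit 3 of bitmap[1].
 */
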
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
			  sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
			  sizeof(rte_ngbe_qp_strings[0]))

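/*
 * Note: QP_XSTAT records each counter's offset within qp[0] only;
 * ngbe_get_offset_by_id() below adds a per-queue stride at runtime to
 * reach the same counter for queue N.
 */
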
static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * the lock cannot be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	int err;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * Rx and Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* Tx queue function in primary, set by last queue initialized
		 * Tx queue may not be initialized by primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
				      NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
		     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
				  sizeof(struct ngbe_adapter),
				  eth_dev_pci_specific_init, pci_dev,
				  eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;
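
	/*
	 * The VFTA is a 128-entry table of 32-bit words, one bit per VLAN ID:
	 * e.g. VLAN 100 lives in word (100 >> 5) & 0x7F = 3, bit 100 & 0x1F = 4.
	 */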
	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			  !(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			  (rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	rxcfg &= ~NGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		   enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
			      NGBE_VLANCTL_TPID_MASK,
			      NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
			      NGBE_DMATXCTRL_TPID_MASK,
			      NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				    "Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
			      NGBE_TAGTPID_LSB_MASK,
			      NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (qinq) {
			/* Only the high 16-bits is valid */
			wr32m(hw, NGBE_EXTAG,
			      NGBE_EXTAG_VLAN_MASK,
			      NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
			      NGBE_VLANCTL_TPID_MASK,
			      NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
			      NGBE_DMATXCTRL_TPID_MASK,
			      NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
			      NGBE_TAGTPID_MSB_MASK,
			      NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

static void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);
}

static void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

static void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record those setting for HW strip per queue */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record those setting for HW strip per queue */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_hw_strip_enable(dev, i);
		else
			ngbe_vlan_hw_strip_disable(dev, i);
	}
}

static void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
	       RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		err = -EINVAL;
		goto error;
	}

	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
	    (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* gpio0 is used to power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
	    (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* gpio0 is used to power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	hw->mac.stop_hw(hw);

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
						   ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_DRV_LOG(ERR,
				    "intr callback unregister failed: %d",
				    ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
{ \
	uint32_t current_counter = rd32(hw, reg); \
	if (current_counter < last_counter) \
		current_counter += 0x100000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFLL; \
}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
{ \
	uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
	uint64_t current_counter_msb = rd32(hw, reg_msb); \
	uint64_t current_counter = (current_counter_msb << 32) | \
		current_counter_lsb; \
	if (current_counter < last_counter) \
		current_counter += 0x1000000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFFLL; \
}

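/*
 * Example of the wrap handling above: if the last 32-bit snapshot was
 * 0xFFFFFFF0 and the register now reads 0x00000010, the raw value is
 * promoted to 0x100000010 before subtraction, yielding the true delta
 * of 0x20; the 36-bit variant does the same with 0x1000000000.
 */
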
void
ngbe_read_stats_registers(struct ngbe_hw *hw,
			  struct ngbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
					hw->qp_last[i].rx_qp_packets,
					hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
					hw->qp_last[i].rx_qp_bytes,
					hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
					hw->qp_last[i].rx_qp_mc_packets,
					hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
					hw->qp_last[i].rx_qp_bc_packets,
					hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
					hw->qp_last[i].tx_qp_packets,
					hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
					hw->qp_last[i].tx_qp_bytes,
					hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
					hw->qp_last[i].tx_qp_mc_packets,
					hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
					hw->qp_last[i].tx_qp_bc_packets,
					hw_stats->qp[i].tx_qp_bc_packets);
	}

	/* PB Stats */
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
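
	/*
	 * Each 32-bit RQSM/TQSM register packs NB_QMAP_FIELDS_PER_QSM_REG
	 * 8-bit queue-to-counter map fields; e.g. with 4 fields per register,
	 * queue 5 is described by register 1 at bit offset 8.
	 */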
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed = hw_stats->rx_total_missed_packets +
			 hw_stats->rx_dma_drop;
	stats->ierrors = hw_stats->rx_crc_errors +
			 hw_stats->rx_mac_short_packet_dropped +
			 hw_stats->rx_length_errors +
			 hw_stats->rx_undersize_errors +
			 hw_stats->rx_oversize_errors +
			 hw_stats->rx_illegal_byte_errors +
			 hw_stats->rx_error_bytes +
			 hw_stats->rx_fragment_errors;

	/* Tx Errors */
	stats->oerrors = 0;
	return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned int
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);

	return NGBE_NB_HW_STATS +
	       NGBE_NB_QP_STATS * nb_queues;
}

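/*
 * For example, with the five per-queue counters defined above, a port
 * configured with 4 Rx and 2 Tx queues exposes
 * NGBE_NB_HW_STATS + 5 * max(4, 2) extended statistics.
 */
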
static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			 rte_ngbe_stats_strings[id].name);
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			 rte_ngbe_qp_strings[st].name);
		return 0;
	}
	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

	return -(int)(id + 1);
}

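/*
 * Naming examples: id 0 resolves to "[hw]mng_bmc2host_packets", while the
 * first per-queue id after the hardware block resolves to
 * "[q0]rx_qp_packets".
 */
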
static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		*offset = rte_ngbe_stats_strings[id].offset;
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		*offset = rte_ngbe_qp_strings[st].offset +
			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}

	return -1;
}

static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i, count;

	count = ngbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(i, xstats_names[i].name,
					sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
	}

	return i;
}

static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
					   const uint64_t *ids,
					   struct rte_eth_xstat_name *xstats_names,
					   unsigned int limit)
{
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
					sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;
}

static int
ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}

static int
ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
		     unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (values == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			  uint64_t *values, unsigned int limit)
{
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
			       RTE_ETH_LINK_SPEED_10M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}

static const uint32_t *
ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
		return ngbe_get_supported_ptypes();

	return NULL;
}

/* return 0 means link status changed, -1 means not changed */
int
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
			   int wait_to_complete)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_eth_link link;
	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
	u32 lan_speed = 0;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	bool link_up;
	int wait = 1;
	int err;

	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ~RTE_ETH_LINK_SPEED_AUTONEG);

	hw->mac.get_link_status = true;

	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (!link_up)
		return rte_eth_linkstatus_set(dev, &link);

	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case NGBE_LINK_SPEED_UNKNOWN:
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		break;

	case NGBE_LINK_SPEED_10M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_10M;
		lan_speed = 0;
		break;

	case NGBE_LINK_SPEED_100M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		lan_speed = 1;
		break;

	case NGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		lan_speed = 2;
		break;
	}

	if (hw->is_pf) {
		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
				  NGBE_LINK_SPEED_100M_FULL |
				  NGBE_LINK_SPEED_10M_FULL)) {
			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
			      NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
		}
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return ngbe_dev_link_update_share(dev, wait_to_complete);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	ngbe_dev_link_status_print(dev);
	if (on != 0) {
		intr->mask_misc |= NGBE_ICRMISC_PHY;
		intr->mask_misc |= NGBE_ICRMISC_GPIO;
	} else {
		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
	}

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u64 mask;

	mask = NGBE_ICR_MASK;
	mask &= (1ULL << NGBE_MISC_VEC_ID);
	intr->mask |= mask;
	intr->mask_misc |= NGBE_ICRMISC_GPIO;

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u64 mask;
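
	/*
	 * (1ULL << NGBE_RX_VEC_START) - 1 is a mask of every bit below the
	 * first Rx vector; clearing those bits leaves only the queue-vector
	 * causes enabled while the misc vector(s) stay masked off.
	 */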
	mask = NGBE_ICR_MASK;
	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
	intr->mask |= mask;

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;

	return 0;
}

/**
 * It reads ICR and sets flag for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	/* clear all cause mask */
	ngbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & NGBE_ICRMISC_PHY)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & NGBE_ICRMISC_VFMBX)
		intr->flags |= NGBE_FLAG_MAILBOX;

	if (eicr & NGBE_ICRMISC_LNKSEC)
		intr->flags |= NGBE_FLAG_MACSEC;

	if (eicr & NGBE_ICRMISC_GPIO)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static void
ngbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status == RTE_ETH_LINK_UP) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			     (int)(dev->data->port_id),
			     (unsigned int)link.link_speed,
			     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
			     "full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
			     (int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
		     pci_dev->addr.devid,
		     pci_dev->addr.function);
}

/**
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	int64_t timeout;

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		ngbe_dev_link_update(dev, 0);

		/* likely to up */
		if (link.link_status != RTE_ETH_LINK_UP)
			/* handle it 1 sec later, wait it being stable */
			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait it being stable */
			timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;

		ngbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      ngbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~NGBE_ICRMISC_PHY;

			intr->mask_orig = intr->mask;
			/* only disable all misc interrupts */
			intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	ngbe_enable_intr(dev);

	return 0;
}

/**
 * Interrupt handler which shall be registered for alarm callback for delayed
 * handling specific interrupt to wait for the stable nic state. As the
 * NIC interrupt state is not stable for ngbe after link is just down,
 * it needs to wait 4 seconds to get the stable status.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t eicr;

	ngbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		ngbe_dev_link_update(dev, 0);
		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
		ngbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & NGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~NGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;
	intr->mask = intr->mask_orig;
	intr->mask_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	ngbe_enable_intr(dev);
}

/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ngbe_dev_interrupt_get_status(dev);
	ngbe_dev_interrupt_action(dev);
}

static int
ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
	struct rte_eth_dev_data *dev_data = dev->data;
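
	/*
	 * frame_size adds the Ethernet header (14 B), CRC (4 B) and one
	 * VLAN tag (4 B) on top of the MTU: e.g. an MTU of 1500 yields a
	 * 1522-byte frame.
	 */
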
	/* If device is started, refuse mtu that requires the support of
	 * scattered packets when this feature has not been enabled before.
	 */
	if (dev_data->dev_started && !dev_data->scattered_rx &&
	    (frame_size + 2 * NGBE_VLAN_TAG_SIZE >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	if (frame_size > RTE_ETHER_MAX_LEN)
		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
		      NGBE_FRAME_SIZE_MAX);
	else
		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
		      NGBE_FRMSZ_MAX(frame_size));

	return 0;
}

/**
 * Set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to ngbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
void
ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
		  uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= NGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, NGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
	}
}

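/*
 * IVAR layout example: each 32-bit NGBE_IVAR register describes two queues
 * with one 8-bit entry per cause, so Rx on queue 3 (direction 0) lands in
 * NGBE_IVAR(1) at idx = 16 * (3 & 1) + 8 * 0 = 16, i.e. bits 23:16.
 */
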
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  board private structure
 */
static void
ngbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
	uint32_t vec = NGBE_MISC_VEC_ID;
	uint32_t gpie;

	/*
	 * Won't configure MSI-X register if no mapping is done
	 * between intr vector and event fd,
	 * but if MSI-X has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, NGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & NGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = NGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, NGBE_GPIE);
	gpie |= NGBE_GPIE_MSIX;
	wr32(hw, NGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
		     queue_id++) {
			/* by default, 1:1 mapping */
			ngbe_set_ivar_map(hw, 0, queue_id, vec);
			rte_intr_vec_list_index_set(intr_handle,
						    queue_id, vec);
			if (vec < base + rte_intr_nb_efd_get(intr_handle)
			    - 1)
				vec++;
		}

		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	}
	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
	     NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
	     | NGBE_ITR_WRDSA);
}

static const struct eth_dev_ops ngbe_eth_dev_ops = {
	.dev_configure              = ngbe_dev_configure,
	.dev_infos_get              = ngbe_dev_info_get,
	.dev_start                  = ngbe_dev_start,
	.dev_stop                   = ngbe_dev_stop,
	.dev_close                  = ngbe_dev_close,
	.dev_reset                  = ngbe_dev_reset,
	.link_update                = ngbe_dev_link_update,
	.stats_get                  = ngbe_dev_stats_get,
	.xstats_get                 = ngbe_dev_xstats_get,
	.xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
	.stats_reset                = ngbe_dev_stats_reset,
	.xstats_reset               = ngbe_dev_xstats_reset,
	.xstats_get_names           = ngbe_dev_xstats_get_names,
	.xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
	.mtu_set                    = ngbe_dev_mtu_set,
	.vlan_filter_set            = ngbe_vlan_filter_set,
	.vlan_tpid_set              = ngbe_vlan_tpid_set,
	.vlan_offload_set           = ngbe_vlan_offload_set,
	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
	.rx_queue_start             = ngbe_dev_rx_queue_start,
	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
	.tx_queue_start             = ngbe_dev_tx_queue_start,
	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
	.rx_queue_release           = ngbe_dev_rx_queue_release,
	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
	.tx_queue_release           = ngbe_dev_tx_queue_release,
	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
};

RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);

#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
#endif