1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
10 #include <rte_alarm.h>
12 #include "ngbe_logs.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
17 static int ngbe_dev_close(struct rte_eth_dev *dev);
18 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
19 int wait_to_complete);
20 static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
21 static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
22 static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
25 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
26 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
27 static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
28 static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
29 static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
30 static void ngbe_dev_interrupt_handler(void *param);
31 static void ngbe_dev_interrupt_delayed_handler(void *param);
32 static void ngbe_configure_msix(struct rte_eth_dev *dev);
34 #define NGBE_SET_HWSTRIP(h, q) do {\
35 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
36 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
37 (h)->bitmap[idx] |= 1 << bit;\
40 #define NGBE_CLEAR_HWSTRIP(h, q) do {\
41 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
42 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
43 (h)->bitmap[idx] &= ~(1 << bit);\
46 #define NGBE_GET_HWSTRIP(h, q, r) do {\
47 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
48 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
49 (r) = (h)->bitmap[idx] >> bit & 1;\
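/*
 * Worked example for the bitmap arithmetic above, assuming 32-bit
 * bitmap words (NBBY == 8 bits per byte): queue 35 maps to
 * idx = 35 / 32 = 1 and bit = 35 % 32 = 3, i.e. bit 3 of bitmap[1].
 */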
53 * The set of PCI devices this driver supports
55 static const struct rte_pci_id pci_id_ngbe_map[] = {
56 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
57 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
58 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
59 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
60 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
61 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
62 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
63 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
64 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
65 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
66 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
67 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
68 { .vendor_id = 0, /* sentinel */ },
71 static const struct rte_eth_desc_lim rx_desc_lim = {
72 .nb_max = NGBE_RING_DESC_MAX,
73 .nb_min = NGBE_RING_DESC_MIN,
74 .nb_align = NGBE_RXD_ALIGN,
77 static const struct rte_eth_desc_lim tx_desc_lim = {
78 .nb_max = NGBE_RING_DESC_MAX,
79 .nb_min = NGBE_RING_DESC_MIN,
80 .nb_align = NGBE_TXD_ALIGN,
81 .nb_seg_max = NGBE_TX_MAX_SEG,
82 .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
85 static const struct eth_dev_ops ngbe_eth_dev_ops;
87 #define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
88 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
89 static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
91 HW_XSTAT(mng_bmc2host_packets),
92 HW_XSTAT(mng_host2bmc_packets),
98 HW_XSTAT(rx_total_bytes),
99 HW_XSTAT(rx_total_packets),
100 HW_XSTAT(tx_total_packets),
101 HW_XSTAT(rx_total_missed_packets),
102 HW_XSTAT(rx_broadcast_packets),
103 HW_XSTAT(rx_multicast_packets),
104 HW_XSTAT(rx_management_packets),
105 HW_XSTAT(tx_management_packets),
106 HW_XSTAT(rx_management_dropped),
109 HW_XSTAT(rx_crc_errors),
110 HW_XSTAT(rx_illegal_byte_errors),
111 HW_XSTAT(rx_error_bytes),
112 HW_XSTAT(rx_mac_short_packet_dropped),
113 HW_XSTAT(rx_length_errors),
114 HW_XSTAT(rx_undersize_errors),
115 HW_XSTAT(rx_fragment_errors),
116 HW_XSTAT(rx_oversize_errors),
117 HW_XSTAT(rx_jabber_errors),
118 HW_XSTAT(rx_l3_l4_xsum_error),
119 HW_XSTAT(mac_local_errors),
120 HW_XSTAT(mac_remote_errors),
123 HW_XSTAT(tx_macsec_pkts_untagged),
124 HW_XSTAT(tx_macsec_pkts_encrypted),
125 HW_XSTAT(tx_macsec_pkts_protected),
126 HW_XSTAT(tx_macsec_octets_encrypted),
127 HW_XSTAT(tx_macsec_octets_protected),
128 HW_XSTAT(rx_macsec_pkts_untagged),
129 HW_XSTAT(rx_macsec_pkts_badtag),
130 HW_XSTAT(rx_macsec_pkts_nosci),
131 HW_XSTAT(rx_macsec_pkts_unknownsci),
132 HW_XSTAT(rx_macsec_octets_decrypted),
133 HW_XSTAT(rx_macsec_octets_validated),
134 HW_XSTAT(rx_macsec_sc_pkts_unchecked),
135 HW_XSTAT(rx_macsec_sc_pkts_delayed),
136 HW_XSTAT(rx_macsec_sc_pkts_late),
137 HW_XSTAT(rx_macsec_sa_pkts_ok),
138 HW_XSTAT(rx_macsec_sa_pkts_invalid),
139 HW_XSTAT(rx_macsec_sa_pkts_notvalid),
140 HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
141 HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
144 HW_XSTAT(rx_size_64_packets),
145 HW_XSTAT(rx_size_65_to_127_packets),
146 HW_XSTAT(rx_size_128_to_255_packets),
147 HW_XSTAT(rx_size_256_to_511_packets),
148 HW_XSTAT(rx_size_512_to_1023_packets),
149 HW_XSTAT(rx_size_1024_to_max_packets),
150 HW_XSTAT(tx_size_64_packets),
151 HW_XSTAT(tx_size_65_to_127_packets),
152 HW_XSTAT(tx_size_128_to_255_packets),
153 HW_XSTAT(tx_size_256_to_511_packets),
154 HW_XSTAT(tx_size_512_to_1023_packets),
155 HW_XSTAT(tx_size_1024_to_max_packets),
158 HW_XSTAT(tx_xon_packets),
159 HW_XSTAT(rx_xon_packets),
160 HW_XSTAT(tx_xoff_packets),
161 HW_XSTAT(rx_xoff_packets),
163 HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
164 HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
165 HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
166 HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
169 #define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
170 sizeof(rte_ngbe_stats_strings[0]))
172 /* Per-queue statistics */
173 #define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
174 static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
175 QP_XSTAT(rx_qp_packets),
176 QP_XSTAT(tx_qp_packets),
177 QP_XSTAT(rx_qp_bytes),
178 QP_XSTAT(tx_qp_bytes),
179 QP_XSTAT(rx_qp_mc_packets),
182 #define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
183 sizeof(rte_ngbe_qp_strings[0]))
185 static inline int32_t
186 ngbe_pf_reset_hw(struct ngbe_hw *hw)
191 status = hw->mac.reset_hw(hw);
193 ctrl_ext = rd32(hw, NGBE_PORTCTL);
194 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
195 ctrl_ext |= NGBE_PORTCTL_RSTDONE;
196 wr32(hw, NGBE_PORTCTL, ctrl_ext);
199 if (status == NGBE_ERR_SFP_NOT_PRESENT)
205 ngbe_enable_intr(struct rte_eth_dev *dev)
207 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
208 struct ngbe_hw *hw = ngbe_dev_hw(dev);
210 wr32(hw, NGBE_IENMISC, intr->mask_misc);
211 wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
216 ngbe_disable_intr(struct ngbe_hw *hw)
218 PMD_INIT_FUNC_TRACE();
220 wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
225 * Ensure that all locks are released before first NVM or PHY access
228 ngbe_swfw_lock_reset(struct ngbe_hw *hw)
233 * These ones are more tricky, since they are common to all ports; but
234 * swfw_sync retries last long enough (1s) to be almost sure that if
235 * the lock cannot be taken it is due to an improper lock of the semaphore.
238 mask = NGBE_MNGSEM_SWPHY |
241 if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
242 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
244 hw->mac.release_swfw_sync(hw, mask);
248 eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
250 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
251 struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
252 struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
253 struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
254 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
255 const struct rte_memzone *mz;
259 PMD_INIT_FUNC_TRACE();
261 eth_dev->dev_ops = &ngbe_eth_dev_ops;
262 eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
263 eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
264 eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
267 * For secondary processes, we don't initialise any further as primary
268 * has already done this work. Only check we don't need a different
269 * Rx and Tx function.
271 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
272 struct ngbe_tx_queue *txq;
273 /* Tx queue function in the primary process was set by the last queue initialized;
274 * Tx queues may not have been initialized by the primary process
276 if (eth_dev->data->tx_queues) {
277 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
278 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
279 ngbe_set_tx_function(eth_dev, txq);
281 /* Use default Tx function if we get here */
283 "No Tx queues configured yet. Using default Tx function.");
286 ngbe_set_rx_function(eth_dev);
291 rte_eth_copy_pci_info(eth_dev, pci_dev);
292 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
294 /* Vendor and Device ID need to be set before init of shared code */
295 hw->device_id = pci_dev->id.device_id;
296 hw->vendor_id = pci_dev->id.vendor_id;
297 hw->sub_system_id = pci_dev->id.subsystem_device_id;
298 ngbe_map_device_id(hw);
299 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
301 /* Reserve memory for interrupt status block */
302 mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
303 NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
307 hw->isb_dma = TMZ_PADDR(mz);
308 hw->isb_mem = TMZ_VADDR(mz);
310 /* Initialize the shared code (base driver) */
311 err = ngbe_init_shared_code(hw);
313 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
317 /* Unlock any pending hardware semaphore */
318 ngbe_swfw_lock_reset(hw);
320 err = hw->rom.init_params(hw);
322 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
326 /* Make sure we have a good EEPROM before we read from it */
327 err = hw->rom.validate_checksum(hw, NULL);
329 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
333 err = hw->mac.init_hw(hw);
335 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
339 /* Reset the hw statistics */
340 ngbe_dev_stats_reset(eth_dev);
342 /* disable interrupt */
343 ngbe_disable_intr(hw);
345 /* Allocate memory for storing MAC addresses */
346 eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
347 hw->mac.num_rar_entries, 0);
348 if (eth_dev->data->mac_addrs == NULL) {
350 "Failed to allocate %u bytes needed to store MAC addresses",
351 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
355 /* Copy the permanent MAC address */
356 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
357 &eth_dev->data->mac_addrs[0]);
359 /* Allocate memory for storing hash filter MAC addresses */
360 eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
361 RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
362 if (eth_dev->data->hash_mac_addrs == NULL) {
364 "Failed to allocate %d bytes needed to store MAC addresses",
365 RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
366 rte_free(eth_dev->data->mac_addrs);
367 eth_dev->data->mac_addrs = NULL;
371 /* initialize the vfta */
372 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
374 /* initialize the HW strip bitmap */
375 memset(hwstrip, 0, sizeof(*hwstrip));
377 ctrl_ext = rd32(hw, NGBE_PORTCTL);
378 /* let hardware know driver is loaded */
379 ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
380 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
381 ctrl_ext |= NGBE_PORTCTL_RSTDONE;
382 wr32(hw, NGBE_PORTCTL, ctrl_ext);
385 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
386 (int)hw->mac.type, (int)hw->phy.type);
388 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
389 eth_dev->data->port_id, pci_dev->id.vendor_id,
390 pci_dev->id.device_id);
392 rte_intr_callback_register(intr_handle,
393 ngbe_dev_interrupt_handler, eth_dev);
395 /* enable uio/vfio intr/eventfd mapping */
396 rte_intr_enable(intr_handle);
399 /* enable supported interrupts */
399 ngbe_enable_intr(eth_dev);
405 eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
407 PMD_INIT_FUNC_TRACE();
409 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
412 ngbe_dev_close(eth_dev);
418 eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
419 struct rte_pci_device *pci_dev)
421 return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
422 sizeof(struct ngbe_adapter),
423 eth_dev_pci_specific_init, pci_dev,
424 eth_ngbe_dev_init, NULL);
427 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
429 struct rte_eth_dev *ethdev;
431 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
435 return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
438 static struct rte_pci_driver rte_ngbe_pmd = {
439 .id_table = pci_id_ngbe_map,
440 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
441 RTE_PCI_DRV_INTR_LSC,
442 .probe = eth_ngbe_pci_probe,
443 .remove = eth_ngbe_pci_remove,
447 ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
449 struct ngbe_hw *hw = ngbe_dev_hw(dev);
450 struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
455 vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
456 vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
457 vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
462 wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
464 /* update local VFTA copy */
465 shadow_vfta->vfta[vid_idx] = vfta;
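/*
 * Worked example for the VFTA indexing above: vlan_id 100 gives
 * vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) =
 * 1 << 4, i.e. bit 4 of VLANTBL(3). 128 32-bit table words cover the
 * full 4096-entry VLAN ID space.
 */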
471 ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
473 struct ngbe_hw *hw = ngbe_dev_hw(dev);
474 struct ngbe_rx_queue *rxq;
476 uint32_t rxcfg, rxbal, rxbah;
479 ngbe_vlan_hw_strip_enable(dev, queue);
481 ngbe_vlan_hw_strip_disable(dev, queue);
483 rxq = dev->data->rx_queues[queue];
484 rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
485 rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
486 rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
487 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
488 restart = (rxcfg & NGBE_RXCFG_ENA) &&
489 !(rxcfg & NGBE_RXCFG_VLAN);
490 rxcfg |= NGBE_RXCFG_VLAN;
492 restart = (rxcfg & NGBE_RXCFG_ENA) &&
493 (rxcfg & NGBE_RXCFG_VLAN);
494 rxcfg &= ~NGBE_RXCFG_VLAN;
496 rxcfg &= ~NGBE_RXCFG_ENA;
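/*
 * NGBE_RXCFG_VLAN appears to take effect only while the ring is
 * disabled, so a live queue is stopped, its base address and config
 * registers rewritten, and the queue restarted below.
 */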
499 /* set vlan strip for ring */
500 ngbe_dev_rx_queue_stop(dev, queue);
501 wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
502 wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
503 wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
504 ngbe_dev_rx_queue_start(dev, queue);
509 ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
510 enum rte_vlan_type vlan_type,
513 struct ngbe_hw *hw = ngbe_dev_hw(dev);
515 uint32_t portctrl, vlan_ext, qinq;
517 portctrl = rd32(hw, NGBE_PORTCTL);
519 vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
520 qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
522 case RTE_ETH_VLAN_TYPE_INNER:
524 wr32m(hw, NGBE_VLANCTL,
525 NGBE_VLANCTL_TPID_MASK,
526 NGBE_VLANCTL_TPID(tpid));
527 wr32m(hw, NGBE_DMATXCTRL,
528 NGBE_DMATXCTRL_TPID_MASK,
529 NGBE_DMATXCTRL_TPID(tpid));
533 "Inner type is not supported by single VLAN");
537 wr32m(hw, NGBE_TAGTPID(0),
538 NGBE_TAGTPID_LSB_MASK,
539 NGBE_TAGTPID_LSB(tpid));
542 case RTE_ETH_VLAN_TYPE_OUTER:
544 /* Only the high 16 bits are valid */
545 wr32m(hw, NGBE_EXTAG,
546 NGBE_EXTAG_VLAN_MASK,
547 NGBE_EXTAG_VLAN(tpid));
549 wr32m(hw, NGBE_VLANCTL,
550 NGBE_VLANCTL_TPID_MASK,
551 NGBE_VLANCTL_TPID(tpid));
552 wr32m(hw, NGBE_DMATXCTRL,
553 NGBE_DMATXCTRL_TPID_MASK,
554 NGBE_DMATXCTRL_TPID(tpid));
558 wr32m(hw, NGBE_TAGTPID(0),
559 NGBE_TAGTPID_MSB_MASK,
560 NGBE_TAGTPID_MSB(tpid));
564 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
572 ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
574 struct ngbe_hw *hw = ngbe_dev_hw(dev);
577 PMD_INIT_FUNC_TRACE();
579 /* Filter Table Disable */
580 vlnctrl = rd32(hw, NGBE_VLANCTL);
581 vlnctrl &= ~NGBE_VLANCTL_VFE;
582 wr32(hw, NGBE_VLANCTL, vlnctrl);
586 ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
588 struct ngbe_hw *hw = ngbe_dev_hw(dev);
589 struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
593 PMD_INIT_FUNC_TRACE();
595 /* Filter Table Enable */
596 vlnctrl = rd32(hw, NGBE_VLANCTL);
597 vlnctrl &= ~NGBE_VLANCTL_CFIENA;
598 vlnctrl |= NGBE_VLANCTL_VFE;
599 wr32(hw, NGBE_VLANCTL, vlnctrl);
601 /* write whatever is in local vfta copy */
602 for (i = 0; i < NGBE_VFTA_SIZE; i++)
603 wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
607 ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
609 struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
610 struct ngbe_rx_queue *rxq;
612 if (queue >= NGBE_MAX_RX_QUEUE_NUM)
616 NGBE_SET_HWSTRIP(hwstrip, queue);
618 NGBE_CLEAR_HWSTRIP(hwstrip, queue);
620 if (queue >= dev->data->nb_rx_queues)
623 rxq = dev->data->rx_queues[queue];
626 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
627 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
629 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
630 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
635 ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
637 struct ngbe_hw *hw = ngbe_dev_hw(dev);
640 PMD_INIT_FUNC_TRACE();
642 ctrl = rd32(hw, NGBE_RXCFG(queue));
643 ctrl &= ~NGBE_RXCFG_VLAN;
644 wr32(hw, NGBE_RXCFG(queue), ctrl);
646 /* record this setting in the per-queue HW strip bitmap */
647 ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
651 ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
653 struct ngbe_hw *hw = ngbe_dev_hw(dev);
656 PMD_INIT_FUNC_TRACE();
658 ctrl = rd32(hw, NGBE_RXCFG(queue));
659 ctrl |= NGBE_RXCFG_VLAN;
660 wr32(hw, NGBE_RXCFG(queue), ctrl);
662 /* record this setting in the per-queue HW strip bitmap */
663 ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
667 ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
669 struct ngbe_hw *hw = ngbe_dev_hw(dev);
672 PMD_INIT_FUNC_TRACE();
674 ctrl = rd32(hw, NGBE_PORTCTL);
675 ctrl &= ~NGBE_PORTCTL_VLANEXT;
676 ctrl &= ~NGBE_PORTCTL_QINQ;
677 wr32(hw, NGBE_PORTCTL, ctrl);
681 ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
683 struct ngbe_hw *hw = ngbe_dev_hw(dev);
686 PMD_INIT_FUNC_TRACE();
688 ctrl = rd32(hw, NGBE_PORTCTL);
689 ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
690 wr32(hw, NGBE_PORTCTL, ctrl);
694 ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
696 struct ngbe_hw *hw = ngbe_dev_hw(dev);
699 PMD_INIT_FUNC_TRACE();
701 ctrl = rd32(hw, NGBE_PORTCTL);
702 ctrl &= ~NGBE_PORTCTL_QINQ;
703 wr32(hw, NGBE_PORTCTL, ctrl);
707 ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
709 struct ngbe_hw *hw = ngbe_dev_hw(dev);
712 PMD_INIT_FUNC_TRACE();
714 ctrl = rd32(hw, NGBE_PORTCTL);
715 ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
716 wr32(hw, NGBE_PORTCTL, ctrl);
720 ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
722 struct ngbe_rx_queue *rxq;
725 PMD_INIT_FUNC_TRACE();
727 for (i = 0; i < dev->data->nb_rx_queues; i++) {
728 rxq = dev->data->rx_queues[i];
730 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
731 ngbe_vlan_hw_strip_enable(dev, i);
733 ngbe_vlan_hw_strip_disable(dev, i);
738 ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
741 struct rte_eth_rxmode *rxmode;
742 struct ngbe_rx_queue *rxq;
744 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
745 rxmode = &dev->data->dev_conf.rxmode;
746 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
747 for (i = 0; i < dev->data->nb_rx_queues; i++) {
748 rxq = dev->data->rx_queues[i];
749 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
752 for (i = 0; i < dev->data->nb_rx_queues; i++) {
753 rxq = dev->data->rx_queues[i];
754 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
760 ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
762 struct rte_eth_rxmode *rxmode;
763 rxmode = &dev->data->dev_conf.rxmode;
765 if (mask & RTE_ETH_VLAN_STRIP_MASK)
766 ngbe_vlan_hw_strip_config(dev);
768 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
769 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
770 ngbe_vlan_hw_filter_enable(dev);
772 ngbe_vlan_hw_filter_disable(dev);
775 if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
776 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
777 ngbe_vlan_hw_extend_enable(dev);
779 ngbe_vlan_hw_extend_disable(dev);
782 if (mask & RTE_ETH_QINQ_STRIP_MASK) {
783 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
784 ngbe_qinq_hw_strip_enable(dev);
786 ngbe_qinq_hw_strip_disable(dev);
793 ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
795 ngbe_config_vlan_strip_on_all_queues(dev, mask);
797 ngbe_vlan_offload_config(dev, mask);
803 ngbe_dev_configure(struct rte_eth_dev *dev)
805 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
806 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
808 PMD_INIT_FUNC_TRACE();
810 /* set flag to update link status after init */
811 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
814 * Initialize to TRUE. If any Rx queue does not meet the bulk
815 * allocation preconditions, it will be reset.
817 adapter->rx_bulk_alloc_allowed = true;
823 ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
825 struct ngbe_hw *hw = ngbe_dev_hw(dev);
826 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
828 wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
829 wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
830 wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
831 if (hw->phy.type == ngbe_phy_yt8521s_sfi)
832 wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
834 wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));
836 intr->mask_misc |= NGBE_ICRMISC_GPIO;
840 * Configure device link speed and set up the link.
841 * It returns 0 on success.
844 ngbe_dev_start(struct rte_eth_dev *dev)
846 struct ngbe_hw *hw = ngbe_dev_hw(dev);
847 struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
848 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
849 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
850 uint32_t intr_vector = 0;
852 bool link_up = false, negotiate = false;
854 uint32_t allowed_speeds = 0;
857 uint32_t *link_speeds;
859 PMD_INIT_FUNC_TRACE();
861 /* disable uio/vfio intr/eventfd mapping */
862 rte_intr_disable(intr_handle);
865 hw->adapter_stopped = 0;
868 /* reinitialize the adapter; this calls reset and start */
869 hw->nb_rx_queues = dev->data->nb_rx_queues;
870 hw->nb_tx_queues = dev->data->nb_tx_queues;
871 status = ngbe_pf_reset_hw(hw);
874 hw->mac.start_hw(hw);
875 hw->mac.get_link_status = true;
877 ngbe_dev_phy_intr_setup(dev);
879 /* check and configure queue intr-vector mapping */
880 if ((rte_intr_cap_multiple(intr_handle) ||
881 !RTE_ETH_DEV_SRIOV(dev).active) &&
882 dev->data->dev_conf.intr_conf.rxq != 0) {
883 intr_vector = dev->data->nb_rx_queues;
884 if (rte_intr_efd_enable(intr_handle, intr_vector))
888 if (rte_intr_dp_is_en(intr_handle)) {
889 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
890 dev->data->nb_rx_queues)) {
892 "Failed to allocate %d rx_queues intr_vec",
893 dev->data->nb_rx_queues);
898 /* configure MSI-X for sleeping until an Rx interrupt occurs */
899 ngbe_configure_msix(dev);
901 /* initialize transmission unit */
902 ngbe_dev_tx_init(dev);
904 /* This can fail when allocating mbufs for descriptor rings */
905 err = ngbe_dev_rx_init(dev);
907 PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
911 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
912 RTE_ETH_VLAN_EXTEND_MASK;
913 err = ngbe_vlan_offload_config(dev, mask);
915 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
919 ngbe_configure_port(dev);
921 err = ngbe_dev_rxtx_start(dev);
923 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
927 /* Skip link setup if loopback mode is enabled. */
928 if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
929 goto skip_link_setup;
931 err = hw->mac.check_link(hw, &speed, &link_up, 0);
934 dev->data->dev_link.link_status = link_up;
936 link_speeds = &dev->data->dev_conf.link_speeds;
937 if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
940 err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
945 if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
946 allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
947 if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
948 allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
949 if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
950 allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
952 if (*link_speeds & ~allowed_speeds) {
953 PMD_INIT_LOG(ERR, "Invalid link setting");
958 if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
959 speed = hw->mac.default_speeds;
961 if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
962 speed |= NGBE_LINK_SPEED_1GB_FULL;
963 if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
964 speed |= NGBE_LINK_SPEED_100M_FULL;
965 if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
966 speed |= NGBE_LINK_SPEED_10M_FULL;
970 err = hw->mac.setup_link(hw, speed, link_up);
976 if (rte_intr_allow_others(intr_handle)) {
977 ngbe_dev_misc_interrupt_setup(dev);
978 /* check if lsc interrupt is enabled */
979 if (dev->data->dev_conf.intr_conf.lsc != 0)
980 ngbe_dev_lsc_interrupt_setup(dev, TRUE);
982 ngbe_dev_lsc_interrupt_setup(dev, FALSE);
983 ngbe_dev_macsec_interrupt_setup(dev);
984 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
986 rte_intr_callback_unregister(intr_handle,
987 ngbe_dev_interrupt_handler, dev);
988 if (dev->data->dev_conf.intr_conf.lsc != 0)
990 "LSC won't enable because of no intr multiplex");
993 /* check if rxq interrupt is enabled */
994 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
995 rte_intr_dp_is_en(intr_handle))
996 ngbe_dev_rxq_interrupt_setup(dev);
998 /* enable UIO/VFIO intr/eventfd mapping */
999 rte_intr_enable(intr_handle);
1001 /* re-enable the interrupts that were enabled before the HW reset */
1002 ngbe_enable_intr(dev);
1004 if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
1005 (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
1006 /* GPIO0 is used for power on/off control */
1007 wr32(hw, NGBE_GPIODATA, 0);
1011 * Update link status right before return, because it may
1012 * start the link configuration process in a separate thread.
1014 ngbe_dev_link_update(dev, 0);
1016 ngbe_read_stats_registers(hw, hw_stats);
1017 hw->offset_loaded = 1;
1022 PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1023 ngbe_dev_clear_queues(dev);
1028 * Stop device: disable Rx and Tx functions to allow for reconfiguring.
1031 ngbe_dev_stop(struct rte_eth_dev *dev)
1033 struct rte_eth_link link;
1034 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1035 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1036 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1038 if (hw->adapter_stopped)
1041 PMD_INIT_FUNC_TRACE();
1043 if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
1044 (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
1045 /* GPIO0 is used for power on/off control */
1046 wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
1049 /* disable interrupts */
1050 ngbe_disable_intr(hw);
1053 ngbe_pf_reset_hw(hw);
1054 hw->adapter_stopped = 0;
1059 ngbe_dev_clear_queues(dev);
1061 /* Clear stored conf */
1062 dev->data->scattered_rx = 0;
1064 /* Clear recorded link status */
1065 memset(&link, 0, sizeof(link));
1066 rte_eth_linkstatus_set(dev, &link);
1068 if (!rte_intr_allow_others(intr_handle))
1069 /* fall back to the default handler */
1070 rte_intr_callback_register(intr_handle,
1071 ngbe_dev_interrupt_handler,
1074 /* Clean datapath event and queue/vec mapping */
1075 rte_intr_efd_disable(intr_handle);
1076 rte_intr_vec_list_free(intr_handle);
1078 hw->adapter_stopped = true;
1079 dev->data->dev_started = 0;
1085 * Reset and stop device.
1088 ngbe_dev_close(struct rte_eth_dev *dev)
1090 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1091 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1092 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1096 PMD_INIT_FUNC_TRACE();
1098 ngbe_pf_reset_hw(hw);
1102 ngbe_dev_free_queues(dev);
1104 /* reprogram the RAR[0] in case user changed it. */
1105 ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1107 /* Unlock any pending hardware semaphore */
1108 ngbe_swfw_lock_reset(hw);
1110 /* disable uio intr before callback unregister */
1111 rte_intr_disable(intr_handle);
1114 ret = rte_intr_callback_unregister(intr_handle,
1115 ngbe_dev_interrupt_handler, dev);
1116 if (ret >= 0 || ret == -ENOENT) {
1118 } else if (ret != -EAGAIN) {
1120 "intr callback unregister failed: %d",
1124 } while (retries++ < (10 + NGBE_LINK_UP_TIME));
1126 rte_free(dev->data->mac_addrs);
1127 dev->data->mac_addrs = NULL;
1129 rte_free(dev->data->hash_mac_addrs);
1130 dev->data->hash_mac_addrs = NULL;
1139 ngbe_dev_reset(struct rte_eth_dev *dev)
1143 ret = eth_ngbe_dev_uninit(dev);
1147 ret = eth_ngbe_dev_init(dev, NULL);
1152 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
1154 uint32_t current_counter = rd32(hw, reg); \
1155 if (current_counter < last_counter) \
1156 current_counter += 0x100000000LL; \
1157 if (!hw->offset_loaded) \
1158 last_counter = current_counter; \
1159 counter = current_counter - last_counter; \
1160 counter &= 0xFFFFFFFFLL; \
1163 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1165 uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
1166 uint64_t current_counter_msb = rd32(hw, reg_msb); \
1167 uint64_t current_counter = (current_counter_msb << 32) | \
1168 current_counter_lsb; \
1169 if (current_counter < last_counter) \
1170 current_counter += 0x1000000000LL; \
1171 if (!hw->offset_loaded) \
1172 last_counter = current_counter; \
1173 counter = current_counter - last_counter; \
1174 counter &= 0xFFFFFFFFFLL; \
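/*
 * Wrap-around example for the 36-bit macro above: if last_counter is
 * 0xFFFFFFFF0 and the register pair now reads 0x10, the raw difference
 * would underflow, so 2^36 (0x1000000000) is added first, giving the
 * true delta of 0x20.
 */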
1178 ngbe_read_stats_registers(struct ngbe_hw *hw,
1179 struct ngbe_hw_stats *hw_stats)
1184 for (i = 0; i < hw->nb_rx_queues; i++) {
1185 UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
1186 hw->qp_last[i].rx_qp_packets,
1187 hw_stats->qp[i].rx_qp_packets);
1188 UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
1189 hw->qp_last[i].rx_qp_bytes,
1190 hw_stats->qp[i].rx_qp_bytes);
1191 UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
1192 hw->qp_last[i].rx_qp_mc_packets,
1193 hw_stats->qp[i].rx_qp_mc_packets);
1194 UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
1195 hw->qp_last[i].rx_qp_bc_packets,
1196 hw_stats->qp[i].rx_qp_bc_packets);
1199 for (i = 0; i < hw->nb_tx_queues; i++) {
1200 UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
1201 hw->qp_last[i].tx_qp_packets,
1202 hw_stats->qp[i].tx_qp_packets);
1203 UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
1204 hw->qp_last[i].tx_qp_bytes,
1205 hw_stats->qp[i].tx_qp_bytes);
1206 UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
1207 hw->qp_last[i].tx_qp_mc_packets,
1208 hw_stats->qp[i].tx_qp_mc_packets);
1209 UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
1210 hw->qp_last[i].tx_qp_bc_packets,
1211 hw_stats->qp[i].tx_qp_bc_packets);
1215 hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
1216 hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
1217 hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
1218 hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
1219 hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
1220 hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
1222 hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
1223 hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
1226 hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
1227 hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
1228 hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
1229 hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
1230 hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
1231 hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
1232 hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
1233 hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
1236 hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
1237 hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
1238 hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
1240 hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
1241 hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
1242 hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
1244 hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
1245 hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
1247 hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
1248 hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
1249 hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
1250 hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
1251 hw_stats->rx_size_512_to_1023_packets +=
1252 rd64(hw, NGBE_MACRX512TO1023L);
1253 hw_stats->rx_size_1024_to_max_packets +=
1254 rd64(hw, NGBE_MACRX1024TOMAXL);
1255 hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
1256 hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
1257 hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
1258 hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
1259 hw_stats->tx_size_512_to_1023_packets +=
1260 rd64(hw, NGBE_MACTX512TO1023L);
1261 hw_stats->tx_size_1024_to_max_packets +=
1262 rd64(hw, NGBE_MACTX1024TOMAXL);
1264 hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
1265 hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
1266 hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
1269 hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
1270 hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
1271 hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
1272 hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
1275 hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
1276 hw_stats->tx_macsec_pkts_encrypted +=
1277 rd32(hw, NGBE_LSECTX_ENCPKT);
1278 hw_stats->tx_macsec_pkts_protected +=
1279 rd32(hw, NGBE_LSECTX_PROTPKT);
1280 hw_stats->tx_macsec_octets_encrypted +=
1281 rd32(hw, NGBE_LSECTX_ENCOCT);
1282 hw_stats->tx_macsec_octets_protected +=
1283 rd32(hw, NGBE_LSECTX_PROTOCT);
1284 hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
1285 hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
1286 hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
1287 hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
1288 hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
1289 hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
1290 hw_stats->rx_macsec_sc_pkts_unchecked +=
1291 rd32(hw, NGBE_LSECRX_UNCHKPKT);
1292 hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
1293 hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
1294 for (i = 0; i < 2; i++) {
1295 hw_stats->rx_macsec_sa_pkts_ok +=
1296 rd32(hw, NGBE_LSECRX_OKPKT(i));
1297 hw_stats->rx_macsec_sa_pkts_invalid +=
1298 rd32(hw, NGBE_LSECRX_INVPKT(i));
1299 hw_stats->rx_macsec_sa_pkts_notvalid +=
1300 rd32(hw, NGBE_LSECRX_BADPKT(i));
1302 for (i = 0; i < 4; i++) {
1303 hw_stats->rx_macsec_sa_pkts_unusedsa +=
1304 rd32(hw, NGBE_LSECRX_INVSAPKT(i));
1305 hw_stats->rx_macsec_sa_pkts_notusingsa +=
1306 rd32(hw, NGBE_LSECRX_BADSAPKT(i));
1308 hw_stats->rx_total_missed_packets =
1309 hw_stats->rx_up_dropped;
1313 ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1315 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1316 struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1317 struct ngbe_stat_mappings *stat_mappings =
1318 NGBE_DEV_STAT_MAPPINGS(dev);
1321 ngbe_read_stats_registers(hw, hw_stats);
1326 /* Fill out the rte_eth_stats statistics structure */
1327 stats->ipackets = hw_stats->rx_packets;
1328 stats->ibytes = hw_stats->rx_bytes;
1329 stats->opackets = hw_stats->tx_packets;
1330 stats->obytes = hw_stats->tx_bytes;
1332 memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1333 memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1334 memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1335 memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1336 memset(&stats->q_errors, 0, sizeof(stats->q_errors));
1337 for (i = 0; i < NGBE_MAX_QP; i++) {
1338 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1339 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1342 q_map = (stat_mappings->rqsm[n] >> offset)
1343 & QMAP_FIELD_RESERVED_BITS_MASK;
1344 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1345 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1346 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1347 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1349 q_map = (stat_mappings->tqsm[n] >> offset)
1350 & QMAP_FIELD_RESERVED_BITS_MASK;
1351 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1352 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1353 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1354 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
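/*
 * Mapping example, assuming NB_QMAP_FIELDS_PER_QSM_REG == 4 (four
 * 8-bit map fields per 32-bit QSM register): queue 5 uses
 * n = 5 / 4 = 1 and offset = (5 % 4) * 8 = 8, i.e. bits 15:8 of
 * rqsm[1]/tqsm[1] select its stat counter.
 */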
1358 stats->imissed = hw_stats->rx_total_missed_packets +
1359 hw_stats->rx_dma_drop;
1360 stats->ierrors = hw_stats->rx_crc_errors +
1361 hw_stats->rx_mac_short_packet_dropped +
1362 hw_stats->rx_length_errors +
1363 hw_stats->rx_undersize_errors +
1364 hw_stats->rx_oversize_errors +
1365 hw_stats->rx_illegal_byte_errors +
1366 hw_stats->rx_error_bytes +
1367 hw_stats->rx_fragment_errors;
1375 ngbe_dev_stats_reset(struct rte_eth_dev *dev)
1377 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1378 struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1380 /* HW registers are cleared on read */
1381 hw->offset_loaded = 0;
1382 ngbe_dev_stats_get(dev, NULL);
1383 hw->offset_loaded = 1;
1385 /* Reset software totals */
1386 memset(hw_stats, 0, sizeof(*hw_stats));
1391 /* This function calculates the number of xstats based on the current config */
1393 ngbe_xstats_calc_num(struct rte_eth_dev *dev)
1395 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1396 return NGBE_NB_HW_STATS +
1397 NGBE_NB_QP_STATS * nb_queues;
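/*
 * Example: a port configured with 4 Rx and 2 Tx queues reports
 * NGBE_NB_HW_STATS + NGBE_NB_QP_STATS * 4 extended statistics, since
 * the larger of the two queue counts is used.
 */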
1401 ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1405 /* Extended stats from ngbe_hw_stats */
1406 if (id < NGBE_NB_HW_STATS) {
1407 snprintf(name, size, "[hw]%s",
1408 rte_ngbe_stats_strings[id].name);
1411 id -= NGBE_NB_HW_STATS;
1414 if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1415 nb = id / NGBE_NB_QP_STATS;
1416 st = id % NGBE_NB_QP_STATS;
1417 snprintf(name, size, "[q%u]%s", nb,
1418 rte_ngbe_qp_strings[st].name);
1421 id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
1423 return -(int)(id + 1);
1427 ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1431 /* Extended stats from ngbe_hw_stats */
1432 if (id < NGBE_NB_HW_STATS) {
1433 *offset = rte_ngbe_stats_strings[id].offset;
1436 id -= NGBE_NB_HW_STATS;
1439 if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1440 nb = id / NGBE_NB_QP_STATS;
1441 st = id % NGBE_NB_QP_STATS;
1442 *offset = rte_ngbe_qp_strings[st].offset +
1443 nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1450 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1451 struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1453 unsigned int i, count;
1455 count = ngbe_xstats_calc_num(dev);
1456 if (xstats_names == NULL)
1459 /* Note: limit >= cnt_stats checked upstream
1460 * in rte_eth_xstats_names()
1462 limit = min(limit, count);
1464 /* Extended stats from ngbe_hw_stats */
1465 for (i = 0; i < limit; i++) {
1466 if (ngbe_get_name_by_id(i, xstats_names[i].name,
1467 sizeof(xstats_names[i].name))) {
1468 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1476 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1477 const uint64_t *ids,
1478 struct rte_eth_xstat_name *xstats_names,
1484 return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1486 for (i = 0; i < limit; i++) {
1487 if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1488 sizeof(xstats_names[i].name))) {
1489 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1498 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1501 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1502 struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1503 unsigned int i, count;
1505 ngbe_read_stats_registers(hw, hw_stats);
1507 /* If this is a reset, xstats is NULL and we have already cleared
1508 * the registers by reading them.
1510 count = ngbe_xstats_calc_num(dev);
1514 limit = min(limit, ngbe_xstats_calc_num(dev));
1516 /* Extended stats from ngbe_hw_stats */
1517 for (i = 0; i < limit; i++) {
1518 uint32_t offset = 0;
1520 if (ngbe_get_offset_by_id(i, &offset)) {
1521 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1524 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1532 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1535 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1536 struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1537 unsigned int i, count;
1539 ngbe_read_stats_registers(hw, hw_stats);
1541 /* If this is a reset, xstats is NULL and we have already cleared
1542 * the registers by reading them.
1544 count = ngbe_xstats_calc_num(dev);
1548 limit = min(limit, ngbe_xstats_calc_num(dev));
1550 /* Extended stats from ngbe_hw_stats */
1551 for (i = 0; i < limit; i++) {
1554 if (ngbe_get_offset_by_id(i, &offset)) {
1555 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1558 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1565 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1566 uint64_t *values, unsigned int limit)
1568 struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1572 return ngbe_dev_xstats_get_(dev, values, limit);
1574 for (i = 0; i < limit; i++) {
1577 if (ngbe_get_offset_by_id(ids[i], &offset)) {
1578 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1581 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1588 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1590 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1591 struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1593 /* HW registers are cleared on read */
1594 hw->offset_loaded = 0;
1595 ngbe_read_stats_registers(hw, hw_stats);
1596 hw->offset_loaded = 1;
1598 /* Reset software totals */
1599 memset(hw_stats, 0, sizeof(*hw_stats));
1605 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1607 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1610 ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1615 ret += 1; /* add the size of '\0' */
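/*
 * Sizing example: "0x%08x" always renders 10 characters, so snprintf()
 * returns 10 and fw_size must be at least 11 bytes to also hold the
 * terminating '\0' accounted for above.
 */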
1616 if (fw_size < (size_t)ret)
1623 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1625 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1626 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1628 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1629 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1630 dev_info->min_rx_bufsize = 1024;
1631 dev_info->max_rx_pktlen = 15872;
1632 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1633 dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1634 dev_info->max_vfs = pci_dev->max_vfs;
1635 dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1636 dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1637 dev_info->rx_queue_offload_capa);
1638 dev_info->tx_queue_offload_capa = 0;
1639 dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1641 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1643 .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1644 .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1645 .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1647 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1652 dev_info->default_txconf = (struct rte_eth_txconf) {
1654 .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1655 .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1656 .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1658 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1662 dev_info->rx_desc_lim = rx_desc_lim;
1663 dev_info->tx_desc_lim = tx_desc_lim;
1665 dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1666 RTE_ETH_LINK_SPEED_10M;
1668 /* Driver-preferred Rx/Tx parameters */
1669 dev_info->default_rxportconf.burst_size = 32;
1670 dev_info->default_txportconf.burst_size = 32;
1671 dev_info->default_rxportconf.nb_queues = 1;
1672 dev_info->default_txportconf.nb_queues = 1;
1673 dev_info->default_rxportconf.ring_size = 256;
1674 dev_info->default_txportconf.ring_size = 256;
1680 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1682 if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1683 dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1684 dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1685 dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1686 return ngbe_get_supported_ptypes();
1691 /* return 0 means the link status changed, -1 means it did not change */
1693 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1694 int wait_to_complete)
1696 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1697 struct rte_eth_link link;
1698 u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1700 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1705 memset(&link, 0, sizeof(link));
1706 link.link_status = RTE_ETH_LINK_DOWN;
1707 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1708 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1709 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1710 ~RTE_ETH_LINK_SPEED_AUTONEG);
1712 hw->mac.get_link_status = true;
1714 if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1715 return rte_eth_linkstatus_set(dev, &link);
1717 /* don't wait for completion if the caller did not request it or the LSC interrupt is enabled */
1718 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1721 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1723 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1724 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1725 return rte_eth_linkstatus_set(dev, &link);
1729 return rte_eth_linkstatus_set(dev, &link);
1731 intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1732 link.link_status = RTE_ETH_LINK_UP;
1733 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1735 switch (link_speed) {
1737 case NGBE_LINK_SPEED_UNKNOWN:
1738 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1741 case NGBE_LINK_SPEED_10M_FULL:
1742 link.link_speed = RTE_ETH_SPEED_NUM_10M;
1746 case NGBE_LINK_SPEED_100M_FULL:
1747 link.link_speed = RTE_ETH_SPEED_NUM_100M;
1751 case NGBE_LINK_SPEED_1GB_FULL:
1752 link.link_speed = RTE_ETH_SPEED_NUM_1G;
1758 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1759 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1760 NGBE_LINK_SPEED_100M_FULL |
1761 NGBE_LINK_SPEED_10M_FULL)) {
1762 wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1763 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1767 return rte_eth_linkstatus_set(dev, &link);
1771 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1773 return ngbe_dev_link_update_share(dev, wait_to_complete);
1777 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1779 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1782 fctrl = rd32(hw, NGBE_PSRCTL);
1783 fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1784 wr32(hw, NGBE_PSRCTL, fctrl);
1790 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1792 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1795 fctrl = rd32(hw, NGBE_PSRCTL);
1796 fctrl &= (~NGBE_PSRCTL_UCP);
1797 if (dev->data->all_multicast == 1)
1798 fctrl |= NGBE_PSRCTL_MCP;
1800 fctrl &= (~NGBE_PSRCTL_MCP);
1801 wr32(hw, NGBE_PSRCTL, fctrl);
1807 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1809 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1812 fctrl = rd32(hw, NGBE_PSRCTL);
1813 fctrl |= NGBE_PSRCTL_MCP;
1814 wr32(hw, NGBE_PSRCTL, fctrl);
1820 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1822 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1825 if (dev->data->promiscuous == 1)
1826 return 0; /* must remain in all_multicast mode */
1828 fctrl = rd32(hw, NGBE_PSRCTL);
1829 fctrl &= (~NGBE_PSRCTL_MCP);
1830 wr32(hw, NGBE_PSRCTL, fctrl);
1836 * It clears the interrupt causes and enables the interrupt.
1837 * It is called only once, during NIC initialization.
1840 * Pointer to struct rte_eth_dev.
1842 * Enable or Disable.
1845 * - On success, zero.
1846 * - On failure, a negative value.
1849 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1851 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1853 ngbe_dev_link_status_print(dev);
1855 intr->mask_misc |= NGBE_ICRMISC_PHY;
1856 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1858 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1859 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
1866 * It clears the interrupt causes and enables the interrupt.
1867 * It is called only once, during NIC initialization.
1870 * Pointer to struct rte_eth_dev.
1873 * - On success, zero.
1874 * - On failure, a negative value.
1877 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1879 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1882 mask = NGBE_ICR_MASK;
1883 mask &= (1ULL << NGBE_MISC_VEC_ID);
1885 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1891 * It clears the interrupt causes and enables the interrupt.
1892 * It is called only once, during NIC initialization.
1895 * Pointer to struct rte_eth_dev.
1898 * - On success, zero.
1899 * - On failure, a negative value.
1902 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1904 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1907 mask = NGBE_ICR_MASK;
1908 mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
1915 * It clears the interrupt causes and enables the interrupt.
1916 * It is called only once, during NIC initialization.
1919 * Pointer to struct rte_eth_dev.
1922 * - On success, zero.
1923 * - On failure, a negative value.
1926 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1928 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1930 intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
1936 * It reads the ICR and sets the flag for link_update.
1939 * Pointer to struct rte_eth_dev.
1942 * - On success, zero.
1943 * - On failure, a negative value.
1946 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1949 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1950 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1952 /* clear all cause masks */
1953 ngbe_disable_intr(hw);
1955 /* read-on-clear nic registers here */
1956 eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
1957 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
1961 /* set flag for async link update */
1962 if (eicr & NGBE_ICRMISC_PHY)
1963 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1965 if (eicr & NGBE_ICRMISC_VFMBX)
1966 intr->flags |= NGBE_FLAG_MAILBOX;
1968 if (eicr & NGBE_ICRMISC_LNKSEC)
1969 intr->flags |= NGBE_FLAG_MACSEC;
1971 if (eicr & NGBE_ICRMISC_GPIO)
1972 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1978 * It gets and then prints the link status.
1981 * Pointer to struct rte_eth_dev.
1984 * - On success, zero.
1985 * - On failure, a negative value.
1988 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
1990 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1991 struct rte_eth_link link;
1993 rte_eth_linkstatus_get(dev, &link);
1995 if (link.link_status == RTE_ETH_LINK_UP) {
1996 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1997 (int)(dev->data->port_id),
1998 (unsigned int)link.link_speed,
1999 link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2000 "full-duplex" : "half-duplex");
2002 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2003 (int)(dev->data->port_id));
2005 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2006 pci_dev->addr.domain,
2008 pci_dev->addr.devid,
2009 pci_dev->addr.function);
2013 * It executes link_update after an interrupt has occurred.
2016 * Pointer to struct rte_eth_dev.
2019 * - On success, zero.
2020 * - On failure, a negative value.
2023 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2025 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2028 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2030 if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2031 struct rte_eth_link link;
2033 /* get the link status before the update, to predict the transition */
2034 rte_eth_linkstatus_get(dev, &link);
2036 ngbe_dev_link_update(dev, 0);
2039 if (link.link_status != RTE_ETH_LINK_UP)
2040 /* handle it 1 second later, waiting for the link to stabilize */
2041 timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
2042 /* the link is likely going down */
2044 /* handle it 4 seconds later, waiting for the link to stabilize */
2045 timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
2047 ngbe_dev_link_status_print(dev);
2048 if (rte_eal_alarm_set(timeout * 1000,
2049 ngbe_dev_interrupt_delayed_handler,
2051 PMD_DRV_LOG(ERR, "Error setting alarm");
2053 /* remember original mask */
2054 intr->mask_misc_orig = intr->mask_misc;
2055 /* only disable lsc interrupt */
2056 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2058 intr->mask_orig = intr->mask;
2059 /* disable only the misc interrupts */
2060 intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
2064 PMD_DRV_LOG(DEBUG, "enable intr immediately");
2065 ngbe_enable_intr(dev);
2071 * Interrupt handler registered as an alarm callback for the delayed
2072 * handling of a specific interrupt, waiting for the NIC state to stabilize.
2073 * The ngbe interrupt state is not stable right after the link goes down,
2074 * so it needs to wait 4 seconds to reach a stable status.
2077 * The address of parameter (struct rte_eth_dev *) registered before.
2080 ngbe_dev_interrupt_delayed_handler(void *param)
2082 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2083 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2084 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2087 ngbe_disable_intr(hw);
2089 eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2091 if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2092 ngbe_dev_link_update(dev, 0);
2093 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2094 ngbe_dev_link_status_print(dev);
2095 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2099 if (intr->flags & NGBE_FLAG_MACSEC) {
2100 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2102 intr->flags &= ~NGBE_FLAG_MACSEC;
2105 /* restore original mask */
2106 intr->mask_misc = intr->mask_misc_orig;
2107 intr->mask_misc_orig = 0;
2108 intr->mask = intr->mask_orig;
2109 intr->mask_orig = 0;
2111 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2112 ngbe_enable_intr(dev);
2116 * Interrupt handler triggered by the NIC for handling a
2117 * specific interrupt.
2120 * The address of parameter (struct rte_eth_dev *) registered before.
2123 ngbe_dev_interrupt_handler(void *param)
2125 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2127 ngbe_dev_interrupt_get_status(dev);
2128 ngbe_dev_interrupt_action(dev);
2132 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2133 uint32_t index, uint32_t pool)
2135 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2136 uint32_t enable_addr = 1;
2138 return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2143 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2145 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2147 ngbe_clear_rar(hw, index);
2151 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2153 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2155 ngbe_remove_rar(dev, 0);
2156 ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2162 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2164 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2165 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
2166 struct rte_eth_dev_data *dev_data = dev->data;
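/*
 * Frame size example: an MTU of 1500 gives frame_size = 1500 + 14
 * (Ethernet header) + 4 (CRC) + 4 (single VLAN tag) = 1522 bytes.
 */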
2168 /* If the device is started, refuse an MTU that requires scattered
2169 * packet support when this feature has not been enabled before.
2171 if (dev_data->dev_started && !dev_data->scattered_rx &&
2172 (frame_size + 2 * NGBE_VLAN_TAG_SIZE >
2173 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2174 PMD_INIT_LOG(ERR, "Stop port first.");
2179 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2180 NGBE_FRAME_SIZE_MAX);
2182 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2183 NGBE_FRMSZ_MAX(frame_size));
2189 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2191 uint32_t vector = 0;
2193 switch (hw->mac.mc_filter_type) {
2194 case 0: /* use bits [47:36] of the address */
2195 vector = ((uc_addr->addr_bytes[4] >> 4) |
2196 (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2198 case 1: /* use bits [46:35] of the address */
2199 vector = ((uc_addr->addr_bytes[4] >> 3) |
2200 (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2202 case 2: /* use bits [45:34] of the address */
2203 vector = ((uc_addr->addr_bytes[4] >> 2) |
2204 (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2206 case 3: /* use bits [43:32] of the address */
2207 vector = ((uc_addr->addr_bytes[4]) |
2208 (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2210 default: /* Invalid mc_filter_type */
2214 /* the vector can only be 12 bits wide, or the table boundary will be exceeded */
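/*
 * Example: with mc_filter_type 0 the 12-bit vector comes from MAC
 * address bits [47:36]; callers then split it into a 7-bit table word
 * index (vector >> 5) and a 5-bit position (vector & 0x1F).
 */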
2220 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2221 struct rte_ether_addr *mac_addr, uint8_t on)
2229 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2230 struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2232 vector = ngbe_uta_vector(hw, mac_addr);
2233 uta_idx = (vector >> 5) & 0x7F;
2234 uta_mask = 0x1UL << (vector & 0x1F);
2236 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2239 reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2241 uta_info->uta_in_use++;
2242 reg_val |= uta_mask;
2243 uta_info->uta_shadow[uta_idx] |= uta_mask;
2245 uta_info->uta_in_use--;
2246 reg_val &= ~uta_mask;
2247 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2250 wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2252 psrctl = rd32(hw, NGBE_PSRCTL);
2253 if (uta_info->uta_in_use > 0)
2254 psrctl |= NGBE_PSRCTL_UCHFENA;
2256 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2258 psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2259 psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2260 wr32(hw, NGBE_PSRCTL, psrctl);
2266 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2268 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2269 struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2274 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2275 uta_info->uta_shadow[i] = ~0;
2276 wr32(hw, NGBE_UCADDRTBL(i), ~0);
2279 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2280 uta_info->uta_shadow[i] = 0;
2281 wr32(hw, NGBE_UCADDRTBL(i), 0);
2285 psrctl = rd32(hw, NGBE_PSRCTL);
2287 psrctl |= NGBE_PSRCTL_UCHFENA;
2289 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2291 psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2292 psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2293 wr32(hw, NGBE_PSRCTL, psrctl);
2299 * Set the IVAR registers, mapping interrupt causes to vectors
2301 * pointer to ngbe_hw struct
2303 * 0 for Rx, 1 for Tx, -1 for other causes
2305 * queue to map the corresponding interrupt to
2307 * the vector to map to the corresponding queue
2310 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2311 uint8_t queue, uint8_t msix_vector)
2315 if (direction == -1) {
2317 msix_vector |= NGBE_IVARMISC_VLD;
2319 tmp = rd32(hw, NGBE_IVARMISC);
2320 tmp &= ~(0xFF << idx);
2321 tmp |= (msix_vector << idx);
2322 wr32(hw, NGBE_IVARMISC, tmp);
2324 /* Rx or Tx causes */
2325 /* Workaround for lost ICR */
2326 idx = ((16 * (queue & 1)) + (8 * direction));
2327 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2328 tmp &= ~(0xFF << idx);
2329 tmp |= (msix_vector << idx);
2330 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
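/*
 * Worked example for the IVAR layout above: Rx queue 3 (direction 0)
 * gives idx = 16 * (3 & 1) + 8 * 0 = 16, so the vector lands in byte
 * lane 2 of IVAR(3 >> 1) == IVAR(1).
 */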
2335 * Sets up the hardware to properly generate MSI-X interrupts
2337 * board private structure
2340 ngbe_configure_msix(struct rte_eth_dev *dev)
2342 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2343 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2344 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2345 uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2346 uint32_t vec = NGBE_MISC_VEC_ID;
2350 * Don't configure the MSI-X registers if no mapping is done
2351 * between interrupt vectors and event fds;
2352 * but if MSI-X has already been enabled, auto clean,
2353 * auto mask and throttling still need to be configured.
2355 gpie = rd32(hw, NGBE_GPIE);
2356 if (!rte_intr_dp_is_en(intr_handle) &&
2357 !(gpie & NGBE_GPIE_MSIX))
2360 if (rte_intr_allow_others(intr_handle)) {
2361 base = NGBE_RX_VEC_START;
2365 /* setup GPIE for MSI-X mode */
2366 gpie = rd32(hw, NGBE_GPIE);
2367 gpie |= NGBE_GPIE_MSIX;
2368 wr32(hw, NGBE_GPIE, gpie);
2370 /* Populate the IVAR table and set the ITR values to the
2371 * corresponding register.
2373 if (rte_intr_dp_is_en(intr_handle)) {
2374 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2376 /* by default, 1:1 mapping */
2377 ngbe_set_ivar_map(hw, 0, queue_id, vec);
2378 rte_intr_vec_list_index_set(intr_handle,
2380 if (vec < base + rte_intr_nb_efd_get(intr_handle)
2385 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2387 wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2388 NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2393 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2394 u8 **mc_addr_ptr, u32 *vmdq)
2399 mc_addr = *mc_addr_ptr;
2400 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2405 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2406 struct rte_ether_addr *mc_addr_set,
2407 uint32_t nb_mc_addr)
2409 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2412 mc_addr_list = (u8 *)mc_addr_set;
2413 return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2414 ngbe_dev_addr_list_itr, TRUE);
2417 static const struct eth_dev_ops ngbe_eth_dev_ops = {
2418 .dev_configure = ngbe_dev_configure,
2419 .dev_infos_get = ngbe_dev_info_get,
2420 .dev_start = ngbe_dev_start,
2421 .dev_stop = ngbe_dev_stop,
2422 .dev_close = ngbe_dev_close,
2423 .dev_reset = ngbe_dev_reset,
2424 .promiscuous_enable = ngbe_dev_promiscuous_enable,
2425 .promiscuous_disable = ngbe_dev_promiscuous_disable,
2426 .allmulticast_enable = ngbe_dev_allmulticast_enable,
2427 .allmulticast_disable = ngbe_dev_allmulticast_disable,
2428 .link_update = ngbe_dev_link_update,
2429 .stats_get = ngbe_dev_stats_get,
2430 .xstats_get = ngbe_dev_xstats_get,
2431 .xstats_get_by_id = ngbe_dev_xstats_get_by_id,
2432 .stats_reset = ngbe_dev_stats_reset,
2433 .xstats_reset = ngbe_dev_xstats_reset,
2434 .xstats_get_names = ngbe_dev_xstats_get_names,
2435 .xstats_get_names_by_id = ngbe_dev_xstats_get_names_by_id,
2436 .fw_version_get = ngbe_fw_version_get,
2437 .dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get,
2438 .mtu_set = ngbe_dev_mtu_set,
2439 .vlan_filter_set = ngbe_vlan_filter_set,
2440 .vlan_tpid_set = ngbe_vlan_tpid_set,
2441 .vlan_offload_set = ngbe_vlan_offload_set,
2442 .vlan_strip_queue_set = ngbe_vlan_strip_queue_set,
2443 .rx_queue_start = ngbe_dev_rx_queue_start,
2444 .rx_queue_stop = ngbe_dev_rx_queue_stop,
2445 .tx_queue_start = ngbe_dev_tx_queue_start,
2446 .tx_queue_stop = ngbe_dev_tx_queue_stop,
2447 .rx_queue_setup = ngbe_dev_rx_queue_setup,
2448 .rx_queue_release = ngbe_dev_rx_queue_release,
2449 .tx_queue_setup = ngbe_dev_tx_queue_setup,
2450 .tx_queue_release = ngbe_dev_tx_queue_release,
2451 .mac_addr_add = ngbe_add_rar,
2452 .mac_addr_remove = ngbe_remove_rar,
2453 .mac_addr_set = ngbe_set_default_mac_addr,
2454 .uc_hash_table_set = ngbe_uc_hash_table_set,
2455 .uc_all_hash_table_set = ngbe_uc_all_hash_table_set,
2456 .set_mc_addr_list = ngbe_dev_set_mc_addr_list,
2457 .rx_burst_mode_get = ngbe_rx_burst_mode_get,
2458 .tx_burst_mode_get = ngbe_tx_burst_mode_get,
2461 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
2462 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
2463 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
2465 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
2466 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
2468 #ifdef RTE_ETHDEV_DEBUG_RX
2469 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
2471 #ifdef RTE_ETHDEV_DEBUG_TX
2472 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);