/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"

#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"
static const struct reg_info ngbe_regs_general[] = {
	{NGBE_RST, 1, 1, "NGBE_RST"},
	{NGBE_STAT, 1, 1, "NGBE_STAT"},
	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
static const struct reg_info ngbe_regs_nvm[] = {

static const struct reg_info ngbe_regs_interrupt[] = {

static const struct reg_info ngbe_regs_fctl_others[] = {

static const struct reg_info ngbe_regs_rxdma[] = {

static const struct reg_info ngbe_regs_rx[] = {
static const struct reg_info ngbe_regs_tx[] = {
static const struct reg_info ngbe_regs_wakeup[] = {

static const struct reg_info ngbe_regs_mac[] = {

static const struct reg_info ngbe_regs_diagnostic[] = {

static const struct reg_info *ngbe_regs_others[] = {
		ngbe_regs_fctl_others,
static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
				       uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);
#define NGBE_SET_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] |= 1 << bit;\
} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] &= ~(1 << bit);\
} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
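/*
 * For illustration: with 32-bit bitmap words, sizeof((h)->bitmap[0]) * NBBY
 * is 32, so each word tracks the strip setting of 32 queues. E.g. for
 * queue 35: idx = 35 / 32 = 1 and bit = 35 % 32 = 3, i.e. bit 3 of
 * bitmap[1] records whether VLAN stripping is enabled on that queue.
 */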
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};
static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),

	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),

	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
			  sizeof(rte_ngbe_stats_strings[0]))
/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
			  sizeof(rte_ngbe_qp_strings[0]))
static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)

	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
ngbe_enable_intr(struct rte_eth_dev *dev)

	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);

ngbe_disable_intr(struct ngbe_hw *hw)

	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
ngbe_swfw_lock_reset(struct ngbe_hw *hw)

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * the lock cannot be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |

	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)

	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_queue_count = ngbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * Rx and Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* The Tx function used in the primary process is set by the
		 * last initialized queue; Tx queues may not have been
		 * initialized by the primary process yet.
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				     "No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
				      NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);
	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ngbe_fc_full;
	hw->fc.current_mode = ngbe_fc_full;
	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
	hw->fc.low_water = NGBE_FC_XON_LOTH;
	hw->fc.high_water = NGBE_FC_XOFF_HITH;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);
	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}
	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the HW strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ngbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
		     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;

eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)

	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
				  sizeof(struct ngbe_adapter),
				  eth_dev_pci_specific_init, pci_dev,
				  eth_ngbe_dev_init, NULL);
static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)

	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
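	/*
	 * For illustration: the VFTA is 128 32-bit words covering all 4096
	 * VLAN IDs. E.g. for vlan_id = 100: vid_idx = (100 >> 5) & 0x7F = 3
	 * and vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4 of VLANTBL(3).
	 */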
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			  !(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			  (rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	rxcfg &= ~NGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		   enum rte_vlan_type vlan_type,
		   uint16_t tpid)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
			      NGBE_VLANCTL_TPID_MASK,
			      NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
			      NGBE_DMATXCTRL_TPID_MASK,
			      NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				    "Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
			      NGBE_TAGTPID_LSB_MASK,
			      NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, NGBE_EXTAG,
			      NGBE_EXTAG_VLAN_MASK,
			      NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
			      NGBE_VLANCTL_TPID_MASK,
			      NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
			      NGBE_DMATXCTRL_TPID_MASK,
			      NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq)
			wr32m(hw, NGBE_TAGTPID(0),
			      NGBE_TAGTPID_MSB_MASK,
			      NGBE_TAGTPID_MSB(tpid));
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)

	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting for per-queue HW VLAN strip */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);

ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting for per-queue HW VLAN strip */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);

ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);

ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);

ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)

	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_hw_strip_enable(dev, i);
		else
			ngbe_vlan_hw_strip_disable(dev, i);
	}
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)

	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)

	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;

ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)

	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
ngbe_dev_configure(struct rte_eth_dev *dev)

	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation preconditions, it will be reset.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
ngbe_dev_start(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	hw->adapter_stopped = 0;

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	ngbe_pf_host_configure(dev);

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
	       RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	hw->mac.setup_pba(hw);
	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	hw->phy.init_hw(hw);
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(ERR,
				     "LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
	    (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* GPIO 0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before returning: it may start the link
	 * configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
ngbe_dev_stop(struct rte_eth_dev *dev)

	struct rte_eth_link link;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
	    (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* GPIO 0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	adapter->rss_reta_updated = 0;

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
/*
 * Reset and stop device.
 */
ngbe_dev_close(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ngbe_pf_reset_hw(hw);

	ngbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
						   ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_DRV_LOG(ERR,
				    "intr callback unregister failed: %d",
				    ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	/* uninitialize PF if max_vfs not zero */
	ngbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;
ngbe_dev_reset(struct rte_eth_dev *dev)

	int ret;

	/* When a DPDK PMD PF begins to reset the PF port, it should notify
	 * all its VFs to make them align with it. The detailed notification
	 * mechanism is PMD specific. As to ngbe PF, it is rather complex.
	 * To avoid unexpected behavior in VFs, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
{ \
	uint32_t current_counter = rd32(hw, reg); \
	if (current_counter < last_counter) \
		current_counter += 0x100000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFLL; \
}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
{ \
	uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
	uint64_t current_counter_msb = rd32(hw, reg_msb); \
	uint64_t current_counter = (current_counter_msb << 32) | \
				   current_counter_lsb; \
	if (current_counter < last_counter) \
		current_counter += 0x1000000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFFLL; \
}
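/*
 * For illustration: the hardware queue counters are 32-bit (or 36-bit,
 * split across two registers) and wrap around. When the current raw value
 * is below the last snapshot, one full wrap (2^32 or 2^36) is added before
 * taking the difference, keeping the accumulated counter monotonic.
 */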
ngbe_read_stats_registers(struct ngbe_hw *hw,
			  struct ngbe_hw_stats *hw_stats)

	unsigned int i;

	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
					hw->qp_last[i].rx_qp_packets,
					hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
					hw->qp_last[i].rx_qp_bytes,
					hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
					hw->qp_last[i].rx_qp_mc_packets,
					hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
					hw->qp_last[i].rx_qp_bc_packets,
					hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
					hw->qp_last[i].tx_qp_packets,
					hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
					hw->qp_last[i].tx_qp_bytes,
					hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
					hw->qp_last[i].tx_qp_mc_packets,
					hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
					hw->qp_last[i].tx_qp_bc_packets,
					hw_stats->qp[i].tx_qp_bc_packets);
	}
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	unsigned int i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
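		/*
		 * For illustration: each 32-bit QSM register packs four 8-bit
		 * queue-to-counter mapping fields (assuming
		 * NB_QMAP_FIELDS_PER_QSM_REG is 4), so queue i uses register
		 * i / 4 at bit offset (i % 4) * 8.
		 */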
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
			& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
			& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	stats->imissed = hw_stats->rx_total_missed_packets +
			 hw_stats->rx_dma_drop;
	stats->ierrors = hw_stats->rx_crc_errors +
			 hw_stats->rx_mac_short_packet_dropped +
			 hw_stats->rx_length_errors +
			 hw_stats->rx_undersize_errors +
			 hw_stats->rx_oversize_errors +
			 hw_stats->rx_illegal_byte_errors +
			 hw_stats->rx_error_bytes +
			 hw_stats->rx_fragment_errors;

	return 0;
ngbe_dev_stats_reset(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
/* This function calculates the number of xstats based on the current config */
ngbe_xstats_calc_num(struct rte_eth_dev *dev)

	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);

	return NGBE_NB_HW_STATS +
	       NGBE_NB_QP_STATS * nb_queues;
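/*
 * For illustration: with one Rx and one Tx queue configured,
 * max(nb_rx_queues, nb_tx_queues) is 1, so the total is
 * NGBE_NB_HW_STATS + NGBE_NB_QP_STATS * 1 (five per-queue entries).
 */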
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)

	unsigned int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			 rte_ngbe_stats_strings[id].name);
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			 rte_ngbe_qp_strings[st].name);
		return 0;
	}
	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

	return -(int)(id + 1);
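/*
 * For illustration: id 0 resolves to "[hw]mng_bmc2host_packets"; the first
 * id past NGBE_NB_HW_STATS resolves to "[q0]rx_qp_packets", the next to
 * "[q0]tx_qp_packets", and so on through each queue's five counters.
 */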
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)

	unsigned int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		*offset = rte_ngbe_stats_strings[id].offset;
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		*offset = rte_ngbe_qp_strings[st].offset +
			  nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}
static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned int limit)

	unsigned int i, count;

	count = ngbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(i, xstats_names[i].name,
					sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;

static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
					   const uint64_t *ids,
					   struct rte_eth_xstat_name *xstats_names,
					   unsigned int limit)

	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
					sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;
ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int limit)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
		     unsigned int limit)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (values == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			  uint64_t *values, unsigned int limit)

	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
ngbe_dev_xstats_reset(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret;

	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
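	/*
	 * For illustration: snprintf() returns the number of characters that
	 * would have been written, excluding the terminating '\0', so one
	 * byte is added before comparing with the caller's buffer size; a
	 * non-zero return tells the caller the buffer size it must provide.
	 */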
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)

	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
			       RTE_ETH_LINK_SPEED_10M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)

	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
		return ngbe_get_supported_ptypes();

	return NULL;
/* return 0 means link status changed, -1 means not changed */
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
			   int wait_to_complete)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_eth_link link;
	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
	u32 lan_speed = 0;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	bool link_up;
	int err;
	int wait = 1;

	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ~RTE_ETH_LINK_SPEED_AUTONEG);

	hw->mac.get_link_status = true;

	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (link_up == 0)
		return rte_eth_linkstatus_set(dev, &link);

	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case NGBE_LINK_SPEED_UNKNOWN:
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		break;

	case NGBE_LINK_SPEED_10M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_10M;
		lan_speed = 0;
		break;

	case NGBE_LINK_SPEED_100M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		lan_speed = 1;
		break;

	case NGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		lan_speed = 2;
		break;
	}

	wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
	if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
			  NGBE_LINK_SPEED_100M_FULL |
			  NGBE_LINK_SPEED_10M_FULL)) {
		wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
		      NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
	}

	return rte_eth_linkstatus_set(dev, &link);
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)

	return ngbe_dev_link_update_share(dev, wait_to_complete);

ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, NGBE_PSRCTL);
	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
	wr32(hw, NGBE_PSRCTL, fctrl);

	return 0;

ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, NGBE_PSRCTL);
	fctrl &= (~NGBE_PSRCTL_UCP);
	if (dev->data->all_multicast == 1)
		fctrl |= NGBE_PSRCTL_MCP;
	else
		fctrl &= (~NGBE_PSRCTL_MCP);
	wr32(hw, NGBE_PSRCTL, fctrl);

	return 0;

ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, NGBE_PSRCTL);
	fctrl |= NGBE_PSRCTL_MCP;
	wr32(hw, NGBE_PSRCTL, fctrl);

	return 0;

ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t fctrl;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	fctrl = rd32(hw, NGBE_PSRCTL);
	fctrl &= (~NGBE_PSRCTL_MCP);
	wr32(hw, NGBE_PSRCTL, fctrl);

	return 0;
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 * @param on
 *   Enable or Disable.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)

	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	ngbe_dev_link_status_print(dev);
	if (on != 0) {
		intr->mask_misc |= NGBE_ICRMISC_PHY;
		intr->mask_misc |= NGBE_ICRMISC_GPIO;
	} else {
		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
	}

	return 0;
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)

	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u64 mask;

	mask = NGBE_ICR_MASK;
	mask &= (1ULL << NGBE_MISC_VEC_ID);
	intr->mask |= mask;
	intr->mask_misc |= NGBE_ICRMISC_GPIO;

	return 0;
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)

	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u64 mask;

	mask = NGBE_ICR_MASK;
	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
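	/*
	 * For illustration: (1ULL << NGBE_RX_VEC_START) - 1 is a mask of all
	 * bits below the first Rx queue vector, so clearing those bits from
	 * NGBE_ICR_MASK leaves only the Rx queue interrupt causes enabled.
	 */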
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)

	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;

	return 0;

/*
 * It reads ICR and sets flag for the link_update.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)

	uint32_t eicr;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	/* clear all cause mask */
	ngbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	/* set flag for async link update */
	if (eicr & NGBE_ICRMISC_PHY)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & NGBE_ICRMISC_VFMBX)
		intr->flags |= NGBE_FLAG_MAILBOX;

	if (eicr & NGBE_ICRMISC_LNKSEC)
		intr->flags |= NGBE_FLAG_MACSEC;

	if (eicr & NGBE_ICRMISC_GPIO)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	return 0;
/*
 * It gets and then prints the link status.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
ngbe_dev_link_status_print(struct rte_eth_dev *dev)

	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status == RTE_ETH_LINK_UP) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			     (int)(dev->data->port_id),
			     (unsigned int)link.link_speed,
			     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
			     "full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
			     (int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
		     pci_dev->addr.devid,
		     pci_dev->addr.function);
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
ngbe_dev_interrupt_action(struct rte_eth_dev *dev)

	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	int64_t timeout;

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & NGBE_FLAG_MAILBOX) {
		ngbe_pf_mbx_process(dev);
		intr->flags &= ~NGBE_FLAG_MAILBOX;
	}

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for prediction later */
		rte_eth_linkstatus_get(dev, &link);

		ngbe_dev_link_update(dev, 0);

		/* likely to up */
		if (link.link_status != RTE_ETH_LINK_UP)
			/* handle it 1 sec later, waiting for it to become stable */
			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, waiting for it to become stable */
			timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;

		ngbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      ngbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~NGBE_ICRMISC_PHY;

			intr->mask_orig = intr->mask;
			/* only disable all misc interrupts */
			intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	ngbe_enable_intr(dev);

	return 0;
/*
 * Interrupt handler which shall be registered as an alarm callback for
 * delayed handling of a specific interrupt, waiting for the NIC state to
 * become stable. The NIC interrupt state is not stable for ngbe right
 * after the link goes down, so it waits 4 seconds to get a stable status.
 *
 * @param param
 *   The address of parameter (struct rte_eth_dev *) registered before.
 */
ngbe_dev_interrupt_delayed_handler(void *param)

	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t eicr;

	ngbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
	if (eicr & NGBE_ICRMISC_VFMBX)
		ngbe_pf_mbx_process(dev);

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		ngbe_dev_link_update(dev, 0);
		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
		ngbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & NGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~NGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;
	intr->mask = intr->mask_orig;
	intr->mask_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	ngbe_enable_intr(dev);
/*
 * Interrupt handler triggered by NIC for handling a specific interrupt.
 *
 * @param param
 *   The address of parameter (struct rte_eth_dev *) registered before.
 */
ngbe_dev_interrupt_handler(void *param)

	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ngbe_dev_interrupt_get_status(dev);
	ngbe_dev_interrupt_action(dev);

ngbe_dev_led_on(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;

ngbe_dev_led_off(struct rte_eth_dev *dev)

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;

static int
ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t mflcn_reg;
	uint32_t fccfg_reg;
	int rx_pause;
	int tx_pause;

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;

	/*
	 * Return rx_pause status according to the actual setting of
	 * the RXFCCFG register.
	 */
	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
	if (mflcn_reg & NGBE_RXFCCFG_FC)
		rx_pause = 1;
	else
		rx_pause = 0;

	/*
	 * Return tx_pause status according to the actual setting of
	 * the TXFCCFG register.
	 */
	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
	if (fccfg_reg & NGBE_TXFCCFG_FC)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_ETH_FC_NONE;

	return 0;
}

static int
ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
		ngbe_fc_none,
		ngbe_fc_rx_pause,
		ngbe_fc_tx_pause,
		ngbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/*
	 * At least reserve one Ethernet frame for the watermark;
	 * high_water/low_water are in kilobytes for ngbe.
	 */
	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
	if (fc_conf->high_water > max_high_water ||
	    fc_conf->high_water < fc_conf->low_water) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "high_water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;
	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;

	err = hw->mac.fc_enable(hw);

	/* Not negotiated is not an error case */
	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
		      (fc_conf->mac_ctrl_frame_fwd
		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
		ngbe_flush(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
	return -EIO;
}
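
/*
 * Illustrative sketch (not part of the driver): configuring full-duplex
 * pause from an application through the generic ethdev API, which lands in
 * ngbe_flow_ctrl_set() above. The watermark values are examples only; they
 * are in kilobytes and must satisfy low_water <= high_water <=
 * max_high_water as checked above.
 */
static int __rte_unused
example_enable_pause(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	fc_conf.mode = RTE_ETH_FC_FULL;	/* generate and honour PAUSE */
	fc_conf.pause_time = 0x680;	/* quanta carried in PAUSE frames */
	fc_conf.high_water = 32;	/* KB: start sending XOFF */
	fc_conf.low_water = 16;		/* KB: send XON again */
	fc_conf.send_xon = 1;
	fc_conf.autoneg = 1;		/* let FC be autonegotiated */

	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}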

int
ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	PMD_INIT_FUNC_TRACE();

	if (!hw->is_pf) {
		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
			"NIC.");
		return -ENOTSUP;
	}

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			"(%d) doesn't match the number the hardware can support "
			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1)) {
				reta &= ~(MS32(8 * j, 0xFF));
				reta |= LS32(reta_conf[idx].reta[shift + j],
						8 * j, 0xFF);
			}
		}
		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
	}
	adapter->rss_reta_updated = 1;

	return 0;
}

int
ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			"(%d) doesn't match the number the hardware can support "
			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1))
				reta_conf[idx].reta[shift + j] =
					(uint16_t)RS32(reta, 8 * j, 0xFF);
		}
	}

	return 0;
}
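
/*
 * Illustrative sketch (not part of the driver): spreading the 128-entry
 * redirection table above across the first nb_rx_queues queues. Each
 * rte_eth_rss_reta_entry64 group covers RTE_ETH_RETA_GROUP_SIZE (64)
 * entries; the mask selects which entries of the group are written.
 */
static int __rte_unused
example_spread_reta(uint16_t port_id, uint16_t nb_rx_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   RTE_ETH_RSS_RETA_SIZE_128);
}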

static int
ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t enable_addr = 1;

	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
				pool, enable_addr);
}

static void
ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	ngbe_clear_rar(hw, index);
}

static int
ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	ngbe_remove_rar(dev, 0);
	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}

static int
ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
	struct rte_eth_dev_data *dev_data = dev->data;

	/* If the device is started, refuse an MTU that requires the support of
	 * scattered packets when this feature has not been enabled before.
	 */
	if (dev_data->dev_started && !dev_data->scattered_rx &&
	    (frame_size + 2 * RTE_VLAN_HLEN >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	if (hw->mode)
		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
			NGBE_FRAME_SIZE_MAX);
	else
		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
			NGBE_FRMSZ_MAX(frame_size));

	return 0;
}
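
/*
 * Worked example for the sizing above: for the default MTU of 1500 B the
 * programmed frame size is 1500 + 14 (Ethernet header) + 4 (CRC) + 4
 * (VLAN tag) = 1522 B. From the application side this path is reached
 * through the generic API, e.g.:
 *
 *	ret = rte_eth_dev_set_mtu(port_id, 9000);
 *
 * which fails with -EINVAL on a started port if the larger frames would
 * need scattered Rx that was not enabled beforehand.
 */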

static uint32_t
ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0: /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1: /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2: /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3: /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default: /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12 bits or the boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
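
/*
 * Worked example for the hash above: with the default mc_filter_type 0 and
 * a station address ending in ...:AB:CD, addr_bytes[4] = 0xAB and
 * addr_bytes[5] = 0xCD, so vector = (0xAB >> 4) | (0xCD << 4)
 * = 0x0A | 0xCD0 = 0xCDA. In the lookup below, vector >> 5 selects one of
 * the 128 32-bit words of the 4096-bit hash table and vector & 0x1F the
 * bit within that word.
 */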

static int
ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;

	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);

	vector = ngbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);

	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);

	psrctl = rd32(hw, NGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= NGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~NGBE_PSRCTL_UCHFENA;

	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, NGBE_PSRCTL, psrctl);

	return 0;
}
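
/*
 * Illustrative sketch (not part of the driver): the hash filter above is
 * driven from the application through the generic API, e.g. to accept an
 * extra unicast address without consuming a RAR (exact-match) slot:
 *
 *	struct rte_ether_addr extra = {
 *		.addr_bytes = {0x02, 0x00, 0x00, 0x00, 0xAB, 0xCD} };
 *
 *	rte_eth_dev_uc_hash_table_set(port_id, &extra, 1);
 */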

static int
ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
	uint32_t psrctl;
	int i;

	if (on) {
		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			wr32(hw, NGBE_UCADDRTBL(i), ~0);
		}
	} else {
		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			wr32(hw, NGBE_UCADDRTBL(i), 0);
		}
	}

	psrctl = rd32(hw, NGBE_PSRCTL);
	if (on)
		psrctl |= NGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~NGBE_PSRCTL_UCHFENA;

	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, NGBE_PSRCTL, psrctl);

	return 0;
}

/**
 * Set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to ngbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
void
ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= NGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, NGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
	}
}

/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @hw
 *  board private structure
 */
static void
ngbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
	uint32_t vec = NGBE_MISC_VEC_ID;
	uint32_t gpie;

	/*
	 * Won't configure the MSI-X register if no mapping is done
	 * between intr vector and event fd,
	 * but if MSI-X has been enabled already, we need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, NGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & NGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = NGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, NGBE_GPIE);
	gpie |= NGBE_GPIE_MSIX;
	wr32(hw, NGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			ngbe_set_ivar_map(hw, 0, queue_id, vec);
			rte_intr_vec_list_index_set(intr_handle,
							queue_id, vec);
			if (vec < base + rte_intr_nb_efd_get(intr_handle)
			    - 1)
				vec++;
		}

		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	}
	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
		NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
		| NGBE_ITR_WRDSA);
}
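
/*
 * Illustrative sketch (not part of the driver): the IVAR/GPIE programming
 * above only happens when the application asked for per-queue interrupts
 * (dev_conf.intr_conf.rxq = 1 at configure time). It then arms and disarms
 * a queue's interrupt around its wait loop roughly as follows:
 */
static int __rte_unused
example_arm_rxq_intr(uint16_t port_id, uint16_t queue_id)
{
	int ret = rte_eth_dev_rx_intr_enable(port_id, queue_id);

	if (ret != 0)
		return ret;
	/* ... block on the mapped event fd, then resume the Rx burst loop ... */
	return rte_eth_dev_rx_intr_disable(port_id, queue_id);
}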

static u8 *
ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
			u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
	return mc_addr;
}

int
ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	u8 *mc_addr_list;

	mc_addr_list = (u8 *)mc_addr_set;
	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 ngbe_dev_addr_list_itr, TRUE);
}
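
/*
 * Illustrative sketch (not part of the driver): the whole multicast filter
 * is replaced in one call; the iterator above walks the flat address array
 * handed over here.
 */
static int __rte_unused
example_join_groups(uint16_t port_id)
{
	struct rte_ether_addr mc[2] = {
		{ .addr_bytes = {0x01, 0x00, 0x5E, 0x00, 0x00, 0x01} },
		{ .addr_bytes = {0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB} },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
}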

static uint64_t
ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint64_t systime_cycles;

	systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
	systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;

	return systime_cycles;
}

static uint64_t
ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint64_t rx_tstamp_cycles;

	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
	rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
	rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;

	return rx_tstamp_cycles;
}

static uint64_t
ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint64_t tx_tstamp_cycles;

	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
	tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
	tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;

	return tx_tstamp_cycles;
}

static void
ngbe_start_timecounters(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	uint32_t incval = 0;
	uint32_t shift = 0;

	incval = NGBE_INCVAL_1GB;
	shift = NGBE_INCVAL_SHIFT_1GB;

	wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));

	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

	adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
	adapter->systime_tc.cc_shift = shift;
	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
	adapter->rx_tstamp_tc.cc_shift = shift;
	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
	adapter->tx_tstamp_tc.cc_shift = shift;
	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
}

static int
ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	adapter->systime_tc.nsec += delta;
	adapter->rx_tstamp_tc.nsec += delta;
	adapter->tx_tstamp_tc.nsec += delta;

	return 0;
}

static int
ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
	uint64_t ns;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	ns = rte_timespec_to_ns(ts);
	/* Set the timecounters to a new value. */
	adapter->systime_tc.nsec = ns;
	adapter->rx_tstamp_tc.nsec = ns;
	adapter->tx_tstamp_tc.nsec = ns;

	return 0;
}

static int
ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
	uint64_t ns, systime_cycles;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	systime_cycles = ngbe_read_systime_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
	*ts = rte_ns_to_timespec(ns);

	return 0;
}

static int
ngbe_timesync_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t tsync_ctl;

	/* Stop the timesync system time. */
	wr32(hw, NGBE_TSTIMEINC, 0x0);
	/* Reset the timesync system time value. */
	wr32(hw, NGBE_TSTIMEL, 0x0);
	wr32(hw, NGBE_TSTIMEH, 0x0);

	ngbe_start_timecounters(dev);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
		RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
	tsync_ctl |= NGBE_TSRXCTL_ENA;
	wr32(hw, NGBE_TSRXCTL, tsync_ctl);

	/* Enable timestamping of transmitted PTP packets. */
	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
	tsync_ctl |= NGBE_TSTXCTL_ENA;
	wr32(hw, NGBE_TSTXCTL, tsync_ctl);

	ngbe_flush(hw);

	return 0;
}

static int
ngbe_timesync_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
	tsync_ctl &= ~NGBE_TSTXCTL_ENA;
	wr32(hw, NGBE_TSTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
	tsync_ctl &= ~NGBE_TSRXCTL_ENA;
	wr32(hw, NGBE_TSRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);

	/* Stop incrementing the System Time registers. */
	wr32(hw, NGBE_TSTIMEINC, 0);

	return 0;
}

static int
ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				struct timespec *timestamp,
				uint32_t flags __rte_unused)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	uint32_t tsync_rxctl;
	uint64_t rx_tstamp_cycles;
	uint64_t ns;

	tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
	if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
		return -EINVAL;

	rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}

static int
ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
				struct timespec *timestamp)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	uint32_t tsync_txctl;
	uint64_t tx_tstamp_cycles;
	uint64_t ns;

	tsync_txctl = rd32(hw, NGBE_TSTXCTL);
	if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
		return -EINVAL;

	tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}
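
/*
 * Illustrative sketch (not part of the driver): a minimal PTP-style use of
 * the timesync ops above. A timestamp becomes valid only once the hardware
 * sets the *_VLD bit, hence the -EINVAL polling; the adjustment value here
 * is an arbitrary example.
 */
static int __rte_unused
example_read_ptp_rx_stamp(uint16_t port_id)
{
	struct timespec ts;
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;

	/* ... after an IEEE 1588 frame was received ... */
	do {
		ret = rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
	} while (ret == -EINVAL);

	/* e.g. step the clock by the measured offset (here: +1 us) */
	return rte_eth_timesync_adjust_time(port_id, 1000);
}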

static int
ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	int count = 0;
	int g_ind = 0;
	const struct reg_info *reg_group;
	const struct reg_info **reg_set = ngbe_regs_others;

	while ((reg_group = reg_set[g_ind++]))
		count += ngbe_regs_group_count(reg_group);

	return count;
}

static int
ngbe_get_regs(struct rte_eth_dev *dev,
	      struct rte_dev_reg_info *regs)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;
	const struct reg_info **reg_set = ngbe_regs_others;

	if (data == NULL) {
		regs->length = ngbe_get_reg_length(dev);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Support only a full register dump */
	if (regs->length == 0 ||
	    regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
		regs->version = hw->mac.type << 24 |
				hw->revision_id << 16 |
				hw->device_id;
		while ((reg_group = reg_set[g_ind++]))
			count += ngbe_read_regs_group(dev, &data[count],
						      reg_group);
		return 0;
	}

	return -ENOTSUP;
}
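
/*
 * Illustrative sketch (not part of the driver): a full register dump as
 * ethtool-style tools perform it, sizing the buffer with a first call
 * (data == NULL) exactly as handled above. The fixed buffer size is an
 * assumption for the example.
 */
static int __rte_unused
example_dump_regs(uint16_t port_id)
{
	uint32_t buf[1024];	/* assumed large enough for the group list */
	struct rte_dev_reg_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	ret = rte_eth_dev_get_reg_info(port_id, &info);	/* query length */
	if (ret != 0)
		return ret;
	if (info.length == 0 || info.length > RTE_DIM(buf))
		return -ENOTSUP;

	info.data = buf;
	return rte_eth_dev_get_reg_info(port_id, &info);	/* full dump */
}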

static int
ngbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	/* Return unit is byte count */
	return hw->rom.word_size * 2;
}

static int
ngbe_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *in_eeprom)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rom_info *eeprom = &hw->rom;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if (first > hw->rom.word_size ||
	    ((first + length) > hw->rom.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->readw_buffer(hw, first, length, data);
}

static int
ngbe_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *in_eeprom)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rom_info *eeprom = &hw->rom;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if (first > hw->rom.word_size ||
	    ((first + length) > hw->rom.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->writew_buffer(hw, first, length, data);
}
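
/*
 * Illustrative sketch (not part of the driver): reading the first EEPROM
 * word through the generic API. offset/length are in bytes and are halved
 * above because the ROM is word (16-bit) addressed.
 */
static int __rte_unused
example_read_eeprom_word(uint16_t port_id, uint16_t *word)
{
	struct rte_dev_eeprom_info info;

	memset(&info, 0, sizeof(info));
	info.data = word;
	info.offset = 0;		/* byte offset, must be even */
	info.length = sizeof(*word);	/* read one 16-bit word */

	return rte_eth_dev_get_eeprom(port_id, &info);
}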

static const struct eth_dev_ops ngbe_eth_dev_ops = {
	.dev_configure = ngbe_dev_configure,
	.dev_infos_get = ngbe_dev_info_get,
	.dev_start = ngbe_dev_start,
	.dev_stop = ngbe_dev_stop,
	.dev_close = ngbe_dev_close,
	.dev_reset = ngbe_dev_reset,
	.promiscuous_enable = ngbe_dev_promiscuous_enable,
	.promiscuous_disable = ngbe_dev_promiscuous_disable,
	.allmulticast_enable = ngbe_dev_allmulticast_enable,
	.allmulticast_disable = ngbe_dev_allmulticast_disable,
	.link_update = ngbe_dev_link_update,
	.stats_get = ngbe_dev_stats_get,
	.xstats_get = ngbe_dev_xstats_get,
	.xstats_get_by_id = ngbe_dev_xstats_get_by_id,
	.stats_reset = ngbe_dev_stats_reset,
	.xstats_reset = ngbe_dev_xstats_reset,
	.xstats_get_names = ngbe_dev_xstats_get_names,
	.xstats_get_names_by_id = ngbe_dev_xstats_get_names_by_id,
	.fw_version_get = ngbe_fw_version_get,
	.dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get,
	.mtu_set = ngbe_dev_mtu_set,
	.vlan_filter_set = ngbe_vlan_filter_set,
	.vlan_tpid_set = ngbe_vlan_tpid_set,
	.vlan_offload_set = ngbe_vlan_offload_set,
	.vlan_strip_queue_set = ngbe_vlan_strip_queue_set,
	.rx_queue_start = ngbe_dev_rx_queue_start,
	.rx_queue_stop = ngbe_dev_rx_queue_stop,
	.tx_queue_start = ngbe_dev_tx_queue_start,
	.tx_queue_stop = ngbe_dev_tx_queue_stop,
	.rx_queue_setup = ngbe_dev_rx_queue_setup,
	.rx_queue_release = ngbe_dev_rx_queue_release,
	.tx_queue_setup = ngbe_dev_tx_queue_setup,
	.tx_queue_release = ngbe_dev_tx_queue_release,
	.dev_led_on = ngbe_dev_led_on,
	.dev_led_off = ngbe_dev_led_off,
	.flow_ctrl_get = ngbe_flow_ctrl_get,
	.flow_ctrl_set = ngbe_flow_ctrl_set,
	.mac_addr_add = ngbe_add_rar,
	.mac_addr_remove = ngbe_remove_rar,
	.mac_addr_set = ngbe_set_default_mac_addr,
	.uc_hash_table_set = ngbe_uc_hash_table_set,
	.uc_all_hash_table_set = ngbe_uc_all_hash_table_set,
	.reta_update = ngbe_dev_rss_reta_update,
	.reta_query = ngbe_dev_rss_reta_query,
	.rss_hash_update = ngbe_dev_rss_hash_update,
	.rss_hash_conf_get = ngbe_dev_rss_hash_conf_get,
	.set_mc_addr_list = ngbe_dev_set_mc_addr_list,
	.rxq_info_get = ngbe_rxq_info_get,
	.txq_info_get = ngbe_txq_info_get,
	.rx_burst_mode_get = ngbe_rx_burst_mode_get,
	.tx_burst_mode_get = ngbe_tx_burst_mode_get,
	.timesync_enable = ngbe_timesync_enable,
	.timesync_disable = ngbe_timesync_disable,
	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
	.get_reg = ngbe_get_regs,
	.get_eeprom_length = ngbe_get_eeprom_length,
	.get_eeprom = ngbe_get_eeprom,
	.set_eeprom = ngbe_set_eeprom,
	.timesync_adjust_time = ngbe_timesync_adjust_time,
	.timesync_read_time = ngbe_timesync_read_time,
	.timesync_write_time = ngbe_timesync_write_time,
	.tx_done_cleanup = ngbe_dev_tx_done_cleanup,
};

RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);

#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
#endif