/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_common.h>
#include <ethdev_pci.h>
#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
		uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] |= 1 << bit;\
} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] &= ~(1 << bit);\
} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
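/*
 * A sketch of the bitmap math above: with 32-bit words in the bitmap
 * (sizeof(bitmap[0]) * NBBY == 32), queue 35 maps to idx = 35 / 32 = 1
 * and bit = 35 % 32 = 3, i.e. bit 3 of the second word records whether
 * VLAN stripping is enabled on queue 35.
 */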
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),

	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
			   sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
			   sizeof(rte_ngbe_qp_strings[0]))
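/*
 * Together these tables drive the xstats API: the total number of extended
 * stats reported is NGBE_NB_HW_STATS plus NGBE_NB_QP_STATS for every queue
 * (see ngbe_xstats_calc_num() below).
 */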
static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint32_t mask;

	/*
	 * These are trickier since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * the lock cannot be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	int err, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * Rx and Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* Tx queue function in primary, set by last queue initialized.
		 * The Tx queue may not have been initialized by the primary
		 * process.
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %u bytes needed to store MAC addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ngbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
		     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
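	/*
	 * VFTA layout, as implied by the math above: 128 32-bit registers
	 * cover all 4096 VLAN IDs; bits [11:5] of the VLAN ID select the
	 * register and bits [4:0] the bit within it, e.g. VLAN 100 lands
	 * in register 3, bit 4.
	 */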
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			  !(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			  (rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	rxcfg &= ~NGBE_RXCFG_ENA;
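	/*
	 * NGBE_RXCFG_VLAN is only changed while the ring is disabled
	 * (NGBE_RXCFG_ENA cleared above), so the queue is stopped,
	 * reprogrammed and restarted below only when it was running
	 * with a different strip setting.
	 */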
	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		   enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
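	/*
	 * QinQ only counts as active when extended VLAN mode is also on,
	 * so the TPID programming below differs between single and double
	 * tagging configurations.
	 */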
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				"Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_LSB_MASK,
				NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16-bits is valid */
			wr32m(hw, NGBE_EXTAG,
				NGBE_EXTAG_VLAN_MASK,
				NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_MSB_MASK,
				NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

static void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);
}

static void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

static void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record the per-queue HW strip setting */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record the per-queue HW strip setting */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_hw_strip_enable(dev, i);
		else
			ngbe_vlan_hw_strip_disable(dev, i);
	}
}

static void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation preconditions, it will be reset.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	ngbe_pf_host_configure(dev);

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleeping until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
	       RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				"LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
	    (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* gpio0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before return, because it may
	 * start the link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable Rx and Tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
	    (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* gpio0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	adapter->rss_reta_updated = 0;

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	/* uninitialize PF if max_vfs not zero */
	ngbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
	 * its VFs to make them align with it. The detailed notification
	 * mechanism is PMD specific. As to ngbe PF, it is rather complex.
	 * To avoid unexpected behavior in VFs, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
{                                                               \
	uint32_t current_counter = rd32(hw, reg);               \
	if (current_counter < last_counter)                     \
		current_counter += 0x100000000LL;               \
	if (!hw->offset_loaded)                                 \
		last_counter = current_counter;                 \
	counter = current_counter - last_counter;               \
	counter &= 0xFFFFFFFFLL;                                \
}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
{                                                                \
	uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
	uint64_t current_counter_msb = rd32(hw, reg_msb);        \
	uint64_t current_counter = (current_counter_msb << 32) | \
		current_counter_lsb;                             \
	if (current_counter < last_counter)                      \
		current_counter += 0x1000000000LL;               \
	if (!hw->offset_loaded)                                  \
		last_counter = current_counter;                  \
	counter = current_counter - last_counter;                \
	counter &= 0xFFFFFFFFFLL;                                \
}
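/*
 * Both macros turn free-running hardware counters into monotonic software
 * deltas: if the raw register wrapped since the last read, one full period
 * (2^32 or 2^36) is added back before taking the difference. For example,
 * a 32-bit counter going from 0xFFFFFFF0 to 0x10 yields a delta of 0x20
 * new events rather than a negative value.
 */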
void
ngbe_read_stats_registers(struct ngbe_hw *hw,
			  struct ngbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
			hw->qp_last[i].rx_qp_bc_packets,
			hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
			hw->qp_last[i].tx_qp_mc_packets,
			hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
			hw->qp_last[i].tx_qp_bc_packets,
			hw_stats->qp[i].tx_qp_bc_packets);
	}

	/* PB Stats */
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
}

int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;
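		/*
		 * Each 32-bit QSM register packs four 8-bit queue-to-counter
		 * mappings, e.g. queue 5 reads register 1, bits [15:8].
		 */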
		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed = hw_stats->rx_total_missed_packets +
			 hw_stats->rx_dma_drop;
	stats->ierrors = hw_stats->rx_crc_errors +
			 hw_stats->rx_mac_short_packet_dropped +
			 hw_stats->rx_length_errors +
			 hw_stats->rx_undersize_errors +
			 hw_stats->rx_oversize_errors +
			 hw_stats->rx_illegal_byte_errors +
			 hw_stats->rx_error_bytes +
			 hw_stats->rx_fragment_errors;

	/* Tx Errors */
	stats->oerrors = 0;

	return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned int
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	return NGBE_NB_HW_STATS +
	       NGBE_NB_QP_STATS * nb_queues;
}

static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			rte_ngbe_stats_strings[id].name);
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			rte_ngbe_qp_strings[st].name);
		return 0;
	}
	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

	return -(int)(id + 1);
}

static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		*offset = rte_ngbe_stats_strings[id].offset;
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		*offset = rte_ngbe_qp_strings[st].offset +
			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}

	return -1;
}

static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i, count;

	count = ngbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(i, xstats_names[i].name,
			sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
	}

	return i;
}

static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
					   const uint64_t *ids,
					   struct rte_eth_xstat_name *xstats_names,
					   unsigned int limit)
{
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
				sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;
}

static int
ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}

static int
ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
		     unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (values == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			  uint64_t *values, unsigned int limit)
{
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret;

	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

static int
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
			       RTE_ETH_LINK_SPEED_10M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}

const uint32_t *
ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
		return ngbe_get_supported_ptypes();

	return NULL;
}

/* return 0 means link status changed, -1 means not changed */
int
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
			   int wait_to_complete)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_eth_link link;
	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
	u32 lan_speed = 0;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	bool link_up;
	int err;
	int wait = 1;

	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ~RTE_ETH_LINK_SPEED_AUTONEG);

	hw->mac.get_link_status = true;

	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (!link_up)
		return rte_eth_linkstatus_set(dev, &link);

	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case NGBE_LINK_SPEED_UNKNOWN:
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		break;

	case NGBE_LINK_SPEED_10M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_10M;
		lan_speed = 0;
		break;

	case NGBE_LINK_SPEED_100M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		lan_speed = 1;
		break;

	case NGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		lan_speed = 2;
		break;
	}

	if (hw->is_pf) {
		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
				  NGBE_LINK_SPEED_100M_FULL |
				  NGBE_LINK_SPEED_10M_FULL)) {
			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
		}
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return ngbe_dev_link_update_share(dev, wait_to_complete);
}

static int
ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, NGBE_PSRCTL);
	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
	wr32(hw, NGBE_PSRCTL, fctrl);

	return 0;
}

static int
ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, NGBE_PSRCTL);
	fctrl &= (~NGBE_PSRCTL_UCP);
	if (dev->data->all_multicast == 1)
		fctrl |= NGBE_PSRCTL_MCP;
	else
		fctrl &= (~NGBE_PSRCTL_MCP);
	wr32(hw, NGBE_PSRCTL, fctrl);

	return 0;
}

static int
ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, NGBE_PSRCTL);
	fctrl |= NGBE_PSRCTL_MCP;
	wr32(hw, NGBE_PSRCTL, fctrl);

	return 0;
}

static int
ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t fctrl;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	fctrl = rd32(hw, NGBE_PSRCTL);
	fctrl &= (~NGBE_PSRCTL_MCP);
	wr32(hw, NGBE_PSRCTL, fctrl);

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	ngbe_dev_link_status_print(dev);
	if (on != 0) {
		intr->mask_misc |= NGBE_ICRMISC_PHY;
		intr->mask_misc |= NGBE_ICRMISC_GPIO;
	} else {
		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
	}

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u64 mask;

	mask = NGBE_ICR_MASK;
	mask &= (1ULL << NGBE_MISC_VEC_ID);
	intr->mask |= mask;
	intr->mask_misc |= NGBE_ICRMISC_GPIO;

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u64 mask;

	mask = NGBE_ICR_MASK;
	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
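	/*
	 * The low vector bits are dropped above because vectors below
	 * NGBE_RX_VEC_START are reserved for non-queue (misc) interrupts,
	 * so only the Rx queue vectors remain enabled here.
	 */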
	intr->mask |= mask;

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;

	return 0;
}

/*
 * It reads ICR and sets flag for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	/* clear all cause mask */
	ngbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & NGBE_ICRMISC_PHY)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & NGBE_ICRMISC_VFMBX)
		intr->flags |= NGBE_FLAG_MAILBOX;

	if (eicr & NGBE_ICRMISC_LNKSEC)
		intr->flags |= NGBE_FLAG_MACSEC;

	if (eicr & NGBE_ICRMISC_GPIO)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static void
ngbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status == RTE_ETH_LINK_UP) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			(int)(dev->data->port_id),
			(unsigned int)link.link_speed,
			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
			(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		pci_dev->addr.domain,
		pci_dev->addr.bus,
		pci_dev->addr.devid,
		pci_dev->addr.function);
}

/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	int64_t timeout;

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & NGBE_FLAG_MAILBOX) {
		ngbe_pf_mbx_process(dev);
		intr->flags &= ~NGBE_FLAG_MAILBOX;
	}

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		ngbe_dev_link_update(dev, 0);

		/* likely to come up */
		if (link.link_status != RTE_ETH_LINK_UP)
			/* handle it 1 sec later, waiting for it to become stable */
			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to go down */
		else
			/* handle it 4 sec later, waiting for it to become stable */
			timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;

		ngbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      ngbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~NGBE_ICRMISC_PHY;

			intr->mask_orig = intr->mask;
			/* only disable all misc interrupts */
			intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	ngbe_enable_intr(dev);

	return 0;
}

/**
 * Interrupt handler which shall be registered as an alarm callback for
 * delayed handling of a specific interrupt, to wait for the NIC state to
 * become stable. As the ngbe NIC interrupt state is not stable right after
 * the link goes down, it needs to wait 4 seconds to get a stable status.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t eicr;

	ngbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
	if (eicr & NGBE_ICRMISC_VFMBX)
		ngbe_pf_mbx_process(dev);

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		ngbe_dev_link_update(dev, 0);
		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
		ngbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & NGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~NGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;
	intr->mask = intr->mask_orig;
	intr->mask_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	ngbe_enable_intr(dev);
}

/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ngbe_dev_interrupt_get_status(dev);
	ngbe_dev_interrupt_action(dev);
}

static int
ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	PMD_INIT_FUNC_TRACE();

	if (!hw->is_pf) {
		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
			"NIC.");
		return -ENOTSUP;
	}

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number the hardware can support "
			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
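		/*
		 * Each 32-bit RSSTBL register holds four 8-bit redirection
		 * entries, which is why entries are processed four at a time
		 * under the 4-bit mask extracted above.
		 */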
2212 for (j = 0; j < 4; j++) {
2213 if (RS8(mask, j, 0x1)) {
2214 reta &= ~(MS32(8 * j, 0xFF));
2215 reta |= LS32(reta_conf[idx].reta[shift + j],
2219 wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2221 adapter->rss_reta_updated = 1;
static int
ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			"(%d) doesn't match the number the hardware can "
			"support (%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1))
				reta_conf[idx].reta[shift + j] =
					(uint16_t)RS32(reta, 8 * j, 0xFF);
		}
	}

	return 0;
}
static int
ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t enable_addr = 1;

	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
			pool, enable_addr);
}
static void
ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	ngbe_clear_rar(hw, index);
}
static int
ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	ngbe_remove_rar(dev, 0);
	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}
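
/*
 * Usage sketch (illustrative): replacing the default MAC address from an
 * application goes through rte_eth_dev_default_mac_addr_set(), which ends
 * up in ngbe_set_default_mac_addr() above. The address and helper name
 * are hypothetical.
 */
static __rte_unused int
ngbe_example_set_mac(uint16_t port_id)
{
	struct rte_ether_addr addr = {
		.addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01},
	};

	return rte_eth_dev_default_mac_addr_set(port_id, &addr);
}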
static int
ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
	struct rte_eth_dev_data *dev_data = dev->data;

	/* If the device is started, refuse an MTU that requires scattered
	 * Rx support when that feature has not been enabled beforehand.
	 */
	if (dev_data->dev_started && !dev_data->scattered_rx &&
	    (frame_size + 2 * NGBE_VLAN_TAG_SIZE >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	if (hw->mode)
		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
			NGBE_FRAME_SIZE_MAX);
	else
		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
			NGBE_FRMSZ_MAX(frame_size));

	return 0;
}
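
/*
 * Usage sketch (illustrative): the frame size programmed above is
 * MTU + Ethernet header (14) + CRC (4) + 4 bytes of VLAN tag, so a
 * 1500-byte MTU maps to a 1522-byte frame. From an application
 * (helper name hypothetical):
 */
static __rte_unused int
ngbe_example_set_mtu(uint16_t port_id)
{
	/* lands in ngbe_dev_mtu_set(); may require the port to be stopped */
	return rte_eth_dev_set_mtu(port_id, 1500);
}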
static uint32_t
ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
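
/*
 * Worked example for filter type 0 (bits [47:36]): for the hypothetical
 * address 00:11:22:33:44:55, addr_bytes[4] = 0x44 and addr_bytes[5] = 0x55,
 * so vector = (0x44 >> 4) | (0x55 << 4) = 0x004 | 0x550 = 0x554. The caller
 * below then derives the UTA word index (0x554 >> 5) & 0x7F = 42 and the
 * bit position 0x554 & 0x1F = 20 inside that 32-bit table entry.
 */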
static int
ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
		       struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);

	vector = ngbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);

	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);

	psrctl = rd32(hw, NGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= NGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~NGBE_PSRCTL_UCHFENA;

	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, NGBE_PSRCTL, psrctl);

	return 0;
}
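
/*
 * Usage sketch (illustrative): an application adds a unicast address to
 * the hash filter with rte_eth_dev_uc_hash_table_set(), which lands in
 * ngbe_uc_hash_table_set() above. The address and helper name are
 * hypothetical.
 */
static __rte_unused int
ngbe_example_uc_hash(uint16_t port_id)
{
	struct rte_ether_addr addr = {
		.addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x02},
	};

	return rte_eth_dev_uc_hash_table_set(port_id, &addr, 1);
}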
static int
ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
	uint32_t psrctl;
	int i;

	if (on) {
		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			wr32(hw, NGBE_UCADDRTBL(i), ~0);
		}
	} else {
		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			wr32(hw, NGBE_UCADDRTBL(i), 0);
		}
	}

	psrctl = rd32(hw, NGBE_PSRCTL);
	if (on)
		psrctl |= NGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~NGBE_PSRCTL_UCHFENA;

	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, NGBE_PSRCTL, psrctl);

	return 0;
}
/**
 * Set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to ngbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
static void
ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
		  uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= NGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, NGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVARMISC, tmp);
	} else {
		/* Rx or Tx causes */
		/* Workaround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
	}
}
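
/*
 * Worked example for the mapping above: Rx (direction 0) on queue 3 is
 * programmed in NGBE_IVAR(3 >> 1) = NGBE_IVAR(1) at bit offset
 * idx = 16 * (3 & 1) + 8 * 0 = 16, i.e. bits [23:16] of that register;
 * Tx on the same queue uses idx = 16 + 8 = 24, i.e. bits [31:24].
 */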
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  board private structure
 */
static void
ngbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
	uint32_t vec = NGBE_MISC_VEC_ID;
	uint32_t gpie;

	/*
	 * Won't configure the MSI-X register if no mapping is done
	 * between interrupt vectors and event fds;
	 * but if MSI-X has been enabled already, we need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, NGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & NGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = NGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, NGBE_GPIE);
	gpie |= NGBE_GPIE_MSIX;
	wr32(hw, NGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			ngbe_set_ivar_map(hw, 0, queue_id, vec);
			rte_intr_vec_list_index_set(intr_handle,
							queue_id, vec);
			if (vec < base + rte_intr_nb_efd_get(intr_handle)
					- 1)
				vec++;
		}

		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	}
	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
		NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
		| NGBE_ITR_WRDSA);
}
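
/*
 * Usage sketch (illustrative): per-queue vectors are only programmed when
 * rte_intr_dp_is_en() reports a vector/event-fd mapping, which in turn
 * requires the application to request Rx interrupts before starting the
 * port (helper name hypothetical):
 */
static __rte_unused void
ngbe_example_request_rxq_intr(struct rte_eth_conf *conf)
{
	/* ask ethdev to set up per-Rx-queue interrupt vectors */
	conf->intr_conf.rxq = 1;
}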
static u8 *
ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
		       u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
	return mc_addr;
}
static int
ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	u8 *mc_addr_list;

	mc_addr_list = (u8 *)mc_addr_set;
	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 ngbe_dev_addr_list_itr, TRUE);
}
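
/*
 * Usage sketch (illustrative): an application installs a multicast filter
 * list with rte_eth_dev_set_mc_addr_list(), which is routed to the op
 * above. The group address and helper name are hypothetical.
 */
static __rte_unused int
ngbe_example_set_mc_list(uint16_t port_id)
{
	struct rte_ether_addr mc[] = {
		{ .addr_bytes = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01} },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
}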
static const struct eth_dev_ops ngbe_eth_dev_ops = {
	.dev_configure = ngbe_dev_configure,
	.dev_infos_get = ngbe_dev_info_get,
	.dev_start = ngbe_dev_start,
	.dev_stop = ngbe_dev_stop,
	.dev_close = ngbe_dev_close,
	.dev_reset = ngbe_dev_reset,
	.promiscuous_enable = ngbe_dev_promiscuous_enable,
	.promiscuous_disable = ngbe_dev_promiscuous_disable,
	.allmulticast_enable = ngbe_dev_allmulticast_enable,
	.allmulticast_disable = ngbe_dev_allmulticast_disable,
	.link_update = ngbe_dev_link_update,
	.stats_get = ngbe_dev_stats_get,
	.xstats_get = ngbe_dev_xstats_get,
	.xstats_get_by_id = ngbe_dev_xstats_get_by_id,
	.stats_reset = ngbe_dev_stats_reset,
	.xstats_reset = ngbe_dev_xstats_reset,
	.xstats_get_names = ngbe_dev_xstats_get_names,
	.xstats_get_names_by_id = ngbe_dev_xstats_get_names_by_id,
	.fw_version_get = ngbe_fw_version_get,
	.dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get,
	.mtu_set = ngbe_dev_mtu_set,
	.vlan_filter_set = ngbe_vlan_filter_set,
	.vlan_tpid_set = ngbe_vlan_tpid_set,
	.vlan_offload_set = ngbe_vlan_offload_set,
	.vlan_strip_queue_set = ngbe_vlan_strip_queue_set,
	.rx_queue_start = ngbe_dev_rx_queue_start,
	.rx_queue_stop = ngbe_dev_rx_queue_stop,
	.tx_queue_start = ngbe_dev_tx_queue_start,
	.tx_queue_stop = ngbe_dev_tx_queue_stop,
	.rx_queue_setup = ngbe_dev_rx_queue_setup,
	.rx_queue_release = ngbe_dev_rx_queue_release,
	.tx_queue_setup = ngbe_dev_tx_queue_setup,
	.tx_queue_release = ngbe_dev_tx_queue_release,
	.mac_addr_add = ngbe_add_rar,
	.mac_addr_remove = ngbe_remove_rar,
	.mac_addr_set = ngbe_set_default_mac_addr,
	.uc_hash_table_set = ngbe_uc_hash_table_set,
	.uc_all_hash_table_set = ngbe_uc_all_hash_table_set,
	.reta_update = ngbe_dev_rss_reta_update,
	.reta_query = ngbe_dev_rss_reta_query,
	.rss_hash_update = ngbe_dev_rss_hash_update,
	.rss_hash_conf_get = ngbe_dev_rss_hash_conf_get,
	.set_mc_addr_list = ngbe_dev_set_mc_addr_list,
	.rx_burst_mode_get = ngbe_rx_burst_mode_get,
	.tx_burst_mode_get = ngbe_tx_burst_mode_get,
};
RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);

#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
#endif