net/ngbe: add mailbox process operations
[dpdk.git] / drivers / net / ngbe / ngbe_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5
6 #include <errno.h>
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
9
10 #include <rte_alarm.h>
11
12 #include "ngbe_logs.h"
13 #include "ngbe.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
16
17 static int ngbe_dev_close(struct rte_eth_dev *dev);
18 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
19                                 int wait_to_complete);
20 static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
21 static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
22 static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
23                                         uint16_t queue);
24
25 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
26 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
27 static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
28 static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
29 static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
30 static void ngbe_dev_interrupt_handler(void *param);
31 static void ngbe_dev_interrupt_delayed_handler(void *param);
32 static void ngbe_configure_msix(struct rte_eth_dev *dev);
33
34 #define NGBE_SET_HWSTRIP(h, q) do {\
35                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
36                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
37                 (h)->bitmap[idx] |= 1 << bit;\
38         } while (0)
39
40 #define NGBE_CLEAR_HWSTRIP(h, q) do {\
41                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
42                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
43                 (h)->bitmap[idx] &= ~(1 << bit);\
44         } while (0)
45
46 #define NGBE_GET_HWSTRIP(h, q, r) do {\
47                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
48                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
49                 (r) = (h)->bitmap[idx] >> bit & 1;\
50         } while (0)
51
52 /*
53  * The set of PCI devices this driver supports
54  */
55 static const struct rte_pci_id pci_id_ngbe_map[] = {
56         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
57         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
58         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
59         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
60         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
61         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
62         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
63         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
64         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
65         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
66         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
67         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
68         { .vendor_id = 0, /* sentinel */ },
69 };
70
71 static const struct rte_eth_desc_lim rx_desc_lim = {
72         .nb_max = NGBE_RING_DESC_MAX,
73         .nb_min = NGBE_RING_DESC_MIN,
74         .nb_align = NGBE_RXD_ALIGN,
75 };
76
77 static const struct rte_eth_desc_lim tx_desc_lim = {
78         .nb_max = NGBE_RING_DESC_MAX,
79         .nb_min = NGBE_RING_DESC_MIN,
80         .nb_align = NGBE_TXD_ALIGN,
81         .nb_seg_max = NGBE_TX_MAX_SEG,
82         .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
83 };
84
85 static const struct eth_dev_ops ngbe_eth_dev_ops;
86
87 #define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
88 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
89 static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
90         /* MNG RxTx */
91         HW_XSTAT(mng_bmc2host_packets),
92         HW_XSTAT(mng_host2bmc_packets),
93         /* Basic RxTx */
94         HW_XSTAT(rx_packets),
95         HW_XSTAT(tx_packets),
96         HW_XSTAT(rx_bytes),
97         HW_XSTAT(tx_bytes),
98         HW_XSTAT(rx_total_bytes),
99         HW_XSTAT(rx_total_packets),
100         HW_XSTAT(tx_total_packets),
101         HW_XSTAT(rx_total_missed_packets),
102         HW_XSTAT(rx_broadcast_packets),
103         HW_XSTAT(rx_multicast_packets),
104         HW_XSTAT(rx_management_packets),
105         HW_XSTAT(tx_management_packets),
106         HW_XSTAT(rx_management_dropped),
107
108         /* Basic Error */
109         HW_XSTAT(rx_crc_errors),
110         HW_XSTAT(rx_illegal_byte_errors),
111         HW_XSTAT(rx_error_bytes),
112         HW_XSTAT(rx_mac_short_packet_dropped),
113         HW_XSTAT(rx_length_errors),
114         HW_XSTAT(rx_undersize_errors),
115         HW_XSTAT(rx_fragment_errors),
116         HW_XSTAT(rx_oversize_errors),
117         HW_XSTAT(rx_jabber_errors),
118         HW_XSTAT(rx_l3_l4_xsum_error),
119         HW_XSTAT(mac_local_errors),
120         HW_XSTAT(mac_remote_errors),
121
122         /* MACSEC */
123         HW_XSTAT(tx_macsec_pkts_untagged),
124         HW_XSTAT(tx_macsec_pkts_encrypted),
125         HW_XSTAT(tx_macsec_pkts_protected),
126         HW_XSTAT(tx_macsec_octets_encrypted),
127         HW_XSTAT(tx_macsec_octets_protected),
128         HW_XSTAT(rx_macsec_pkts_untagged),
129         HW_XSTAT(rx_macsec_pkts_badtag),
130         HW_XSTAT(rx_macsec_pkts_nosci),
131         HW_XSTAT(rx_macsec_pkts_unknownsci),
132         HW_XSTAT(rx_macsec_octets_decrypted),
133         HW_XSTAT(rx_macsec_octets_validated),
134         HW_XSTAT(rx_macsec_sc_pkts_unchecked),
135         HW_XSTAT(rx_macsec_sc_pkts_delayed),
136         HW_XSTAT(rx_macsec_sc_pkts_late),
137         HW_XSTAT(rx_macsec_sa_pkts_ok),
138         HW_XSTAT(rx_macsec_sa_pkts_invalid),
139         HW_XSTAT(rx_macsec_sa_pkts_notvalid),
140         HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
141         HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
142
143         /* MAC RxTx */
144         HW_XSTAT(rx_size_64_packets),
145         HW_XSTAT(rx_size_65_to_127_packets),
146         HW_XSTAT(rx_size_128_to_255_packets),
147         HW_XSTAT(rx_size_256_to_511_packets),
148         HW_XSTAT(rx_size_512_to_1023_packets),
149         HW_XSTAT(rx_size_1024_to_max_packets),
150         HW_XSTAT(tx_size_64_packets),
151         HW_XSTAT(tx_size_65_to_127_packets),
152         HW_XSTAT(tx_size_128_to_255_packets),
153         HW_XSTAT(tx_size_256_to_511_packets),
154         HW_XSTAT(tx_size_512_to_1023_packets),
155         HW_XSTAT(tx_size_1024_to_max_packets),
156
157         /* Flow Control */
158         HW_XSTAT(tx_xon_packets),
159         HW_XSTAT(rx_xon_packets),
160         HW_XSTAT(tx_xoff_packets),
161         HW_XSTAT(rx_xoff_packets),
162
163         HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
164         HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
165         HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
166         HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
167 };
168
169 #define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
170                            sizeof(rte_ngbe_stats_strings[0]))
171
172 /* Per-queue statistics */
173 #define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
174 static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
175         QP_XSTAT(rx_qp_packets),
176         QP_XSTAT(tx_qp_packets),
177         QP_XSTAT(rx_qp_bytes),
178         QP_XSTAT(tx_qp_bytes),
179         QP_XSTAT(rx_qp_mc_packets),
180 };
181
182 #define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
183                            sizeof(rte_ngbe_qp_strings[0]))
184
185 static inline int32_t
186 ngbe_pf_reset_hw(struct ngbe_hw *hw)
187 {
188         uint32_t ctrl_ext;
189         int32_t status;
190
191         status = hw->mac.reset_hw(hw);
192
193         ctrl_ext = rd32(hw, NGBE_PORTCTL);
194         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
195         ctrl_ext |= NGBE_PORTCTL_RSTDONE;
196         wr32(hw, NGBE_PORTCTL, ctrl_ext);
197         ngbe_flush(hw);
198
199         if (status == NGBE_ERR_SFP_NOT_PRESENT)
200                 status = 0;
201         return status;
202 }
203
204 static inline void
205 ngbe_enable_intr(struct rte_eth_dev *dev)
206 {
207         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
208         struct ngbe_hw *hw = ngbe_dev_hw(dev);
209
210         wr32(hw, NGBE_IENMISC, intr->mask_misc);
211         wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
212         ngbe_flush(hw);
213 }
214
215 static void
216 ngbe_disable_intr(struct ngbe_hw *hw)
217 {
218         PMD_INIT_FUNC_TRACE();
219
220         wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
221         ngbe_flush(hw);
222 }
223
224 /*
225  * Ensure that all locks are released before first NVM or PHY access
226  */
227 static void
228 ngbe_swfw_lock_reset(struct ngbe_hw *hw)
229 {
230         uint16_t mask;
231
232         /*
233          * These ones are more tricky since they are common to all ports; but
234          * swfw_sync retries last long enough (1s) to be almost sure that if
235          * lock can not be taken it is due to an improper lock of the
236          * semaphore.
237          */
238         mask = NGBE_MNGSEM_SWPHY |
239                NGBE_MNGSEM_SWMBX |
240                NGBE_MNGSEM_SWFLASH;
241         if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
242                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
243
244         hw->mac.release_swfw_sync(hw, mask);
245 }
246
247 static int
248 eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
249 {
250         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
251         struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
252         struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
253         struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
254         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
255         const struct rte_memzone *mz;
256         uint32_t ctrl_ext;
257         int err, ret;
258
259         PMD_INIT_FUNC_TRACE();
260
261         eth_dev->dev_ops = &ngbe_eth_dev_ops;
262         eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
263         eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
264         eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
265
266         /*
267          * For secondary processes, we don't initialise any further as primary
268          * has already done this work. Only check we don't need a different
269          * Rx and Tx function.
270          */
271         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
272                 struct ngbe_tx_queue *txq;
273                 /* Tx queue function in primary, set by last queue initialized
274                  * Tx queue may not initialized by primary process
275                  */
276                 if (eth_dev->data->tx_queues) {
277                         uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
278                         txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
279                         ngbe_set_tx_function(eth_dev, txq);
280                 } else {
281                         /* Use default Tx function if we get here */
282                         PMD_INIT_LOG(NOTICE,
283                                 "No Tx queues configured yet. Using default Tx function.");
284                 }
285
286                 ngbe_set_rx_function(eth_dev);
287
288                 return 0;
289         }
290
291         rte_eth_copy_pci_info(eth_dev, pci_dev);
292         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
293
294         /* Vendor and Device ID need to be set before init of shared code */
295         hw->device_id = pci_dev->id.device_id;
296         hw->vendor_id = pci_dev->id.vendor_id;
297         hw->sub_system_id = pci_dev->id.subsystem_device_id;
298         ngbe_map_device_id(hw);
299         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
300
301         /* Reserve memory for interrupt status block */
302         mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
303                 NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
304         if (mz == NULL)
305                 return -ENOMEM;
306
307         hw->isb_dma = TMZ_PADDR(mz);
308         hw->isb_mem = TMZ_VADDR(mz);
309
310         /* Initialize the shared code (base driver) */
311         err = ngbe_init_shared_code(hw);
312         if (err != 0) {
313                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
314                 return -EIO;
315         }
316
317         /* Unlock any pending hardware semaphore */
318         ngbe_swfw_lock_reset(hw);
319
320         err = hw->rom.init_params(hw);
321         if (err != 0) {
322                 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
323                 return -EIO;
324         }
325
326         /* Make sure we have a good EEPROM before we read from it */
327         err = hw->rom.validate_checksum(hw, NULL);
328         if (err != 0) {
329                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
330                 return -EIO;
331         }
332
333         err = hw->mac.init_hw(hw);
334         if (err != 0) {
335                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
336                 return -EIO;
337         }
338
339         /* Reset the hw statistics */
340         ngbe_dev_stats_reset(eth_dev);
341
342         /* disable interrupt */
343         ngbe_disable_intr(hw);
344
345         /* Allocate memory for storing MAC addresses */
346         eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
347                                                hw->mac.num_rar_entries, 0);
348         if (eth_dev->data->mac_addrs == NULL) {
349                 PMD_INIT_LOG(ERR,
350                              "Failed to allocate %u bytes needed to store MAC addresses",
351                              RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
352                 return -ENOMEM;
353         }
354
355         /* Copy the permanent MAC address */
356         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
357                         &eth_dev->data->mac_addrs[0]);
358
359         /* Allocate memory for storing hash filter MAC addresses */
360         eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
361                         RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
362         if (eth_dev->data->hash_mac_addrs == NULL) {
363                 PMD_INIT_LOG(ERR,
364                              "Failed to allocate %d bytes needed to store MAC addresses",
365                              RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
366                 rte_free(eth_dev->data->mac_addrs);
367                 eth_dev->data->mac_addrs = NULL;
368                 return -ENOMEM;
369         }
370
371         /* initialize the vfta */
372         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
373
374         /* initialize the hw strip bitmap*/
375         memset(hwstrip, 0, sizeof(*hwstrip));
376
377         /* initialize PF if max_vfs not zero */
378         ret = ngbe_pf_host_init(eth_dev);
379         if (ret) {
380                 rte_free(eth_dev->data->mac_addrs);
381                 eth_dev->data->mac_addrs = NULL;
382                 rte_free(eth_dev->data->hash_mac_addrs);
383                 eth_dev->data->hash_mac_addrs = NULL;
384                 return ret;
385         }
386
387         ctrl_ext = rd32(hw, NGBE_PORTCTL);
388         /* let hardware know driver is loaded */
389         ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
390         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
391         ctrl_ext |= NGBE_PORTCTL_RSTDONE;
392         wr32(hw, NGBE_PORTCTL, ctrl_ext);
393         ngbe_flush(hw);
394
395         PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
396                         (int)hw->mac.type, (int)hw->phy.type);
397
398         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
399                      eth_dev->data->port_id, pci_dev->id.vendor_id,
400                      pci_dev->id.device_id);
401
402         rte_intr_callback_register(intr_handle,
403                                    ngbe_dev_interrupt_handler, eth_dev);
404
405         /* enable uio/vfio intr/eventfd mapping */
406         rte_intr_enable(intr_handle);
407
408         /* enable support intr */
409         ngbe_enable_intr(eth_dev);
410
411         return 0;
412 }
413
414 static int
415 eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
416 {
417         PMD_INIT_FUNC_TRACE();
418
419         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
420                 return 0;
421
422         ngbe_dev_close(eth_dev);
423
424         return 0;
425 }
426
427 static int
428 eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
429                 struct rte_pci_device *pci_dev)
430 {
431         return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
432                         sizeof(struct ngbe_adapter),
433                         eth_dev_pci_specific_init, pci_dev,
434                         eth_ngbe_dev_init, NULL);
435 }
436
437 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
438 {
439         struct rte_eth_dev *ethdev;
440
441         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
442         if (ethdev == NULL)
443                 return 0;
444
445         return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
446 }
447
448 static struct rte_pci_driver rte_ngbe_pmd = {
449         .id_table = pci_id_ngbe_map,
450         .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
451                      RTE_PCI_DRV_INTR_LSC,
452         .probe = eth_ngbe_pci_probe,
453         .remove = eth_ngbe_pci_remove,
454 };
455
456 static int
457 ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
458 {
459         struct ngbe_hw *hw = ngbe_dev_hw(dev);
460         struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
461         uint32_t vfta;
462         uint32_t vid_idx;
463         uint32_t vid_bit;
464
465         vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
466         vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
467         vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
468         if (on)
469                 vfta |= vid_bit;
470         else
471                 vfta &= ~vid_bit;
472         wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
473
474         /* update local VFTA copy */
475         shadow_vfta->vfta[vid_idx] = vfta;
476
477         return 0;
478 }
479
480 static void
481 ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
482 {
483         struct ngbe_hw *hw = ngbe_dev_hw(dev);
484         struct ngbe_rx_queue *rxq;
485         bool restart;
486         uint32_t rxcfg, rxbal, rxbah;
487
488         if (on)
489                 ngbe_vlan_hw_strip_enable(dev, queue);
490         else
491                 ngbe_vlan_hw_strip_disable(dev, queue);
492
493         rxq = dev->data->rx_queues[queue];
494         rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
495         rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
496         rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
497         if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
498                 restart = (rxcfg & NGBE_RXCFG_ENA) &&
499                         !(rxcfg & NGBE_RXCFG_VLAN);
500                 rxcfg |= NGBE_RXCFG_VLAN;
501         } else {
502                 restart = (rxcfg & NGBE_RXCFG_ENA) &&
503                         (rxcfg & NGBE_RXCFG_VLAN);
504                 rxcfg &= ~NGBE_RXCFG_VLAN;
505         }
506         rxcfg &= ~NGBE_RXCFG_ENA;
507
508         if (restart) {
509                 /* set vlan strip for ring */
510                 ngbe_dev_rx_queue_stop(dev, queue);
511                 wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
512                 wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
513                 wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
514                 ngbe_dev_rx_queue_start(dev, queue);
515         }
516 }
517
518 static int
519 ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
520                     enum rte_vlan_type vlan_type,
521                     uint16_t tpid)
522 {
523         struct ngbe_hw *hw = ngbe_dev_hw(dev);
524         int ret = 0;
525         uint32_t portctrl, vlan_ext, qinq;
526
527         portctrl = rd32(hw, NGBE_PORTCTL);
528
529         vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
530         qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
531         switch (vlan_type) {
532         case RTE_ETH_VLAN_TYPE_INNER:
533                 if (vlan_ext) {
534                         wr32m(hw, NGBE_VLANCTL,
535                                 NGBE_VLANCTL_TPID_MASK,
536                                 NGBE_VLANCTL_TPID(tpid));
537                         wr32m(hw, NGBE_DMATXCTRL,
538                                 NGBE_DMATXCTRL_TPID_MASK,
539                                 NGBE_DMATXCTRL_TPID(tpid));
540                 } else {
541                         ret = -ENOTSUP;
542                         PMD_DRV_LOG(ERR,
543                                 "Inner type is not supported by single VLAN");
544                 }
545
546                 if (qinq) {
547                         wr32m(hw, NGBE_TAGTPID(0),
548                                 NGBE_TAGTPID_LSB_MASK,
549                                 NGBE_TAGTPID_LSB(tpid));
550                 }
551                 break;
552         case RTE_ETH_VLAN_TYPE_OUTER:
553                 if (vlan_ext) {
554                         /* Only the high 16-bits is valid */
555                         wr32m(hw, NGBE_EXTAG,
556                                 NGBE_EXTAG_VLAN_MASK,
557                                 NGBE_EXTAG_VLAN(tpid));
558                 } else {
559                         wr32m(hw, NGBE_VLANCTL,
560                                 NGBE_VLANCTL_TPID_MASK,
561                                 NGBE_VLANCTL_TPID(tpid));
562                         wr32m(hw, NGBE_DMATXCTRL,
563                                 NGBE_DMATXCTRL_TPID_MASK,
564                                 NGBE_DMATXCTRL_TPID(tpid));
565                 }
566
567                 if (qinq) {
568                         wr32m(hw, NGBE_TAGTPID(0),
569                                 NGBE_TAGTPID_MSB_MASK,
570                                 NGBE_TAGTPID_MSB(tpid));
571                 }
572                 break;
573         default:
574                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
575                 return -EINVAL;
576         }
577
578         return ret;
579 }
580
581 void
582 ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
583 {
584         struct ngbe_hw *hw = ngbe_dev_hw(dev);
585         uint32_t vlnctrl;
586
587         PMD_INIT_FUNC_TRACE();
588
589         /* Filter Table Disable */
590         vlnctrl = rd32(hw, NGBE_VLANCTL);
591         vlnctrl &= ~NGBE_VLANCTL_VFE;
592         wr32(hw, NGBE_VLANCTL, vlnctrl);
593 }
594
595 void
596 ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
597 {
598         struct ngbe_hw *hw = ngbe_dev_hw(dev);
599         struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
600         uint32_t vlnctrl;
601         uint16_t i;
602
603         PMD_INIT_FUNC_TRACE();
604
605         /* Filter Table Enable */
606         vlnctrl = rd32(hw, NGBE_VLANCTL);
607         vlnctrl &= ~NGBE_VLANCTL_CFIENA;
608         vlnctrl |= NGBE_VLANCTL_VFE;
609         wr32(hw, NGBE_VLANCTL, vlnctrl);
610
611         /* write whatever is in local vfta copy */
612         for (i = 0; i < NGBE_VFTA_SIZE; i++)
613                 wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
614 }
615
616 void
617 ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
618 {
619         struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
620         struct ngbe_rx_queue *rxq;
621
622         if (queue >= NGBE_MAX_RX_QUEUE_NUM)
623                 return;
624
625         if (on)
626                 NGBE_SET_HWSTRIP(hwstrip, queue);
627         else
628                 NGBE_CLEAR_HWSTRIP(hwstrip, queue);
629
630         if (queue >= dev->data->nb_rx_queues)
631                 return;
632
633         rxq = dev->data->rx_queues[queue];
634
635         if (on) {
636                 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
637                 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
638         } else {
639                 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
640                 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
641         }
642 }
643
644 static void
645 ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
646 {
647         struct ngbe_hw *hw = ngbe_dev_hw(dev);
648         uint32_t ctrl;
649
650         PMD_INIT_FUNC_TRACE();
651
652         ctrl = rd32(hw, NGBE_RXCFG(queue));
653         ctrl &= ~NGBE_RXCFG_VLAN;
654         wr32(hw, NGBE_RXCFG(queue), ctrl);
655
656         /* record those setting for HW strip per queue */
657         ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
658 }
659
660 static void
661 ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
662 {
663         struct ngbe_hw *hw = ngbe_dev_hw(dev);
664         uint32_t ctrl;
665
666         PMD_INIT_FUNC_TRACE();
667
668         ctrl = rd32(hw, NGBE_RXCFG(queue));
669         ctrl |= NGBE_RXCFG_VLAN;
670         wr32(hw, NGBE_RXCFG(queue), ctrl);
671
672         /* record those setting for HW strip per queue */
673         ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
674 }
675
676 static void
677 ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
678 {
679         struct ngbe_hw *hw = ngbe_dev_hw(dev);
680         uint32_t ctrl;
681
682         PMD_INIT_FUNC_TRACE();
683
684         ctrl = rd32(hw, NGBE_PORTCTL);
685         ctrl &= ~NGBE_PORTCTL_VLANEXT;
686         ctrl &= ~NGBE_PORTCTL_QINQ;
687         wr32(hw, NGBE_PORTCTL, ctrl);
688 }
689
690 static void
691 ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
692 {
693         struct ngbe_hw *hw = ngbe_dev_hw(dev);
694         uint32_t ctrl;
695
696         PMD_INIT_FUNC_TRACE();
697
698         ctrl  = rd32(hw, NGBE_PORTCTL);
699         ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
700         wr32(hw, NGBE_PORTCTL, ctrl);
701 }
702
703 static void
704 ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
705 {
706         struct ngbe_hw *hw = ngbe_dev_hw(dev);
707         uint32_t ctrl;
708
709         PMD_INIT_FUNC_TRACE();
710
711         ctrl = rd32(hw, NGBE_PORTCTL);
712         ctrl &= ~NGBE_PORTCTL_QINQ;
713         wr32(hw, NGBE_PORTCTL, ctrl);
714 }
715
716 static void
717 ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
718 {
719         struct ngbe_hw *hw = ngbe_dev_hw(dev);
720         uint32_t ctrl;
721
722         PMD_INIT_FUNC_TRACE();
723
724         ctrl  = rd32(hw, NGBE_PORTCTL);
725         ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
726         wr32(hw, NGBE_PORTCTL, ctrl);
727 }
728
729 void
730 ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
731 {
732         struct ngbe_rx_queue *rxq;
733         uint16_t i;
734
735         PMD_INIT_FUNC_TRACE();
736
737         for (i = 0; i < dev->data->nb_rx_queues; i++) {
738                 rxq = dev->data->rx_queues[i];
739
740                 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
741                         ngbe_vlan_hw_strip_enable(dev, i);
742                 else
743                         ngbe_vlan_hw_strip_disable(dev, i);
744         }
745 }
746
747 void
748 ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
749 {
750         uint16_t i;
751         struct rte_eth_rxmode *rxmode;
752         struct ngbe_rx_queue *rxq;
753
754         if (mask & RTE_ETH_VLAN_STRIP_MASK) {
755                 rxmode = &dev->data->dev_conf.rxmode;
756                 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
757                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
758                                 rxq = dev->data->rx_queues[i];
759                                 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
760                         }
761                 else
762                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
763                                 rxq = dev->data->rx_queues[i];
764                                 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
765                         }
766         }
767 }
768
769 static int
770 ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
771 {
772         struct rte_eth_rxmode *rxmode;
773         rxmode = &dev->data->dev_conf.rxmode;
774
775         if (mask & RTE_ETH_VLAN_STRIP_MASK)
776                 ngbe_vlan_hw_strip_config(dev);
777
778         if (mask & RTE_ETH_VLAN_FILTER_MASK) {
779                 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
780                         ngbe_vlan_hw_filter_enable(dev);
781                 else
782                         ngbe_vlan_hw_filter_disable(dev);
783         }
784
785         if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
786                 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
787                         ngbe_vlan_hw_extend_enable(dev);
788                 else
789                         ngbe_vlan_hw_extend_disable(dev);
790         }
791
792         if (mask & RTE_ETH_QINQ_STRIP_MASK) {
793                 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
794                         ngbe_qinq_hw_strip_enable(dev);
795                 else
796                         ngbe_qinq_hw_strip_disable(dev);
797         }
798
799         return 0;
800 }
801
802 static int
803 ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
804 {
805         ngbe_config_vlan_strip_on_all_queues(dev, mask);
806
807         ngbe_vlan_offload_config(dev, mask);
808
809         return 0;
810 }
811
812 static int
813 ngbe_dev_configure(struct rte_eth_dev *dev)
814 {
815         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
816         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
817
818         PMD_INIT_FUNC_TRACE();
819
820         if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
821                 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
822
823         /* set flag to update link status after init */
824         intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
825
826         /*
827          * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
828          * allocation Rx preconditions we will reset it.
829          */
830         adapter->rx_bulk_alloc_allowed = true;
831
832         return 0;
833 }
834
835 static void
836 ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
837 {
838         struct ngbe_hw *hw = ngbe_dev_hw(dev);
839         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
840
841         wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
842         wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
843         wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
844         if (hw->phy.type == ngbe_phy_yt8521s_sfi)
845                 wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
846         else
847                 wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));
848
849         intr->mask_misc |= NGBE_ICRMISC_GPIO;
850 }
851
852 /*
853  * Configure device link speed and setup link.
854  * It returns 0 on success.
855  */
856 static int
857 ngbe_dev_start(struct rte_eth_dev *dev)
858 {
859         struct ngbe_hw *hw = ngbe_dev_hw(dev);
860         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
861         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
862         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
863         uint32_t intr_vector = 0;
864         int err;
865         bool link_up = false, negotiate = false;
866         uint32_t speed = 0;
867         uint32_t allowed_speeds = 0;
868         int mask = 0;
869         int status;
870         uint32_t *link_speeds;
871
872         PMD_INIT_FUNC_TRACE();
873
874         /* disable uio/vfio intr/eventfd mapping */
875         rte_intr_disable(intr_handle);
876
877         /* stop adapter */
878         hw->adapter_stopped = 0;
879         ngbe_stop_hw(hw);
880
881         /* reinitialize adapter, this calls reset and start */
882         hw->nb_rx_queues = dev->data->nb_rx_queues;
883         hw->nb_tx_queues = dev->data->nb_tx_queues;
884         status = ngbe_pf_reset_hw(hw);
885         if (status != 0)
886                 return -1;
887         hw->mac.start_hw(hw);
888         hw->mac.get_link_status = true;
889
890         /* configure PF module if SRIOV enabled */
891         ngbe_pf_host_configure(dev);
892
893         ngbe_dev_phy_intr_setup(dev);
894
895         /* check and configure queue intr-vector mapping */
896         if ((rte_intr_cap_multiple(intr_handle) ||
897              !RTE_ETH_DEV_SRIOV(dev).active) &&
898             dev->data->dev_conf.intr_conf.rxq != 0) {
899                 intr_vector = dev->data->nb_rx_queues;
900                 if (rte_intr_efd_enable(intr_handle, intr_vector))
901                         return -1;
902         }
903
904         if (rte_intr_dp_is_en(intr_handle)) {
905                 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
906                                                    dev->data->nb_rx_queues)) {
907                         PMD_INIT_LOG(ERR,
908                                      "Failed to allocate %d rx_queues intr_vec",
909                                      dev->data->nb_rx_queues);
910                         return -ENOMEM;
911                 }
912         }
913
914         /* confiugre MSI-X for sleep until Rx interrupt */
915         ngbe_configure_msix(dev);
916
917         /* initialize transmission unit */
918         ngbe_dev_tx_init(dev);
919
920         /* This can fail when allocating mbufs for descriptor rings */
921         err = ngbe_dev_rx_init(dev);
922         if (err != 0) {
923                 PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
924                 goto error;
925         }
926
927         mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
928                 RTE_ETH_VLAN_EXTEND_MASK;
929         err = ngbe_vlan_offload_config(dev, mask);
930         if (err != 0) {
931                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
932                 goto error;
933         }
934
935         ngbe_configure_port(dev);
936
937         err = ngbe_dev_rxtx_start(dev);
938         if (err < 0) {
939                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
940                 goto error;
941         }
942
943         /* Skip link setup if loopback mode is enabled. */
944         if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
945                 goto skip_link_setup;
946
947         err = hw->mac.check_link(hw, &speed, &link_up, 0);
948         if (err != 0)
949                 goto error;
950         dev->data->dev_link.link_status = link_up;
951
952         link_speeds = &dev->data->dev_conf.link_speeds;
953         if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
954                 negotiate = true;
955
956         err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
957         if (err != 0)
958                 goto error;
959
960         allowed_speeds = 0;
961         if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
962                 allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
963         if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
964                 allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
965         if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
966                 allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
967
968         if (*link_speeds & ~allowed_speeds) {
969                 PMD_INIT_LOG(ERR, "Invalid link setting");
970                 goto error;
971         }
972
973         speed = 0x0;
974         if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
975                 speed = hw->mac.default_speeds;
976         } else {
977                 if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
978                         speed |= NGBE_LINK_SPEED_1GB_FULL;
979                 if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
980                         speed |= NGBE_LINK_SPEED_100M_FULL;
981                 if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
982                         speed |= NGBE_LINK_SPEED_10M_FULL;
983         }
984
985         hw->phy.init_hw(hw);
986         err = hw->mac.setup_link(hw, speed, link_up);
987         if (err != 0)
988                 goto error;
989
990 skip_link_setup:
991
992         if (rte_intr_allow_others(intr_handle)) {
993                 ngbe_dev_misc_interrupt_setup(dev);
994                 /* check if lsc interrupt is enabled */
995                 if (dev->data->dev_conf.intr_conf.lsc != 0)
996                         ngbe_dev_lsc_interrupt_setup(dev, TRUE);
997                 else
998                         ngbe_dev_lsc_interrupt_setup(dev, FALSE);
999                 ngbe_dev_macsec_interrupt_setup(dev);
1000                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
1001         } else {
1002                 rte_intr_callback_unregister(intr_handle,
1003                                              ngbe_dev_interrupt_handler, dev);
1004                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1005                         PMD_INIT_LOG(INFO,
1006                                      "LSC won't enable because of no intr multiplex");
1007         }
1008
1009         /* check if rxq interrupt is enabled */
1010         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1011             rte_intr_dp_is_en(intr_handle))
1012                 ngbe_dev_rxq_interrupt_setup(dev);
1013
1014         /* enable UIO/VFIO intr/eventfd mapping */
1015         rte_intr_enable(intr_handle);
1016
1017         /* resume enabled intr since HW reset */
1018         ngbe_enable_intr(dev);
1019
1020         if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
1021                 (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
1022                 /* gpio0 is used to power on/off control*/
1023                 wr32(hw, NGBE_GPIODATA, 0);
1024         }
1025
1026         /*
1027          * Update link status right before return, because it may
1028          * start link configuration process in a separate thread.
1029          */
1030         ngbe_dev_link_update(dev, 0);
1031
1032         ngbe_read_stats_registers(hw, hw_stats);
1033         hw->offset_loaded = 1;
1034
1035         return 0;
1036
1037 error:
1038         PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1039         ngbe_dev_clear_queues(dev);
1040         return -EIO;
1041 }
1042
1043 /*
1044  * Stop device: disable rx and tx functions to allow for reconfiguring.
1045  */
1046 static int
1047 ngbe_dev_stop(struct rte_eth_dev *dev)
1048 {
1049         struct rte_eth_link link;
1050         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
1051         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1052         struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
1053         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1054         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1055         int vf;
1056
1057         if (hw->adapter_stopped)
1058                 return 0;
1059
1060         PMD_INIT_FUNC_TRACE();
1061
1062         if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
1063                 (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
1064                 /* gpio0 is used to power on/off control*/
1065                 wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
1066         }
1067
1068         /* disable interrupts */
1069         ngbe_disable_intr(hw);
1070
1071         /* reset the NIC */
1072         ngbe_pf_reset_hw(hw);
1073         hw->adapter_stopped = 0;
1074
1075         /* stop adapter */
1076         ngbe_stop_hw(hw);
1077
1078         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1079                 vfinfo[vf].clear_to_send = false;
1080
1081         ngbe_dev_clear_queues(dev);
1082
1083         /* Clear stored conf */
1084         dev->data->scattered_rx = 0;
1085
1086         /* Clear recorded link status */
1087         memset(&link, 0, sizeof(link));
1088         rte_eth_linkstatus_set(dev, &link);
1089
1090         if (!rte_intr_allow_others(intr_handle))
1091                 /* resume to the default handler */
1092                 rte_intr_callback_register(intr_handle,
1093                                            ngbe_dev_interrupt_handler,
1094                                            (void *)dev);
1095
1096         /* Clean datapath event and queue/vec mapping */
1097         rte_intr_efd_disable(intr_handle);
1098         rte_intr_vec_list_free(intr_handle);
1099
1100         adapter->rss_reta_updated = 0;
1101
1102         hw->adapter_stopped = true;
1103         dev->data->dev_started = 0;
1104
1105         return 0;
1106 }
1107
1108 /*
1109  * Reset and stop device.
1110  */
1111 static int
1112 ngbe_dev_close(struct rte_eth_dev *dev)
1113 {
1114         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1115         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1116         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1117         int retries = 0;
1118         int ret;
1119
1120         PMD_INIT_FUNC_TRACE();
1121
1122         ngbe_pf_reset_hw(hw);
1123
1124         ngbe_dev_stop(dev);
1125
1126         ngbe_dev_free_queues(dev);
1127
1128         /* reprogram the RAR[0] in case user changed it. */
1129         ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1130
1131         /* Unlock any pending hardware semaphore */
1132         ngbe_swfw_lock_reset(hw);
1133
1134         /* disable uio intr before callback unregister */
1135         rte_intr_disable(intr_handle);
1136
1137         do {
1138                 ret = rte_intr_callback_unregister(intr_handle,
1139                                 ngbe_dev_interrupt_handler, dev);
1140                 if (ret >= 0 || ret == -ENOENT) {
1141                         break;
1142                 } else if (ret != -EAGAIN) {
1143                         PMD_INIT_LOG(ERR,
1144                                 "intr callback unregister failed: %d",
1145                                 ret);
1146                 }
1147                 rte_delay_ms(100);
1148         } while (retries++ < (10 + NGBE_LINK_UP_TIME));
1149
1150         /* uninitialize PF if max_vfs not zero */
1151         ngbe_pf_host_uninit(dev);
1152
1153         rte_free(dev->data->mac_addrs);
1154         dev->data->mac_addrs = NULL;
1155
1156         rte_free(dev->data->hash_mac_addrs);
1157         dev->data->hash_mac_addrs = NULL;
1158
1159         return ret;
1160 }
1161
1162 /*
1163  * Reset PF device.
1164  */
1165 static int
1166 ngbe_dev_reset(struct rte_eth_dev *dev)
1167 {
1168         int ret;
1169
1170         /* When a DPDK PMD PF begin to reset PF port, it should notify all
1171          * its VF to make them align with it. The detailed notification
1172          * mechanism is PMD specific. As to ngbe PF, it is rather complex.
1173          * To avoid unexpected behavior in VF, currently reset of PF with
1174          * SR-IOV activation is not supported. It might be supported later.
1175          */
1176         if (dev->data->sriov.active)
1177                 return -ENOTSUP;
1178
1179         ret = eth_ngbe_dev_uninit(dev);
1180         if (ret != 0)
1181                 return ret;
1182
1183         ret = eth_ngbe_dev_init(dev, NULL);
1184
1185         return ret;
1186 }
1187
1188 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1189         {                                                       \
1190                 uint32_t current_counter = rd32(hw, reg);       \
1191                 if (current_counter < last_counter)             \
1192                         current_counter += 0x100000000LL;       \
1193                 if (!hw->offset_loaded)                         \
1194                         last_counter = current_counter;         \
1195                 counter = current_counter - last_counter;       \
1196                 counter &= 0xFFFFFFFFLL;                        \
1197         }
1198
1199 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1200         {                                                                \
1201                 uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1202                 uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1203                 uint64_t current_counter = (current_counter_msb << 32) | \
1204                         current_counter_lsb;                             \
1205                 if (current_counter < last_counter)                      \
1206                         current_counter += 0x1000000000LL;               \
1207                 if (!hw->offset_loaded)                                  \
1208                         last_counter = current_counter;                  \
1209                 counter = current_counter - last_counter;                \
1210                 counter &= 0xFFFFFFFFFLL;                                \
1211         }
1212
1213 void
1214 ngbe_read_stats_registers(struct ngbe_hw *hw,
1215                            struct ngbe_hw_stats *hw_stats)
1216 {
1217         unsigned int i;
1218
1219         /* QP Stats */
1220         for (i = 0; i < hw->nb_rx_queues; i++) {
1221                 UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
1222                         hw->qp_last[i].rx_qp_packets,
1223                         hw_stats->qp[i].rx_qp_packets);
1224                 UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
1225                         hw->qp_last[i].rx_qp_bytes,
1226                         hw_stats->qp[i].rx_qp_bytes);
1227                 UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
1228                         hw->qp_last[i].rx_qp_mc_packets,
1229                         hw_stats->qp[i].rx_qp_mc_packets);
1230                 UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
1231                         hw->qp_last[i].rx_qp_bc_packets,
1232                         hw_stats->qp[i].rx_qp_bc_packets);
1233         }
1234
1235         for (i = 0; i < hw->nb_tx_queues; i++) {
1236                 UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
1237                         hw->qp_last[i].tx_qp_packets,
1238                         hw_stats->qp[i].tx_qp_packets);
1239                 UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
1240                         hw->qp_last[i].tx_qp_bytes,
1241                         hw_stats->qp[i].tx_qp_bytes);
1242                 UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
1243                         hw->qp_last[i].tx_qp_mc_packets,
1244                         hw_stats->qp[i].tx_qp_mc_packets);
1245                 UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
1246                         hw->qp_last[i].tx_qp_bc_packets,
1247                         hw_stats->qp[i].tx_qp_bc_packets);
1248         }
1249
1250         /* PB Stats */
1251         hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
1252         hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
1253         hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
1254         hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
1255         hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
1256         hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
1257
1258         hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
1259         hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
1260
1261         /* DMA Stats */
1262         hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
1263         hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
1264         hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
1265         hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
1266         hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
1267         hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
1268         hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
1269         hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
1270
1271         /* MAC Stats */
1272         hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
1273         hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
1274         hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
1275
1276         hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
1277         hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
1278         hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
1279
1280         hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
1281         hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
1282
1283         hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
1284         hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
1285         hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
1286         hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
1287         hw_stats->rx_size_512_to_1023_packets +=
1288                         rd64(hw, NGBE_MACRX512TO1023L);
1289         hw_stats->rx_size_1024_to_max_packets +=
1290                         rd64(hw, NGBE_MACRX1024TOMAXL);
1291         hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
1292         hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
1293         hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
1294         hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
1295         hw_stats->tx_size_512_to_1023_packets +=
1296                         rd64(hw, NGBE_MACTX512TO1023L);
1297         hw_stats->tx_size_1024_to_max_packets +=
1298                         rd64(hw, NGBE_MACTX1024TOMAXL);
1299
1300         hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
1301         hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
1302         hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
1303
1304         /* MNG Stats */
1305         hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
1306         hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
1307         hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
1308         hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
1309
1310         /* MACsec Stats */
1311         hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
1312         hw_stats->tx_macsec_pkts_encrypted +=
1313                         rd32(hw, NGBE_LSECTX_ENCPKT);
1314         hw_stats->tx_macsec_pkts_protected +=
1315                         rd32(hw, NGBE_LSECTX_PROTPKT);
1316         hw_stats->tx_macsec_octets_encrypted +=
1317                         rd32(hw, NGBE_LSECTX_ENCOCT);
1318         hw_stats->tx_macsec_octets_protected +=
1319                         rd32(hw, NGBE_LSECTX_PROTOCT);
1320         hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
1321         hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
1322         hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
1323         hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
1324         hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
1325         hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
1326         hw_stats->rx_macsec_sc_pkts_unchecked +=
1327                         rd32(hw, NGBE_LSECRX_UNCHKPKT);
1328         hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
1329         hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
1330         for (i = 0; i < 2; i++) {
1331                 hw_stats->rx_macsec_sa_pkts_ok +=
1332                         rd32(hw, NGBE_LSECRX_OKPKT(i));
1333                 hw_stats->rx_macsec_sa_pkts_invalid +=
1334                         rd32(hw, NGBE_LSECRX_INVPKT(i));
1335                 hw_stats->rx_macsec_sa_pkts_notvalid +=
1336                         rd32(hw, NGBE_LSECRX_BADPKT(i));
1337         }
1338         for (i = 0; i < 4; i++) {
1339                 hw_stats->rx_macsec_sa_pkts_unusedsa +=
1340                         rd32(hw, NGBE_LSECRX_INVSAPKT(i));
1341                 hw_stats->rx_macsec_sa_pkts_notusingsa +=
1342                         rd32(hw, NGBE_LSECRX_BADSAPKT(i));
1343         }
1344         hw_stats->rx_total_missed_packets =
1345                         hw_stats->rx_up_dropped;
1346 }
1347
1348 static int
1349 ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1350 {
1351         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1352         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1353         struct ngbe_stat_mappings *stat_mappings =
1354                         NGBE_DEV_STAT_MAPPINGS(dev);
1355         uint32_t i, j;
1356
1357         ngbe_read_stats_registers(hw, hw_stats);
1358
1359         if (stats == NULL)
1360                 return -EINVAL;
1361
1362         /* Fill out the rte_eth_stats statistics structure */
1363         stats->ipackets = hw_stats->rx_packets;
1364         stats->ibytes = hw_stats->rx_bytes;
1365         stats->opackets = hw_stats->tx_packets;
1366         stats->obytes = hw_stats->tx_bytes;
1367
1368         memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1369         memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1370         memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1371         memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1372         memset(&stats->q_errors, 0, sizeof(stats->q_errors));
1373         for (i = 0; i < NGBE_MAX_QP; i++) {
1374                 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1375                 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1376                 uint32_t q_map;
1377
1378                 q_map = (stat_mappings->rqsm[n] >> offset)
1379                                 & QMAP_FIELD_RESERVED_BITS_MASK;
1380                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1381                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1382                 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1383                 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1384
1385                 q_map = (stat_mappings->tqsm[n] >> offset)
1386                                 & QMAP_FIELD_RESERVED_BITS_MASK;
1387                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1388                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1389                 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1390                 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1391         }
1392
1393         /* Rx Errors */
1394         stats->imissed  = hw_stats->rx_total_missed_packets +
1395                           hw_stats->rx_dma_drop;
1396         stats->ierrors  = hw_stats->rx_crc_errors +
1397                           hw_stats->rx_mac_short_packet_dropped +
1398                           hw_stats->rx_length_errors +
1399                           hw_stats->rx_undersize_errors +
1400                           hw_stats->rx_oversize_errors +
1401                           hw_stats->rx_illegal_byte_errors +
1402                           hw_stats->rx_error_bytes +
1403                           hw_stats->rx_fragment_errors;
1404
1405         /* Tx Errors */
1406         stats->oerrors  = 0;
1407         return 0;
1408 }
1409
1410 static int
1411 ngbe_dev_stats_reset(struct rte_eth_dev *dev)
1412 {
1413         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1414         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1415
1416         /* HW registers are cleared on read */
1417         hw->offset_loaded = 0;
1418         ngbe_dev_stats_get(dev, NULL);
1419         hw->offset_loaded = 1;
1420
1421         /* Reset software totals */
1422         memset(hw_stats, 0, sizeof(*hw_stats));
1423
1424         return 0;
1425 }
1426
1427 /* This function calculates the number of xstats based on the current config */
1428 static unsigned
1429 ngbe_xstats_calc_num(struct rte_eth_dev *dev)
1430 {
1431         int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1432         return NGBE_NB_HW_STATS +
1433                NGBE_NB_QP_STATS * nb_queues;
1434 }
1435
1436 static inline int
1437 ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1438 {
1439         int nb, st;
1440
1441         /* Extended stats from ngbe_hw_stats */
1442         if (id < NGBE_NB_HW_STATS) {
1443                 snprintf(name, size, "[hw]%s",
1444                         rte_ngbe_stats_strings[id].name);
1445                 return 0;
1446         }
1447         id -= NGBE_NB_HW_STATS;
1448
1449         /* Queue Stats */
1450         if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1451                 nb = id / NGBE_NB_QP_STATS;
1452                 st = id % NGBE_NB_QP_STATS;
1453                 snprintf(name, size, "[q%u]%s", nb,
1454                         rte_ngbe_qp_strings[st].name);
1455                 return 0;
1456         }
1457         id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
1458
1459         return -(int)(id + 1);
1460 }
1461
1462 static inline int
1463 ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1464 {
1465         int nb, st;
1466
1467         /* Extended stats from ngbe_hw_stats */
1468         if (id < NGBE_NB_HW_STATS) {
1469                 *offset = rte_ngbe_stats_strings[id].offset;
1470                 return 0;
1471         }
1472         id -= NGBE_NB_HW_STATS;
1473
1474         /* Queue Stats */
1475         if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1476                 nb = id / NGBE_NB_QP_STATS;
1477                 st = id % NGBE_NB_QP_STATS;
1478                 *offset = rte_ngbe_qp_strings[st].offset +
1479                         nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1480                 return 0;
1481         }
1482
1483         return -1;
1484 }
1485
1486 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1487         struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1488 {
1489         unsigned int i, count;
1490
1491         count = ngbe_xstats_calc_num(dev);
1492         if (xstats_names == NULL)
1493                 return count;
1494
1495         /* Note: limit >= cnt_stats checked upstream
1496          * in rte_eth_xstats_names()
1497          */
1498         limit = min(limit, count);
1499
1500         /* Extended stats from ngbe_hw_stats */
1501         for (i = 0; i < limit; i++) {
1502                 if (ngbe_get_name_by_id(i, xstats_names[i].name,
1503                         sizeof(xstats_names[i].name))) {
1504                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1505                         break;
1506                 }
1507         }
1508
1509         return i;
1510 }
1511
1512 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1513         const uint64_t *ids,
1514         struct rte_eth_xstat_name *xstats_names,
1515         unsigned int limit)
1516 {
1517         unsigned int i;
1518
1519         if (ids == NULL)
1520                 return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1521
1522         for (i = 0; i < limit; i++) {
1523                 if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1524                                 sizeof(xstats_names[i].name))) {
1525                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1526                         return -1;
1527                 }
1528         }
1529
1530         return i;
1531 }
1532
1533 static int
1534 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1535                                          unsigned int limit)
1536 {
1537         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1538         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1539         unsigned int i, count;
1540
1541         ngbe_read_stats_registers(hw, hw_stats);
1542
1543         /* If this is a reset xstats is NULL, and we have cleared the
1544          * registers by reading them.
1545          */
1546         count = ngbe_xstats_calc_num(dev);
1547         if (xstats == NULL)
1548                 return count;
1549
1550         limit = min(limit, ngbe_xstats_calc_num(dev));
1551
1552         /* Extended stats from ngbe_hw_stats */
1553         for (i = 0; i < limit; i++) {
1554                 uint32_t offset = 0;
1555
1556                 if (ngbe_get_offset_by_id(i, &offset)) {
1557                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1558                         break;
1559                 }
1560                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1561                 xstats[i].id = i;
1562         }
1563
1564         return i;
1565 }
1566
1567 static int
1568 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1569                                          unsigned int limit)
1570 {
1571         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1572         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1573         unsigned int i, count;
1574
1575         ngbe_read_stats_registers(hw, hw_stats);
1576
1577         /* If this is a reset xstats is NULL, and we have cleared the
1578          * registers by reading them.
1579          */
1580         count = ngbe_xstats_calc_num(dev);
1581         if (values == NULL)
1582                 return count;
1583
1584         limit = min(limit, ngbe_xstats_calc_num(dev));
1585
1586         /* Extended stats from ngbe_hw_stats */
1587         for (i = 0; i < limit; i++) {
1588                 uint32_t offset;
1589
1590                 if (ngbe_get_offset_by_id(i, &offset)) {
1591                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1592                         break;
1593                 }
1594                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1595         }
1596
1597         return i;
1598 }
1599
1600 static int
1601 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1602                 uint64_t *values, unsigned int limit)
1603 {
1604         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1605         unsigned int i;
1606
1607         if (ids == NULL)
1608                 return ngbe_dev_xstats_get_(dev, values, limit);
1609
1610         for (i = 0; i < limit; i++) {
1611                 uint32_t offset;
1612
1613                 if (ngbe_get_offset_by_id(ids[i], &offset)) {
1614                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1615                         break;
1616                 }
1617                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1618         }
1619
1620         return i;
1621 }
1622
1623 static int
1624 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1625 {
1626         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1627         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1628
1629         /* HW registers are cleared on read */
1630         hw->offset_loaded = 0;
1631         ngbe_read_stats_registers(hw, hw_stats);
1632         hw->offset_loaded = 1;
1633
1634         /* Reset software totals */
1635         memset(hw_stats, 0, sizeof(*hw_stats));
1636
1637         return 0;
1638 }
1639
1640 static int
1641 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1642 {
1643         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1644         int ret;
1645
1646         ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1647
1648         if (ret < 0)
1649                 return -EINVAL;
1650
1651         ret += 1; /* add the size of '\0' */
1652         if (fw_size < (size_t)ret)
1653                 return ret;
1654
1655         return 0;
1656 }
1657
1658 static int
1659 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1660 {
1661         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1662         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1663
1664         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1665         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1666         dev_info->min_rx_bufsize = 1024;
1667         dev_info->max_rx_pktlen = 15872;
1668         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1669         dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1670         dev_info->max_vfs = pci_dev->max_vfs;
1671         dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1672         dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1673                                      dev_info->rx_queue_offload_capa);
1674         dev_info->tx_queue_offload_capa = 0;
1675         dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1676
1677         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1678                 .rx_thresh = {
1679                         .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1680                         .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1681                         .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1682                 },
1683                 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1684                 .rx_drop_en = 0,
1685                 .offloads = 0,
1686         };
1687
1688         dev_info->default_txconf = (struct rte_eth_txconf) {
1689                 .tx_thresh = {
1690                         .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1691                         .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1692                         .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1693                 },
1694                 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1695                 .offloads = 0,
1696         };
1697
1698         dev_info->rx_desc_lim = rx_desc_lim;
1699         dev_info->tx_desc_lim = tx_desc_lim;
1700
1701         dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1702         dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1703         dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1704
1705         dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1706                                 RTE_ETH_LINK_SPEED_10M;
1707
1708         /* Driver-preferred Rx/Tx parameters */
1709         dev_info->default_rxportconf.burst_size = 32;
1710         dev_info->default_txportconf.burst_size = 32;
1711         dev_info->default_rxportconf.nb_queues = 1;
1712         dev_info->default_txportconf.nb_queues = 1;
1713         dev_info->default_rxportconf.ring_size = 256;
1714         dev_info->default_txportconf.ring_size = 256;
1715
1716         return 0;
1717 }
1718
1719 const uint32_t *
1720 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1721 {
1722         if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1723             dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1724             dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1725             dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1726                 return ngbe_get_supported_ptypes();
1727
1728         return NULL;
1729 }
1730
1731 /* return 0 means link status changed, -1 means not changed */
1732 int
1733 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1734                             int wait_to_complete)
1735 {
1736         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1737         struct rte_eth_link link;
1738         u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1739         u32 lan_speed = 0;
1740         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1741         bool link_up;
1742         int err;
1743         int wait = 1;
1744
1745         memset(&link, 0, sizeof(link));
1746         link.link_status = RTE_ETH_LINK_DOWN;
1747         link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1748         link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1749         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1750                         ~RTE_ETH_LINK_SPEED_AUTONEG);
1751
1752         hw->mac.get_link_status = true;
1753
1754         if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1755                 return rte_eth_linkstatus_set(dev, &link);
1756
1757         /* check if it needs to wait to complete, if lsc interrupt is enabled */
1758         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1759                 wait = 0;
1760
1761         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1762         if (err != 0) {
1763                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1764                 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1765                 return rte_eth_linkstatus_set(dev, &link);
1766         }
1767
1768         if (!link_up)
1769                 return rte_eth_linkstatus_set(dev, &link);
1770
1771         intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1772         link.link_status = RTE_ETH_LINK_UP;
1773         link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1774
1775         switch (link_speed) {
1776         default:
1777         case NGBE_LINK_SPEED_UNKNOWN:
1778                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1779                 break;
1780
1781         case NGBE_LINK_SPEED_10M_FULL:
1782                 link.link_speed = RTE_ETH_SPEED_NUM_10M;
1783                 lan_speed = 0;
1784                 break;
1785
1786         case NGBE_LINK_SPEED_100M_FULL:
1787                 link.link_speed = RTE_ETH_SPEED_NUM_100M;
1788                 lan_speed = 1;
1789                 break;
1790
1791         case NGBE_LINK_SPEED_1GB_FULL:
1792                 link.link_speed = RTE_ETH_SPEED_NUM_1G;
1793                 lan_speed = 2;
1794                 break;
1795         }
1796
1797         if (hw->is_pf) {
1798                 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1799                 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1800                                 NGBE_LINK_SPEED_100M_FULL |
1801                                 NGBE_LINK_SPEED_10M_FULL)) {
1802                         wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1803                                 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1804                 }
1805         }
1806
1807         return rte_eth_linkstatus_set(dev, &link);
1808 }
1809
1810 static int
1811 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1812 {
1813         return ngbe_dev_link_update_share(dev, wait_to_complete);
1814 }
1815
1816 static int
1817 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1818 {
1819         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1820         uint32_t fctrl;
1821
1822         fctrl = rd32(hw, NGBE_PSRCTL);
1823         fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1824         wr32(hw, NGBE_PSRCTL, fctrl);
1825
1826         return 0;
1827 }
1828
1829 static int
1830 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1831 {
1832         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1833         uint32_t fctrl;
1834
1835         fctrl = rd32(hw, NGBE_PSRCTL);
1836         fctrl &= (~NGBE_PSRCTL_UCP);
1837         if (dev->data->all_multicast == 1)
1838                 fctrl |= NGBE_PSRCTL_MCP;
1839         else
1840                 fctrl &= (~NGBE_PSRCTL_MCP);
1841         wr32(hw, NGBE_PSRCTL, fctrl);
1842
1843         return 0;
1844 }
1845
1846 static int
1847 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1848 {
1849         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1850         uint32_t fctrl;
1851
1852         fctrl = rd32(hw, NGBE_PSRCTL);
1853         fctrl |= NGBE_PSRCTL_MCP;
1854         wr32(hw, NGBE_PSRCTL, fctrl);
1855
1856         return 0;
1857 }
1858
1859 static int
1860 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1861 {
1862         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1863         uint32_t fctrl;
1864
1865         if (dev->data->promiscuous == 1)
1866                 return 0; /* must remain in all_multicast mode */
1867
1868         fctrl = rd32(hw, NGBE_PSRCTL);
1869         fctrl &= (~NGBE_PSRCTL_MCP);
1870         wr32(hw, NGBE_PSRCTL, fctrl);
1871
1872         return 0;
1873 }
1874
1875 /**
1876  * It clears the interrupt causes and enables the interrupt.
1877  * It will be called once only during NIC initialized.
1878  *
1879  * @param dev
1880  *  Pointer to struct rte_eth_dev.
1881  * @param on
1882  *  Enable or Disable.
1883  *
1884  * @return
1885  *  - On success, zero.
1886  *  - On failure, a negative value.
1887  */
1888 static int
1889 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1890 {
1891         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1892
1893         ngbe_dev_link_status_print(dev);
1894         if (on != 0) {
1895                 intr->mask_misc |= NGBE_ICRMISC_PHY;
1896                 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1897         } else {
1898                 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1899                 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
1900         }
1901
1902         return 0;
1903 }
1904
1905 /**
1906  * It clears the interrupt causes and enables the interrupt.
1907  * It will be called once only during NIC initialized.
1908  *
1909  * @param dev
1910  *  Pointer to struct rte_eth_dev.
1911  *
1912  * @return
1913  *  - On success, zero.
1914  *  - On failure, a negative value.
1915  */
1916 static int
1917 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1918 {
1919         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1920         u64 mask;
1921
1922         mask = NGBE_ICR_MASK;
1923         mask &= (1ULL << NGBE_MISC_VEC_ID);
1924         intr->mask |= mask;
1925         intr->mask_misc |= NGBE_ICRMISC_GPIO;
1926
1927         return 0;
1928 }
1929
1930 /**
1931  * It clears the interrupt causes and enables the interrupt.
1932  * It will be called once only during NIC initialized.
1933  *
1934  * @param dev
1935  *  Pointer to struct rte_eth_dev.
1936  *
1937  * @return
1938  *  - On success, zero.
1939  *  - On failure, a negative value.
1940  */
1941 static int
1942 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1943 {
1944         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1945         u64 mask;
1946
1947         mask = NGBE_ICR_MASK;
1948         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
1949         intr->mask |= mask;
1950
1951         return 0;
1952 }
1953
1954 /**
1955  * It clears the interrupt causes and enables the interrupt.
1956  * It will be called once only during NIC initialized.
1957  *
1958  * @param dev
1959  *  Pointer to struct rte_eth_dev.
1960  *
1961  * @return
1962  *  - On success, zero.
1963  *  - On failure, a negative value.
1964  */
1965 static int
1966 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1967 {
1968         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1969
1970         intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
1971
1972         return 0;
1973 }
1974
1975 /*
1976  * It reads ICR and sets flag for the link_update.
1977  *
1978  * @param dev
1979  *  Pointer to struct rte_eth_dev.
1980  *
1981  * @return
1982  *  - On success, zero.
1983  *  - On failure, a negative value.
1984  */
1985 static int
1986 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1987 {
1988         uint32_t eicr;
1989         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1990         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1991
1992         /* clear all cause mask */
1993         ngbe_disable_intr(hw);
1994
1995         /* read-on-clear nic registers here */
1996         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
1997         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
1998
1999         intr->flags = 0;
2000
2001         /* set flag for async link update */
2002         if (eicr & NGBE_ICRMISC_PHY)
2003                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2004
2005         if (eicr & NGBE_ICRMISC_VFMBX)
2006                 intr->flags |= NGBE_FLAG_MAILBOX;
2007
2008         if (eicr & NGBE_ICRMISC_LNKSEC)
2009                 intr->flags |= NGBE_FLAG_MACSEC;
2010
2011         if (eicr & NGBE_ICRMISC_GPIO)
2012                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2013
2014         return 0;
2015 }
2016
2017 /**
2018  * It gets and then prints the link status.
2019  *
2020  * @param dev
2021  *  Pointer to struct rte_eth_dev.
2022  *
2023  * @return
2024  *  - On success, zero.
2025  *  - On failure, a negative value.
2026  */
2027 static void
2028 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2029 {
2030         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2031         struct rte_eth_link link;
2032
2033         rte_eth_linkstatus_get(dev, &link);
2034
2035         if (link.link_status == RTE_ETH_LINK_UP) {
2036                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2037                                         (int)(dev->data->port_id),
2038                                         (unsigned int)link.link_speed,
2039                         link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2040                                         "full-duplex" : "half-duplex");
2041         } else {
2042                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2043                                 (int)(dev->data->port_id));
2044         }
2045         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2046                                 pci_dev->addr.domain,
2047                                 pci_dev->addr.bus,
2048                                 pci_dev->addr.devid,
2049                                 pci_dev->addr.function);
2050 }
2051
2052 /*
2053  * It executes link_update after knowing an interrupt occurred.
2054  *
2055  * @param dev
2056  *  Pointer to struct rte_eth_dev.
2057  *
2058  * @return
2059  *  - On success, zero.
2060  *  - On failure, a negative value.
2061  */
2062 static int
2063 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2064 {
2065         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2066         int64_t timeout;
2067
2068         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2069
2070         if (intr->flags & NGBE_FLAG_MAILBOX) {
2071                 ngbe_pf_mbx_process(dev);
2072                 intr->flags &= ~NGBE_FLAG_MAILBOX;
2073         }
2074
2075         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2076                 struct rte_eth_link link;
2077
2078                 /*get the link status before link update, for predicting later*/
2079                 rte_eth_linkstatus_get(dev, &link);
2080
2081                 ngbe_dev_link_update(dev, 0);
2082
2083                 /* likely to up */
2084                 if (link.link_status != RTE_ETH_LINK_UP)
2085                         /* handle it 1 sec later, wait it being stable */
2086                         timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
2087                 /* likely to down */
2088                 else
2089                         /* handle it 4 sec later, wait it being stable */
2090                         timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
2091
2092                 ngbe_dev_link_status_print(dev);
2093                 if (rte_eal_alarm_set(timeout * 1000,
2094                                       ngbe_dev_interrupt_delayed_handler,
2095                                       (void *)dev) < 0) {
2096                         PMD_DRV_LOG(ERR, "Error setting alarm");
2097                 } else {
2098                         /* remember original mask */
2099                         intr->mask_misc_orig = intr->mask_misc;
2100                         /* only disable lsc interrupt */
2101                         intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2102
2103                         intr->mask_orig = intr->mask;
2104                         /* only disable all misc interrupts */
2105                         intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
2106                 }
2107         }
2108
2109         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2110         ngbe_enable_intr(dev);
2111
2112         return 0;
2113 }
2114
2115 /**
2116  * Interrupt handler which shall be registered for alarm callback for delayed
2117  * handling specific interrupt to wait for the stable nic state. As the
2118  * NIC interrupt state is not stable for ngbe after link is just down,
2119  * it needs to wait 4 seconds to get the stable status.
2120  *
2121  * @param param
2122  *  The address of parameter (struct rte_eth_dev *) registered before.
2123  */
2124 static void
2125 ngbe_dev_interrupt_delayed_handler(void *param)
2126 {
2127         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2128         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2129         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2130         uint32_t eicr;
2131
2132         ngbe_disable_intr(hw);
2133
2134         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2135         if (eicr & NGBE_ICRMISC_VFMBX)
2136                 ngbe_pf_mbx_process(dev);
2137
2138         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2139                 ngbe_dev_link_update(dev, 0);
2140                 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2141                 ngbe_dev_link_status_print(dev);
2142                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2143                                               NULL);
2144         }
2145
2146         if (intr->flags & NGBE_FLAG_MACSEC) {
2147                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2148                                               NULL);
2149                 intr->flags &= ~NGBE_FLAG_MACSEC;
2150         }
2151
2152         /* restore original mask */
2153         intr->mask_misc = intr->mask_misc_orig;
2154         intr->mask_misc_orig = 0;
2155         intr->mask = intr->mask_orig;
2156         intr->mask_orig = 0;
2157
2158         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2159         ngbe_enable_intr(dev);
2160 }
2161
2162 /**
2163  * Interrupt handler triggered by NIC  for handling
2164  * specific interrupt.
2165  *
2166  * @param param
2167  *  The address of parameter (struct rte_eth_dev *) registered before.
2168  */
2169 static void
2170 ngbe_dev_interrupt_handler(void *param)
2171 {
2172         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2173
2174         ngbe_dev_interrupt_get_status(dev);
2175         ngbe_dev_interrupt_action(dev);
2176 }
2177
2178 int
2179 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2180                           struct rte_eth_rss_reta_entry64 *reta_conf,
2181                           uint16_t reta_size)
2182 {
2183         uint8_t i, j, mask;
2184         uint32_t reta;
2185         uint16_t idx, shift;
2186         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2187         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2188
2189         PMD_INIT_FUNC_TRACE();
2190
2191         if (!hw->is_pf) {
2192                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2193                         "NIC.");
2194                 return -ENOTSUP;
2195         }
2196
2197         if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2198                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2199                         "(%d) doesn't match the number hardware can supported "
2200                         "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2201                 return -EINVAL;
2202         }
2203
2204         for (i = 0; i < reta_size; i += 4) {
2205                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
2206                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2207                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2208                 if (!mask)
2209                         continue;
2210
2211                 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2212                 for (j = 0; j < 4; j++) {
2213                         if (RS8(mask, j, 0x1)) {
2214                                 reta  &= ~(MS32(8 * j, 0xFF));
2215                                 reta |= LS32(reta_conf[idx].reta[shift + j],
2216                                                 8 * j, 0xFF);
2217                         }
2218                 }
2219                 wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2220         }
2221         adapter->rss_reta_updated = 1;
2222
2223         return 0;
2224 }
2225
2226 int
2227 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2228                          struct rte_eth_rss_reta_entry64 *reta_conf,
2229                          uint16_t reta_size)
2230 {
2231         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2232         uint8_t i, j, mask;
2233         uint32_t reta;
2234         uint16_t idx, shift;
2235
2236         PMD_INIT_FUNC_TRACE();
2237
2238         if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2239                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2240                         "(%d) doesn't match the number hardware can supported "
2241                         "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2242                 return -EINVAL;
2243         }
2244
2245         for (i = 0; i < reta_size; i += 4) {
2246                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
2247                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2248                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2249                 if (!mask)
2250                         continue;
2251
2252                 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2253                 for (j = 0; j < 4; j++) {
2254                         if (RS8(mask, j, 0x1))
2255                                 reta_conf[idx].reta[shift + j] =
2256                                         (uint16_t)RS32(reta, 8 * j, 0xFF);
2257                 }
2258         }
2259
2260         return 0;
2261 }
2262
2263 static int
2264 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2265                                 uint32_t index, uint32_t pool)
2266 {
2267         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2268         uint32_t enable_addr = 1;
2269
2270         return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2271                              pool, enable_addr);
2272 }
2273
2274 static void
2275 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2276 {
2277         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2278
2279         ngbe_clear_rar(hw, index);
2280 }
2281
2282 static int
2283 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2284 {
2285         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2286
2287         ngbe_remove_rar(dev, 0);
2288         ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2289
2290         return 0;
2291 }
2292
2293 static int
2294 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2295 {
2296         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2297         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
2298         struct rte_eth_dev_data *dev_data = dev->data;
2299
2300         /* If device is started, refuse mtu that requires the support of
2301          * scattered packets when this feature has not been enabled before.
2302          */
2303         if (dev_data->dev_started && !dev_data->scattered_rx &&
2304             (frame_size + 2 * NGBE_VLAN_TAG_SIZE >
2305              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2306                 PMD_INIT_LOG(ERR, "Stop port first.");
2307                 return -EINVAL;
2308         }
2309
2310         if (hw->mode)
2311                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2312                         NGBE_FRAME_SIZE_MAX);
2313         else
2314                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2315                         NGBE_FRMSZ_MAX(frame_size));
2316
2317         return 0;
2318 }
2319
2320 static uint32_t
2321 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2322 {
2323         uint32_t vector = 0;
2324
2325         switch (hw->mac.mc_filter_type) {
2326         case 0:   /* use bits [47:36] of the address */
2327                 vector = ((uc_addr->addr_bytes[4] >> 4) |
2328                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2329                 break;
2330         case 1:   /* use bits [46:35] of the address */
2331                 vector = ((uc_addr->addr_bytes[4] >> 3) |
2332                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2333                 break;
2334         case 2:   /* use bits [45:34] of the address */
2335                 vector = ((uc_addr->addr_bytes[4] >> 2) |
2336                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2337                 break;
2338         case 3:   /* use bits [43:32] of the address */
2339                 vector = ((uc_addr->addr_bytes[4]) |
2340                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2341                 break;
2342         default:  /* Invalid mc_filter_type */
2343                 break;
2344         }
2345
2346         /* vector can only be 12-bits or boundary will be exceeded */
2347         vector &= 0xFFF;
2348         return vector;
2349 }
2350
2351 static int
2352 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2353                         struct rte_ether_addr *mac_addr, uint8_t on)
2354 {
2355         uint32_t vector;
2356         uint32_t uta_idx;
2357         uint32_t reg_val;
2358         uint32_t uta_mask;
2359         uint32_t psrctl;
2360
2361         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2362         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2363
2364         vector = ngbe_uta_vector(hw, mac_addr);
2365         uta_idx = (vector >> 5) & 0x7F;
2366         uta_mask = 0x1UL << (vector & 0x1F);
2367
2368         if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2369                 return 0;
2370
2371         reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2372         if (on) {
2373                 uta_info->uta_in_use++;
2374                 reg_val |= uta_mask;
2375                 uta_info->uta_shadow[uta_idx] |= uta_mask;
2376         } else {
2377                 uta_info->uta_in_use--;
2378                 reg_val &= ~uta_mask;
2379                 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2380         }
2381
2382         wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2383
2384         psrctl = rd32(hw, NGBE_PSRCTL);
2385         if (uta_info->uta_in_use > 0)
2386                 psrctl |= NGBE_PSRCTL_UCHFENA;
2387         else
2388                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2389
2390         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2391         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2392         wr32(hw, NGBE_PSRCTL, psrctl);
2393
2394         return 0;
2395 }
2396
2397 static int
2398 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2399 {
2400         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2401         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2402         uint32_t psrctl;
2403         int i;
2404
2405         if (on) {
2406                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2407                         uta_info->uta_shadow[i] = ~0;
2408                         wr32(hw, NGBE_UCADDRTBL(i), ~0);
2409                 }
2410         } else {
2411                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2412                         uta_info->uta_shadow[i] = 0;
2413                         wr32(hw, NGBE_UCADDRTBL(i), 0);
2414                 }
2415         }
2416
2417         psrctl = rd32(hw, NGBE_PSRCTL);
2418         if (on)
2419                 psrctl |= NGBE_PSRCTL_UCHFENA;
2420         else
2421                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2422
2423         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2424         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2425         wr32(hw, NGBE_PSRCTL, psrctl);
2426
2427         return 0;
2428 }
2429
2430 /**
2431  * Set the IVAR registers, mapping interrupt causes to vectors
2432  * @param hw
2433  *  pointer to ngbe_hw struct
2434  * @direction
2435  *  0 for Rx, 1 for Tx, -1 for other causes
2436  * @queue
2437  *  queue to map the corresponding interrupt to
2438  * @msix_vector
2439  *  the vector to map to the corresponding queue
2440  */
2441 void
2442 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2443                    uint8_t queue, uint8_t msix_vector)
2444 {
2445         uint32_t tmp, idx;
2446
2447         if (direction == -1) {
2448                 /* other causes */
2449                 msix_vector |= NGBE_IVARMISC_VLD;
2450                 idx = 0;
2451                 tmp = rd32(hw, NGBE_IVARMISC);
2452                 tmp &= ~(0xFF << idx);
2453                 tmp |= (msix_vector << idx);
2454                 wr32(hw, NGBE_IVARMISC, tmp);
2455         } else {
2456                 /* rx or tx causes */
2457                 /* Workround for ICR lost */
2458                 idx = ((16 * (queue & 1)) + (8 * direction));
2459                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2460                 tmp &= ~(0xFF << idx);
2461                 tmp |= (msix_vector << idx);
2462                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2463         }
2464 }
2465
2466 /**
2467  * Sets up the hardware to properly generate MSI-X interrupts
2468  * @hw
2469  *  board private structure
2470  */
2471 static void
2472 ngbe_configure_msix(struct rte_eth_dev *dev)
2473 {
2474         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2475         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2476         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2477         uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2478         uint32_t vec = NGBE_MISC_VEC_ID;
2479         uint32_t gpie;
2480
2481         /*
2482          * Won't configure MSI-X register if no mapping is done
2483          * between intr vector and event fd
2484          * but if MSI-X has been enabled already, need to configure
2485          * auto clean, auto mask and throttling.
2486          */
2487         gpie = rd32(hw, NGBE_GPIE);
2488         if (!rte_intr_dp_is_en(intr_handle) &&
2489             !(gpie & NGBE_GPIE_MSIX))
2490                 return;
2491
2492         if (rte_intr_allow_others(intr_handle)) {
2493                 base = NGBE_RX_VEC_START;
2494                 vec = base;
2495         }
2496
2497         /* setup GPIE for MSI-X mode */
2498         gpie = rd32(hw, NGBE_GPIE);
2499         gpie |= NGBE_GPIE_MSIX;
2500         wr32(hw, NGBE_GPIE, gpie);
2501
2502         /* Populate the IVAR table and set the ITR values to the
2503          * corresponding register.
2504          */
2505         if (rte_intr_dp_is_en(intr_handle)) {
2506                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2507                         queue_id++) {
2508                         /* by default, 1:1 mapping */
2509                         ngbe_set_ivar_map(hw, 0, queue_id, vec);
2510                         rte_intr_vec_list_index_set(intr_handle,
2511                                                            queue_id, vec);
2512                         if (vec < base + rte_intr_nb_efd_get(intr_handle)
2513                             - 1)
2514                                 vec++;
2515                 }
2516
2517                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2518         }
2519         wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2520                         NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2521                         | NGBE_ITR_WRDSA);
2522 }
2523
2524 static u8 *
2525 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2526                         u8 **mc_addr_ptr, u32 *vmdq)
2527 {
2528         u8 *mc_addr;
2529
2530         *vmdq = 0;
2531         mc_addr = *mc_addr_ptr;
2532         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2533         return mc_addr;
2534 }
2535
2536 int
2537 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2538                           struct rte_ether_addr *mc_addr_set,
2539                           uint32_t nb_mc_addr)
2540 {
2541         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2542         u8 *mc_addr_list;
2543
2544         mc_addr_list = (u8 *)mc_addr_set;
2545         return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2546                                          ngbe_dev_addr_list_itr, TRUE);
2547 }
2548
2549 static const struct eth_dev_ops ngbe_eth_dev_ops = {
2550         .dev_configure              = ngbe_dev_configure,
2551         .dev_infos_get              = ngbe_dev_info_get,
2552         .dev_start                  = ngbe_dev_start,
2553         .dev_stop                   = ngbe_dev_stop,
2554         .dev_close                  = ngbe_dev_close,
2555         .dev_reset                  = ngbe_dev_reset,
2556         .promiscuous_enable         = ngbe_dev_promiscuous_enable,
2557         .promiscuous_disable        = ngbe_dev_promiscuous_disable,
2558         .allmulticast_enable        = ngbe_dev_allmulticast_enable,
2559         .allmulticast_disable       = ngbe_dev_allmulticast_disable,
2560         .link_update                = ngbe_dev_link_update,
2561         .stats_get                  = ngbe_dev_stats_get,
2562         .xstats_get                 = ngbe_dev_xstats_get,
2563         .xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
2564         .stats_reset                = ngbe_dev_stats_reset,
2565         .xstats_reset               = ngbe_dev_xstats_reset,
2566         .xstats_get_names           = ngbe_dev_xstats_get_names,
2567         .xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
2568         .fw_version_get             = ngbe_fw_version_get,
2569         .dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
2570         .mtu_set                    = ngbe_dev_mtu_set,
2571         .vlan_filter_set            = ngbe_vlan_filter_set,
2572         .vlan_tpid_set              = ngbe_vlan_tpid_set,
2573         .vlan_offload_set           = ngbe_vlan_offload_set,
2574         .vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
2575         .rx_queue_start             = ngbe_dev_rx_queue_start,
2576         .rx_queue_stop              = ngbe_dev_rx_queue_stop,
2577         .tx_queue_start             = ngbe_dev_tx_queue_start,
2578         .tx_queue_stop              = ngbe_dev_tx_queue_stop,
2579         .rx_queue_setup             = ngbe_dev_rx_queue_setup,
2580         .rx_queue_release           = ngbe_dev_rx_queue_release,
2581         .tx_queue_setup             = ngbe_dev_tx_queue_setup,
2582         .tx_queue_release           = ngbe_dev_tx_queue_release,
2583         .mac_addr_add               = ngbe_add_rar,
2584         .mac_addr_remove            = ngbe_remove_rar,
2585         .mac_addr_set               = ngbe_set_default_mac_addr,
2586         .uc_hash_table_set          = ngbe_uc_hash_table_set,
2587         .uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
2588         .reta_update                = ngbe_dev_rss_reta_update,
2589         .reta_query                 = ngbe_dev_rss_reta_query,
2590         .rss_hash_update            = ngbe_dev_rss_hash_update,
2591         .rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
2592         .set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
2593         .rx_burst_mode_get          = ngbe_rx_burst_mode_get,
2594         .tx_burst_mode_get          = ngbe_tx_burst_mode_get,
2595 };
2596
2597 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
2598 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
2599 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
2600
2601 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
2602 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
2603
2604 #ifdef RTE_ETHDEV_DEBUG_RX
2605         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
2606 #endif
2607 #ifdef RTE_ETHDEV_DEBUG_TX
2608         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
2609 #endif