net/ngbe: support RSS hash
drivers/net/ngbe/ngbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
                                        uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
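
/*
 * Worked example of the bitmap arithmetic above (illustrative only): each
 * queue maps to word q / (word size in bits) and bit q % (word size in
 * bits). With 32-bit bitmap words, queue 5 gives idx = 5 / 32 = 0 and
 * bit = 5 % 32 = 5, i.e. bit 5 of bitmap[0]:
 *
 *     NGBE_SET_HWSTRIP(h, 5);        // bitmap[0] |= 1 << 5
 *     NGBE_CLEAR_HWSTRIP(h, 5);      // bitmap[0] &= ~(1 << 5)
 *     uint32_t r;
 *     NGBE_GET_HWSTRIP(h, 5, r);     // r = bitmap[0] >> 5 & 1
 */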

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_TXD_ALIGN,
        .nb_seg_max = NGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
        /* MNG RxTx */
        HW_XSTAT(mng_bmc2host_packets),
        HW_XSTAT(mng_host2bmc_packets),
        /* Basic RxTx */
        HW_XSTAT(rx_packets),
        HW_XSTAT(tx_packets),
        HW_XSTAT(rx_bytes),
        HW_XSTAT(tx_bytes),
        HW_XSTAT(rx_total_bytes),
        HW_XSTAT(rx_total_packets),
        HW_XSTAT(tx_total_packets),
        HW_XSTAT(rx_total_missed_packets),
        HW_XSTAT(rx_broadcast_packets),
        HW_XSTAT(rx_multicast_packets),
        HW_XSTAT(rx_management_packets),
        HW_XSTAT(tx_management_packets),
        HW_XSTAT(rx_management_dropped),

        /* Basic Error */
        HW_XSTAT(rx_crc_errors),
        HW_XSTAT(rx_illegal_byte_errors),
        HW_XSTAT(rx_error_bytes),
        HW_XSTAT(rx_mac_short_packet_dropped),
        HW_XSTAT(rx_length_errors),
        HW_XSTAT(rx_undersize_errors),
        HW_XSTAT(rx_fragment_errors),
        HW_XSTAT(rx_oversize_errors),
        HW_XSTAT(rx_jabber_errors),
        HW_XSTAT(rx_l3_l4_xsum_error),
        HW_XSTAT(mac_local_errors),
        HW_XSTAT(mac_remote_errors),

        /* MACSEC */
        HW_XSTAT(tx_macsec_pkts_untagged),
        HW_XSTAT(tx_macsec_pkts_encrypted),
        HW_XSTAT(tx_macsec_pkts_protected),
        HW_XSTAT(tx_macsec_octets_encrypted),
        HW_XSTAT(tx_macsec_octets_protected),
        HW_XSTAT(rx_macsec_pkts_untagged),
        HW_XSTAT(rx_macsec_pkts_badtag),
        HW_XSTAT(rx_macsec_pkts_nosci),
        HW_XSTAT(rx_macsec_pkts_unknownsci),
        HW_XSTAT(rx_macsec_octets_decrypted),
        HW_XSTAT(rx_macsec_octets_validated),
        HW_XSTAT(rx_macsec_sc_pkts_unchecked),
        HW_XSTAT(rx_macsec_sc_pkts_delayed),
        HW_XSTAT(rx_macsec_sc_pkts_late),
        HW_XSTAT(rx_macsec_sa_pkts_ok),
        HW_XSTAT(rx_macsec_sa_pkts_invalid),
        HW_XSTAT(rx_macsec_sa_pkts_notvalid),
        HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
        HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

        /* MAC RxTx */
        HW_XSTAT(rx_size_64_packets),
        HW_XSTAT(rx_size_65_to_127_packets),
        HW_XSTAT(rx_size_128_to_255_packets),
        HW_XSTAT(rx_size_256_to_511_packets),
        HW_XSTAT(rx_size_512_to_1023_packets),
        HW_XSTAT(rx_size_1024_to_max_packets),
        HW_XSTAT(tx_size_64_packets),
        HW_XSTAT(tx_size_65_to_127_packets),
        HW_XSTAT(tx_size_128_to_255_packets),
        HW_XSTAT(tx_size_256_to_511_packets),
        HW_XSTAT(tx_size_512_to_1023_packets),
        HW_XSTAT(tx_size_1024_to_max_packets),

        /* Flow Control */
        HW_XSTAT(tx_xon_packets),
        HW_XSTAT(rx_xon_packets),
        HW_XSTAT(tx_xoff_packets),
        HW_XSTAT(rx_xoff_packets),

        HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
        HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
        HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
        HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
                           sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
        QP_XSTAT(rx_qp_packets),
        QP_XSTAT(tx_qp_packets),
        QP_XSTAT(rx_qp_bytes),
        QP_XSTAT(tx_qp_bytes),
        QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
                           sizeof(rte_ngbe_qp_strings[0]))

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = hw->mac.reset_hw(hw);

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        if (status == NGBE_ERR_SFP_NOT_PRESENT)
                status = 0;
        return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);

        wr32(hw, NGBE_IENMISC, intr->mask_misc);
        wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
        ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
        ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
        uint16_t mask;

        /*
         * These are trickier since they are common to all ports; but
         * swfw_sync retries last long enough (1s) to be almost sure that if
         * the lock cannot be taken, it is due to an improper hold of the
         * semaphore.
         */
        mask = NGBE_MNGSEM_SWPHY |
               NGBE_MNGSEM_SWMBX |
               NGBE_MNGSEM_SWFLASH;
        if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");

        hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        int err;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ngbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * Rx and Tx function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ngbe_tx_queue *txq;
                /* The Tx function in the primary process is set by the last
                 * initialized queue; the Tx queues may not have been
                 * initialized by the primary process yet.
                 */
                if (eth_dev->data->tx_queues) {
                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
                        ngbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default Tx function if we get here */
                        PMD_INIT_LOG(NOTICE,
                                "No Tx queues configured yet. Using default Tx function.");
                }

                ngbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->sub_system_id = pci_dev->id.subsystem_device_id;
        ngbe_map_device_id(hw);
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
                NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = ngbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, NULL);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* Reset the hw statistics */
        ngbe_dev_stats_reset(eth_dev);

        /* disable interrupt */
        ngbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
                        RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                return -ENOMEM;
        }

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* initialize the HW strip bitmap */
        memset(hwstrip, 0, sizeof(*hwstrip));

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                        (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   ngbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable support intr */
        ngbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ngbe_dev_close(eth_dev);

        return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct ngbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (ethdev == NULL)
                return 0;

        return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
        .id_table = pci_id_ngbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_ngbe_pci_probe,
        .remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

        vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
        vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
        vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;

        return 0;
}
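
/*
 * Worked example of the VFTA lookup above (illustrative only): the 4096
 * possible VLAN IDs are spread over 128 32-bit table words, so for
 * vlan_id 100 the function computes vid_idx = (100 >> 5) & 0x7F = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. VLAN 100 is bit 4 of
 * NGBE_VLANTBL(3). The shadow copy lets the filter table be rewritten
 * wholesale when filtering is re-enabled.
 */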

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_rx_queue *rxq;
        bool restart;
        uint32_t rxcfg, rxbal, rxbah;

        if (on)
                ngbe_vlan_hw_strip_enable(dev, queue);
        else
                ngbe_vlan_hw_strip_disable(dev, queue);

        rxq = dev->data->rx_queues[queue];
        rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
        if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        !(rxcfg & NGBE_RXCFG_VLAN);
                rxcfg |= NGBE_RXCFG_VLAN;
        } else {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        (rxcfg & NGBE_RXCFG_VLAN);
                rxcfg &= ~NGBE_RXCFG_VLAN;
        }
        rxcfg &= ~NGBE_RXCFG_ENA;

        if (restart) {
                /* set vlan strip for ring */
                ngbe_dev_rx_queue_stop(dev, queue);
                wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
                wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
                wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
                ngbe_dev_rx_queue_start(dev, queue);
        }
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
                    enum rte_vlan_type vlan_type,
                    uint16_t tpid)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        int ret = 0;
        uint32_t portctrl, vlan_ext, qinq;

        portctrl = rd32(hw, NGBE_PORTCTL);

        vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
        switch (vlan_type) {
        case RTE_ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                } else {
                        ret = -ENOTSUP;
                        PMD_DRV_LOG(ERR,
                                "Inner type is not supported by single VLAN");
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_LSB_MASK,
                                NGBE_TAGTPID_LSB(tpid));
                }
                break;
        case RTE_ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the high 16 bits are valid */
                        wr32m(hw, NGBE_EXTAG,
                                NGBE_EXTAG_VLAN_MASK,
                                NGBE_EXTAG_VLAN(tpid));
                } else {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_MSB_MASK,
                                NGBE_TAGTPID_MSB(tpid));
                }
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
                return -EINVAL;
        }

        return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t vlnctrl;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Disable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vlnctrl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Enable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_CFIENA;
        vlnctrl |= NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);

        /* write whatever is in local vfta copy */
        for (i = 0; i < NGBE_VFTA_SIZE; i++)
                wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
        struct ngbe_rx_queue *rxq;

        if (queue >= NGBE_MAX_RX_QUEUE_NUM)
                return;

        if (on)
                NGBE_SET_HWSTRIP(hwstrip, queue);
        else
                NGBE_CLEAR_HWSTRIP(hwstrip, queue);

        if (queue >= dev->data->nb_rx_queues)
                return;

        rxq = dev->data->rx_queues[queue];

        if (on) {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        } else {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl &= ~NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this setting for per-queue HW strip */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl |= NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this setting for per-queue HW strip */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_VLANEXT;
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
        struct ngbe_rx_queue *rxq;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ngbe_vlan_hw_strip_enable(dev, i);
                else
                        ngbe_vlan_hw_strip_disable(dev, i);
        }
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
        uint16_t i;
        struct rte_eth_rxmode *rxmode;
        struct ngbe_rx_queue *rxq;

        if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;

        if (mask & RTE_ETH_VLAN_STRIP_MASK)
                ngbe_vlan_hw_strip_config(dev);

        if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ngbe_vlan_hw_filter_enable(dev);
                else
                        ngbe_vlan_hw_filter_disable(dev);
        }

        if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        ngbe_vlan_hw_extend_enable(dev);
                else
                        ngbe_vlan_hw_extend_disable(dev);
        }

        if (mask & RTE_ETH_QINQ_STRIP_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
                        ngbe_qinq_hw_strip_enable(dev);
                else
                        ngbe_qinq_hw_strip_disable(dev);
        }

        return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        ngbe_config_vlan_strip_on_all_queues(dev, mask);

        ngbe_vlan_offload_config(dev, mask);

        return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

        /* set flag to update link status after init */
        intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue doesn't meet the bulk
         * allocation preconditions, we will reset it.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}
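
/*
 * Illustrative sketch (not part of the driver): an application enables RSS
 * by requesting RTE_ETH_MQ_RX_RSS in its port configuration;
 * ngbe_dev_configure() above then forces RTE_ETH_RX_OFFLOAD_RSS_HASH on,
 * so the Rx path can report the computed hash in mbuf->hash.rss with the
 * RTE_MBUF_F_RX_RSS_HASH flag set.
 *
 *     struct rte_eth_conf port_conf = {
 *             .rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
 *             .rx_adv_conf = {
 *                     .rss_conf = { .rss_hf = RTE_ETH_RSS_IP },
 *             },
 *     };
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */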

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

        wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
        wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
        wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
        if (hw->phy.type == ngbe_phy_yt8521s_sfi)
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
        else
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

        intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and set up link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        int err;
        bool link_up = false, negotiate = false;
        uint32_t speed = 0;
        uint32_t allowed_speeds = 0;
        int mask = 0;
        int status;
        uint32_t *link_speeds;

        PMD_INIT_FUNC_TRACE();

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        /* stop adapter */
        hw->adapter_stopped = 0;
        ngbe_stop_hw(hw);

        /* reinitialize adapter, this calls reset and start */
        hw->nb_rx_queues = dev->data->nb_rx_queues;
        hw->nb_tx_queues = dev->data->nb_tx_queues;
        status = ngbe_pf_reset_hw(hw);
        if (status != 0)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;

        ngbe_dev_phy_intr_setup(dev);

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(dev).active) &&
            dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle)) {
                if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
                                                   dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate %d rx_queues intr_vec",
                                     dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        /* configure MSI-X for sleep until Rx interrupt */
        ngbe_configure_msix(dev);

        /* initialize transmission unit */
        ngbe_dev_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        err = ngbe_dev_rx_init(dev);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
                goto error;
        }

        mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
                RTE_ETH_VLAN_EXTEND_MASK;
        err = ngbe_vlan_offload_config(dev, mask);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
        }

        ngbe_configure_port(dev);

        err = ngbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
                goto error;
        }

        /* Skip link setup if loopback mode is enabled. */
        if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
                goto skip_link_setup;

        err = hw->mac.check_link(hw, &speed, &link_up, 0);
        if (err != 0)
                goto error;
        dev->data->dev_link.link_status = link_up;

        link_speeds = &dev->data->dev_conf.link_speeds;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
                negotiate = true;

        err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
        if (err != 0)
                goto error;

        allowed_speeds = 0;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

        if (*link_speeds & ~allowed_speeds) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }

        speed = 0x0;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                speed = hw->mac.default_speeds;
        } else {
                if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
                        speed |= NGBE_LINK_SPEED_1GB_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
                        speed |= NGBE_LINK_SPEED_100M_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
                        speed |= NGBE_LINK_SPEED_10M_FULL;
        }

        hw->phy.init_hw(hw);
        err = hw->mac.setup_link(hw, speed, link_up);
        if (err != 0)
                goto error;

skip_link_setup:

        if (rte_intr_allow_others(intr_handle)) {
                ngbe_dev_misc_interrupt_setup(dev);
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        ngbe_dev_lsc_interrupt_setup(dev, TRUE);
                else
                        ngbe_dev_lsc_interrupt_setup(dev, FALSE);
                ngbe_dev_macsec_interrupt_setup(dev);
                ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
        } else {
                rte_intr_callback_unregister(intr_handle,
                                             ngbe_dev_interrupt_handler, dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO,
                                     "LSC won't enable because of no intr multiplex");
        }

        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0 &&
            rte_intr_dp_is_en(intr_handle))
                ngbe_dev_rxq_interrupt_setup(dev);

        /* enable UIO/VFIO intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* resume enabled intr since HW reset */
        ngbe_enable_intr(dev);

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* GPIO0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, 0);
        }

        /*
         * Update link status right before return, because it may
         * start the link configuration process in a separate thread.
         */
        ngbe_dev_link_update(dev, 0);

        ngbe_read_stats_registers(hw, hw_stats);
        hw->offset_loaded = 1;

        return 0;

error:
        PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
        ngbe_dev_clear_queues(dev);
        return -EIO;
}

/*
 * Stop device: disable Rx and Tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

        if (hw->adapter_stopped)
                return 0;

        PMD_INIT_FUNC_TRACE();

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* GPIO0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
        }

        /* disable interrupts */
        ngbe_disable_intr(hw);

        /* reset the NIC */
        ngbe_pf_reset_hw(hw);
        hw->adapter_stopped = 0;

        /* stop adapter */
        ngbe_stop_hw(hw);

        ngbe_dev_clear_queues(dev);

        /* Clear stored conf */
        dev->data->scattered_rx = 0;

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);

        if (!rte_intr_allow_others(intr_handle))
                /* resume to the default handler */
                rte_intr_callback_register(intr_handle,
                                           ngbe_dev_interrupt_handler,
                                           (void *)dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_intr_vec_list_free(intr_handle);

        adapter->rss_reta_updated = 0;

        hw->adapter_stopped = true;
        dev->data->dev_started = 0;

        return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        ngbe_pf_reset_hw(hw);

        ngbe_dev_stop(dev);

        ngbe_dev_free_queues(dev);

        /* reprogram the RAR[0] in case user changed it. */
        ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                ngbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + NGBE_LINK_UP_TIME));

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        ret = eth_ngbe_dev_uninit(dev);
        if (ret != 0)
                return ret;

        ret = eth_ngbe_dev_init(dev, NULL);

        return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
        {                                                       \
                uint32_t current_counter = rd32(hw, reg);       \
                if (current_counter < last_counter)             \
                        current_counter += 0x100000000LL;       \
                if (!hw->offset_loaded)                         \
                        last_counter = current_counter;         \
                counter = current_counter - last_counter;       \
                counter &= 0xFFFFFFFFLL;                        \
        }

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
        {                                                                \
                uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
                uint64_t current_counter_msb = rd32(hw, reg_msb);        \
                uint64_t current_counter = (current_counter_msb << 32) | \
                        current_counter_lsb;                             \
                if (current_counter < last_counter)                      \
                        current_counter += 0x1000000000LL;               \
                if (!hw->offset_loaded)                                  \
                        last_counter = current_counter;                  \
                counter = current_counter - last_counter;                \
                counter &= 0xFFFFFFFFFLL;                                \
        }
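
/*
 * Worked example of the wrap handling above (illustrative only): the
 * hardware counters roll over, so if last_counter is 0xFFFFFFF0 and the
 * 32-bit register now reads 0x00000010, the macro widens the current
 * value to 0x100000010 before subtracting, yielding the true delta 0x20.
 * The 36-bit variant does the same with a 2^36 modulus built from the
 * LSB/MSB register pair. The !hw->offset_loaded branch makes the first
 * read after a reset establish the baseline instead of reporting a delta.
 */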

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
                           struct ngbe_hw_stats *hw_stats)
{
        unsigned int i;

        /* QP Stats */
        for (i = 0; i < hw->nb_rx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
                        hw->qp_last[i].rx_qp_packets,
                        hw_stats->qp[i].rx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
                        hw->qp_last[i].rx_qp_bytes,
                        hw_stats->qp[i].rx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
                        hw->qp_last[i].rx_qp_mc_packets,
                        hw_stats->qp[i].rx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
                        hw->qp_last[i].rx_qp_bc_packets,
                        hw_stats->qp[i].rx_qp_bc_packets);
        }

        for (i = 0; i < hw->nb_tx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
                        hw->qp_last[i].tx_qp_packets,
                        hw_stats->qp[i].tx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
                        hw->qp_last[i].tx_qp_bytes,
                        hw_stats->qp[i].tx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
                        hw->qp_last[i].tx_qp_mc_packets,
                        hw_stats->qp[i].tx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
                        hw->qp_last[i].tx_qp_bc_packets,
                        hw_stats->qp[i].tx_qp_bc_packets);
        }

        /* PB Stats */
        hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
        hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
        hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
        hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
        hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
        hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

        hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
        hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

        /* DMA Stats */
        hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
        hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
        hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
        hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
        hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
        hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

        /* MAC Stats */
        hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
        hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
        hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

        hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
        hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
        hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

        hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
        hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

        hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
        hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
        hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
        hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
        hw_stats->rx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACRX512TO1023L);
        hw_stats->rx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACRX1024TOMAXL);
        hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
        hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
        hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
        hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
        hw_stats->tx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACTX512TO1023L);
        hw_stats->tx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACTX1024TOMAXL);

        hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
        hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
        hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

        /* MNG Stats */
        hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
        hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
        hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
        hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

        /* MACsec Stats */
        hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
        hw_stats->tx_macsec_pkts_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCPKT);
        hw_stats->tx_macsec_pkts_protected +=
                        rd32(hw, NGBE_LSECTX_PROTPKT);
        hw_stats->tx_macsec_octets_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCOCT);
        hw_stats->tx_macsec_octets_protected +=
                        rd32(hw, NGBE_LSECTX_PROTOCT);
        hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
        hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
        hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
        hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
        hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
        hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
        hw_stats->rx_macsec_sc_pkts_unchecked +=
                        rd32(hw, NGBE_LSECRX_UNCHKPKT);
        hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
        hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
        for (i = 0; i < 2; i++) {
                hw_stats->rx_macsec_sa_pkts_ok +=
                        rd32(hw, NGBE_LSECRX_OKPKT(i));
                hw_stats->rx_macsec_sa_pkts_invalid +=
                        rd32(hw, NGBE_LSECRX_INVPKT(i));
                hw_stats->rx_macsec_sa_pkts_notvalid +=
                        rd32(hw, NGBE_LSECRX_BADPKT(i));
        }
        for (i = 0; i < 4; i++) {
                hw_stats->rx_macsec_sa_pkts_unusedsa +=
                        rd32(hw, NGBE_LSECRX_INVSAPKT(i));
                hw_stats->rx_macsec_sa_pkts_notusingsa +=
                        rd32(hw, NGBE_LSECRX_BADSAPKT(i));
        }
        hw_stats->rx_total_missed_packets =
                        hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct ngbe_stat_mappings *stat_mappings =
                        NGBE_DEV_STAT_MAPPINGS(dev);
        uint32_t i, j;

        ngbe_read_stats_registers(hw, hw_stats);

        if (stats == NULL)
                return -EINVAL;

        /* Fill out the rte_eth_stats statistics structure */
        stats->ipackets = hw_stats->rx_packets;
        stats->ibytes = hw_stats->rx_bytes;
        stats->opackets = hw_stats->tx_packets;
        stats->obytes = hw_stats->tx_bytes;

        memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
        memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
        memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
        memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
        memset(&stats->q_errors, 0, sizeof(stats->q_errors));
        for (i = 0; i < NGBE_MAX_QP; i++) {
                uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
                uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
                uint32_t q_map;

                q_map = (stat_mappings->rqsm[n] >> offset)
                                & QMAP_FIELD_RESERVED_BITS_MASK;
                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
                stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
                stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

                q_map = (stat_mappings->tqsm[n] >> offset)
                                & QMAP_FIELD_RESERVED_BITS_MASK;
                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
                stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
                stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
        }

        /* Rx Errors */
        stats->imissed  = hw_stats->rx_total_missed_packets +
                          hw_stats->rx_dma_drop;
        stats->ierrors  = hw_stats->rx_crc_errors +
                          hw_stats->rx_mac_short_packet_dropped +
                          hw_stats->rx_length_errors +
                          hw_stats->rx_undersize_errors +
                          hw_stats->rx_oversize_errors +
                          hw_stats->rx_illegal_byte_errors +
                          hw_stats->rx_error_bytes +
                          hw_stats->rx_fragment_errors;

        /* Tx Errors */
        stats->oerrors  = 0;
        return 0;
}
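
/*
 * Worked example of the queue-to-counter mapping above (illustrative only,
 * assuming 32-bit QSM registers holding four 8-bit map fields each, i.e.
 * NB_QMAP_FIELDS_PER_QSM_REG == 4): queue i = 6 lives in register
 * n = 6 / 4 = 1 at bit offset (6 % 4) * 8 = 16. The extracted q_map then
 * selects which of the RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue slots in
 * rte_eth_stats accumulates that queue's packet and byte counters.
 */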

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

        /* HW registers are cleared on read */
        hw->offset_loaded = 0;
        ngbe_dev_stats_get(dev, NULL);
        hw->offset_loaded = 1;

        /* Reset software totals */
        memset(hw_stats, 0, sizeof(*hw_stats));

        return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
        int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
        return NGBE_NB_HW_STATS +
               NGBE_NB_QP_STATS * nb_queues;
}
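
/*
 * Illustrative example: with 4 Rx queues and 2 Tx queues configured,
 * nb_queues is max(4, 2) = 4, so the device exposes NGBE_NB_HW_STATS
 * device-level xstats plus NGBE_NB_QP_STATS * 4 per-queue xstats.
 */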

static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
        int nb, st;

        /* Extended stats from ngbe_hw_stats */
        if (id < NGBE_NB_HW_STATS) {
                snprintf(name, size, "[hw]%s",
                        rte_ngbe_stats_strings[id].name);
                return 0;
        }
        id -= NGBE_NB_HW_STATS;

        /* Queue Stats */
        if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
                nb = id / NGBE_NB_QP_STATS;
                st = id % NGBE_NB_QP_STATS;
                snprintf(name, size, "[q%u]%s", nb,
                        rte_ngbe_qp_strings[st].name);
                return 0;
        }
        id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

        return -(int)(id + 1);
}
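
/*
 * Illustrative example of the id decomposition above: ids below
 * NGBE_NB_HW_STATS name device-level stats; past that, ids split into a
 * (queue, stat) pair. With the five entries in rte_ngbe_qp_strings
 * (NGBE_NB_QP_STATS == 5), id = NGBE_NB_HW_STATS + 7 gives
 * nb = 7 / 5 = 1 and st = 7 % 5 = 2, i.e. the name "[q1]rx_qp_bytes".
 * ngbe_get_offset_by_id() below applies the same decomposition to locate
 * the value inside struct ngbe_hw_stats.
 */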
1431
1432 static inline int
1433 ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1434 {
1435         int nb, st;
1436
1437         /* Extended stats from ngbe_hw_stats */
1438         if (id < NGBE_NB_HW_STATS) {
1439                 *offset = rte_ngbe_stats_strings[id].offset;
1440                 return 0;
1441         }
1442         id -= NGBE_NB_HW_STATS;
1443
1444         /* Queue Stats */
1445         if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1446                 nb = id / NGBE_NB_QP_STATS;
1447                 st = id % NGBE_NB_QP_STATS;
1448                 *offset = rte_ngbe_qp_strings[st].offset +
1449                         nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1450                 return 0;
1451         }
1452
1453         return -1;
1454 }
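
/*
 * Layout sketch (assumed from the offset math above): the per-queue
 * counters appear to be laid out in ngbe_hw_stats as consecutive blocks
 * of NGBE_NB_QP_STATS 64-bit values, one block per queue, so queue nb's
 * counters start nb * NGBE_NB_QP_STATS * sizeof(uint64_t) bytes after
 * queue 0's.
 */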
1455
1456 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1457         struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1458 {
1459         unsigned int i, count;
1460
1461         count = ngbe_xstats_calc_num(dev);
1462         if (xstats_names == NULL)
1463                 return count;
1464
1465         /* Note: limit >= cnt_stats is checked upstream
1466          * in rte_eth_xstats_get_names()
1467          */
1468         limit = min(limit, count);
1469
1470         /* Extended stats from ngbe_hw_stats */
1471         for (i = 0; i < limit; i++) {
1472                 if (ngbe_get_name_by_id(i, xstats_names[i].name,
1473                         sizeof(xstats_names[i].name))) {
1474                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1475                         break;
1476                 }
1477         }
1478
1479         return i;
1480 }
1481
1482 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1483         const uint64_t *ids,
1484         struct rte_eth_xstat_name *xstats_names,
1485         unsigned int limit)
1486 {
1487         unsigned int i;
1488
1489         if (ids == NULL)
1490                 return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1491
1492         for (i = 0; i < limit; i++) {
1493                 if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1494                                 sizeof(xstats_names[i].name))) {
1495                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", (unsigned int)ids[i]);
1496                         return -1;
1497                 }
1498         }
1499
1500         return i;
1501 }
1502
1503 static int
1504 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1505                                          unsigned int limit)
1506 {
1507         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1508         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1509         unsigned int i, count;
1510
1511         ngbe_read_stats_registers(hw, hw_stats);
1512
1513         /* If this is a reset, xstats is NULL and the registers have
1514          * already been cleared by reading them.
1515          */
1516         count = ngbe_xstats_calc_num(dev);
1517         if (xstats == NULL)
1518                 return count;
1519
1520         limit = min(limit, ngbe_xstats_calc_num(dev));
1521
1522         /* Extended stats from ngbe_hw_stats */
1523         for (i = 0; i < limit; i++) {
1524                 uint32_t offset = 0;
1525
1526                 if (ngbe_get_offset_by_id(i, &offset)) {
1527                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1528                         break;
1529                 }
1530                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1531                 xstats[i].id = i;
1532         }
1533
1534         return i;
1535 }
1536
1537 static int
1538 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1539                                          unsigned int limit)
1540 {
1541         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1542         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1543         unsigned int i, count;
1544
1545         ngbe_read_stats_registers(hw, hw_stats);
1546
1547         /* If this is a reset, values is NULL and the registers have
1548          * already been cleared by reading them.
1549          */
1550         count = ngbe_xstats_calc_num(dev);
1551         if (values == NULL)
1552                 return count;
1553
1554         limit = min(limit, ngbe_xstats_calc_num(dev));
1555
1556         /* Extended stats from ngbe_hw_stats */
1557         for (i = 0; i < limit; i++) {
1558                 uint32_t offset;
1559
1560                 if (ngbe_get_offset_by_id(i, &offset)) {
1561                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1562                         break;
1563                 }
1564                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1565         }
1566
1567         return i;
1568 }
1569
1570 static int
1571 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1572                 uint64_t *values, unsigned int limit)
1573 {
1574         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1575         unsigned int i;
1576
1577         if (ids == NULL)
1578                 return ngbe_dev_xstats_get_(dev, values, limit);
1579
1580         for (i = 0; i < limit; i++) {
1581                 uint32_t offset;
1582
1583                 if (ngbe_get_offset_by_id(ids[i], &offset)) {
1584                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", (unsigned int)ids[i]);
1585                         break;
1586                 }
1587                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1588         }
1589
1590         return i;
1591 }
1592
1593 static int
1594 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1595 {
1596         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1597         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1598
1599         /* HW registers are cleared on read */
1600         hw->offset_loaded = 0;
1601         ngbe_read_stats_registers(hw, hw_stats);
1602         hw->offset_loaded = 1;
1603
1604         /* Reset software totals */
1605         memset(hw_stats, 0, sizeof(*hw_stats));
1606
1607         return 0;
1608 }
1609
1610 static int
1611 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1612 {
1613         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1614         int ret;
1615
1616         ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1617
1618         if (ret < 0)
1619                 return -EINVAL;
1620
1621         ret += 1; /* account for the terminating '\0' */
1622         if (fw_size < (size_t)ret)
1623                 return ret;
1624
1625         return 0;
1626 }
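
/*
 * Usage sketch (eeprom_id value assumed): 0x00012345 formats as the
 * 10-character string "0x00012345", so ret becomes 11 with the '\0'
 * included; if fw_size is smaller than 11, the required size (11) is
 * returned instead of 0 and the caller should retry with a larger buffer.
 */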
1627
1628 static int
1629 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1630 {
1631         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1632         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1633
1634         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1635         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1636         dev_info->min_rx_bufsize = 1024;
1637         dev_info->max_rx_pktlen = 15872;
1638         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1639         dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1640         dev_info->max_vfs = pci_dev->max_vfs;
1641         dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1642         dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1643                                      dev_info->rx_queue_offload_capa);
1644         dev_info->tx_queue_offload_capa = 0;
1645         dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1646
1647         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1648                 .rx_thresh = {
1649                         .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1650                         .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1651                         .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1652                 },
1653                 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1654                 .rx_drop_en = 0,
1655                 .offloads = 0,
1656         };
1657
1658         dev_info->default_txconf = (struct rte_eth_txconf) {
1659                 .tx_thresh = {
1660                         .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1661                         .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1662                         .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1663                 },
1664                 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1665                 .offloads = 0,
1666         };
1667
1668         dev_info->rx_desc_lim = rx_desc_lim;
1669         dev_info->tx_desc_lim = tx_desc_lim;
1670
1671         dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1672         dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1673         dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1674
1675         dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1676                                 RTE_ETH_LINK_SPEED_10M;
1677
1678         /* Driver-preferred Rx/Tx parameters */
1679         dev_info->default_rxportconf.burst_size = 32;
1680         dev_info->default_txportconf.burst_size = 32;
1681         dev_info->default_rxportconf.nb_queues = 1;
1682         dev_info->default_txportconf.nb_queues = 1;
1683         dev_info->default_rxportconf.ring_size = 256;
1684         dev_info->default_txportconf.ring_size = 256;
1685
1686         return 0;
1687 }
1688
1689 const uint32_t *
1690 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1691 {
1692         if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1693             dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1694             dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1695             dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1696                 return ngbe_get_supported_ptypes();
1697
1698         return NULL;
1699 }
1700
1701 /* return 0 means link status changed, -1 means not changed */
1702 int
1703 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1704                             int wait_to_complete)
1705 {
1706         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1707         struct rte_eth_link link;
1708         u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1709         u32 lan_speed = 0;
1710         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1711         bool link_up;
1712         int err;
1713         int wait = 1;
1714
1715         memset(&link, 0, sizeof(link));
1716         link.link_status = RTE_ETH_LINK_DOWN;
1717         link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1718         link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1719         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1720                         ~RTE_ETH_LINK_SPEED_AUTONEG);
1721
1722         hw->mac.get_link_status = true;
1723
1724         if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1725                 return rte_eth_linkstatus_set(dev, &link);
1726
1727         /* no need to wait for completion if it wasn't requested or the LSC interrupt is enabled */
1728         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1729                 wait = 0;
1730
1731         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1732         if (err != 0) {
1733                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1734                 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1735                 return rte_eth_linkstatus_set(dev, &link);
1736         }
1737
1738         if (!link_up)
1739                 return rte_eth_linkstatus_set(dev, &link);
1740
1741         intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1742         link.link_status = RTE_ETH_LINK_UP;
1743         link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1744
1745         switch (link_speed) {
1746         default:
1747         case NGBE_LINK_SPEED_UNKNOWN:
1748                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1749                 break;
1750
1751         case NGBE_LINK_SPEED_10M_FULL:
1752                 link.link_speed = RTE_ETH_SPEED_NUM_10M;
1753                 lan_speed = 0;
1754                 break;
1755
1756         case NGBE_LINK_SPEED_100M_FULL:
1757                 link.link_speed = RTE_ETH_SPEED_NUM_100M;
1758                 lan_speed = 1;
1759                 break;
1760
1761         case NGBE_LINK_SPEED_1GB_FULL:
1762                 link.link_speed = RTE_ETH_SPEED_NUM_1G;
1763                 lan_speed = 2;
1764                 break;
1765         }
1766
1767         if (hw->is_pf) {
1768                 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1769                 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1770                                 NGBE_LINK_SPEED_100M_FULL |
1771                                 NGBE_LINK_SPEED_10M_FULL)) {
1772                         wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1773                                 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1774                 }
1775         }
1776
1777         return rte_eth_linkstatus_set(dev, &link);
1778 }
1779
1780 static int
1781 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1782 {
1783         return ngbe_dev_link_update_share(dev, wait_to_complete);
1784 }
1785
1786 static int
1787 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1788 {
1789         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1790         uint32_t fctrl;
1791
1792         fctrl = rd32(hw, NGBE_PSRCTL);
1793         fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1794         wr32(hw, NGBE_PSRCTL, fctrl);
1795
1796         return 0;
1797 }
1798
1799 static int
1800 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1801 {
1802         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1803         uint32_t fctrl;
1804
1805         fctrl = rd32(hw, NGBE_PSRCTL);
1806         fctrl &= (~NGBE_PSRCTL_UCP);
1807         if (dev->data->all_multicast == 1)
1808                 fctrl |= NGBE_PSRCTL_MCP;
1809         else
1810                 fctrl &= (~NGBE_PSRCTL_MCP);
1811         wr32(hw, NGBE_PSRCTL, fctrl);
1812
1813         return 0;
1814 }
1815
1816 static int
1817 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1818 {
1819         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1820         uint32_t fctrl;
1821
1822         fctrl = rd32(hw, NGBE_PSRCTL);
1823         fctrl |= NGBE_PSRCTL_MCP;
1824         wr32(hw, NGBE_PSRCTL, fctrl);
1825
1826         return 0;
1827 }
1828
1829 static int
1830 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1831 {
1832         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1833         uint32_t fctrl;
1834
1835         if (dev->data->promiscuous == 1)
1836                 return 0; /* must remain in all_multicast mode */
1837
1838         fctrl = rd32(hw, NGBE_PSRCTL);
1839         fctrl &= (~NGBE_PSRCTL_MCP);
1840         wr32(hw, NGBE_PSRCTL, fctrl);
1841
1842         return 0;
1843 }
1844
1845 /**
1846  * Clears the interrupt causes and enables the interrupt.
1847  * It is called only once, during NIC initialization.
1848  *
1849  * @param dev
1850  *  Pointer to struct rte_eth_dev.
1851  * @param on
1852  *  Enable or Disable.
1853  *
1854  * @return
1855  *  - On success, zero.
1856  *  - On failure, a negative value.
1857  */
1858 static int
1859 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1860 {
1861         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1862
1863         ngbe_dev_link_status_print(dev);
1864         if (on != 0) {
1865                 intr->mask_misc |= NGBE_ICRMISC_PHY;
1866                 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1867         } else {
1868                 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1869                 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
1870         }
1871
1872         return 0;
1873 }
1874
1875 /**
1876  * Clears the interrupt causes and enables the interrupt.
1877  * It is called only once, during NIC initialization.
1878  *
1879  * @param dev
1880  *  Pointer to struct rte_eth_dev.
1881  *
1882  * @return
1883  *  - On success, zero.
1884  *  - On failure, a negative value.
1885  */
1886 static int
1887 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1888 {
1889         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1890         u64 mask;
1891
1892         mask = NGBE_ICR_MASK;
1893         mask &= (1ULL << NGBE_MISC_VEC_ID);
1894         intr->mask |= mask;
1895         intr->mask_misc |= NGBE_ICRMISC_GPIO;
1896
1897         return 0;
1898 }
1899
1900 /**
1901  * Clears the interrupt causes and enables the interrupt.
1902  * It is called only once, during NIC initialization.
1903  *
1904  * @param dev
1905  *  Pointer to struct rte_eth_dev.
1906  *
1907  * @return
1908  *  - On success, zero.
1909  *  - On failure, a negative value.
1910  */
1911 static int
1912 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1913 {
1914         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1915         u64 mask;
1916
1917         mask = NGBE_ICR_MASK;
1918         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
1919         intr->mask |= mask;
1920
1921         return 0;
1922 }
1923
1924 /**
1925  * Clears the interrupt causes and enables the interrupt.
1926  * It is called only once, during NIC initialization.
1927  *
1928  * @param dev
1929  *  Pointer to struct rte_eth_dev.
1930  *
1931  * @return
1932  *  - On success, zero.
1933  *  - On failure, a negative value.
1934  */
1935 static int
1936 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1937 {
1938         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1939
1940         intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
1941
1942         return 0;
1943 }
1944
1945 /*
1946  * Reads the ICR and sets a flag for the link update.
1947  *
1948  * @param dev
1949  *  Pointer to struct rte_eth_dev.
1950  *
1951  * @return
1952  *  - On success, zero.
1953  *  - On failure, a negative value.
1954  */
1955 static int
1956 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1957 {
1958         uint32_t eicr;
1959         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1960         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1961
1962         /* clear all interrupt cause masks */
1963         ngbe_disable_intr(hw);
1964
1965         /* read the clear-on-read NIC registers here */
1966         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
1967         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
1968
1969         intr->flags = 0;
1970
1971         /* set flag for async link update */
1972         if (eicr & NGBE_ICRMISC_PHY)
1973                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1974
1975         if (eicr & NGBE_ICRMISC_VFMBX)
1976                 intr->flags |= NGBE_FLAG_MAILBOX;
1977
1978         if (eicr & NGBE_ICRMISC_LNKSEC)
1979                 intr->flags |= NGBE_FLAG_MACSEC;
1980
1981         if (eicr & NGBE_ICRMISC_GPIO)
1982                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1983
1984         return 0;
1985 }
1986
1987 /**
1988  * Gets and prints the link status.
1989  *
1990  * @param dev
1991  *  Pointer to struct rte_eth_dev.
1996  */
1997 static void
1998 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
1999 {
2000         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2001         struct rte_eth_link link;
2002
2003         rte_eth_linkstatus_get(dev, &link);
2004
2005         if (link.link_status == RTE_ETH_LINK_UP) {
2006                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2007                                         (int)(dev->data->port_id),
2008                                         (unsigned int)link.link_speed,
2009                         link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2010                                         "full-duplex" : "half-duplex");
2011         } else {
2012                 PMD_INIT_LOG(INFO, "Port %d: Link Down",
2013                                 (int)(dev->data->port_id));
2014         }
2015         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2016                                 pci_dev->addr.domain,
2017                                 pci_dev->addr.bus,
2018                                 pci_dev->addr.devid,
2019                                 pci_dev->addr.function);
2020 }
2021
2022 /*
2023  * Executes link_update after an interrupt has occurred.
2024  *
2025  * @param dev
2026  *  Pointer to struct rte_eth_dev.
2027  *
2028  * @return
2029  *  - On success, zero.
2030  *  - On failure, a negative value.
2031  */
2032 static int
2033 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2034 {
2035         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2036         int64_t timeout;
2037
2038         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2039
2040         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2041                 struct rte_eth_link link;
2042
2043                 /* get the link status before the link update, for later prediction */
2044                 rte_eth_linkstatus_get(dev, &link);
2045
2046                 ngbe_dev_link_update(dev, 0);
2047
2048                 /* link is likely to come up */
2049                 if (link.link_status != RTE_ETH_LINK_UP)
2050                         /* handle it 1 sec later, waiting for it to stabilize */
2051                         timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
2052                 /* link is likely to go down */
2053                 else
2054                         /* handle it 4 sec later, waiting for it to stabilize */
2055                         timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
2056
2057                 ngbe_dev_link_status_print(dev);
2058                 if (rte_eal_alarm_set(timeout * 1000,
2059                                       ngbe_dev_interrupt_delayed_handler,
2060                                       (void *)dev) < 0) {
2061                         PMD_DRV_LOG(ERR, "Error setting alarm");
2062                 } else {
2063                         /* remember original mask */
2064                         intr->mask_misc_orig = intr->mask_misc;
2065                         /* only disable lsc interrupt */
2066                         intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2067
2068                         intr->mask_orig = intr->mask;
2069                         /* only disable all misc interrupts */
2070                         intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
2071                 }
2072         }
2073
2074         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2075         ngbe_enable_intr(dev);
2076
2077         return 0;
2078 }
2079
2080 /**
2081  * Interrupt handler registered as an alarm callback for the delayed
2082  * handling of a specific interrupt, waiting for the NIC state to become
2083  * stable. As the ngbe interrupt state is not stable right after the
2084  * link goes down, it needs to wait 4 seconds to get a stable status.
2085  *
2086  * @param param
2087  *  The address of parameter (struct rte_eth_dev *) registered before.
2088  */
2089 static void
2090 ngbe_dev_interrupt_delayed_handler(void *param)
2091 {
2092         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2093         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2094         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2095         uint32_t eicr;
2096
2097         ngbe_disable_intr(hw);
2098
2099         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2100
2101         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2102                 ngbe_dev_link_update(dev, 0);
2103                 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2104                 ngbe_dev_link_status_print(dev);
2105                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2106                                               NULL);
2107         }
2108
2109         if (intr->flags & NGBE_FLAG_MACSEC) {
2110                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2111                                               NULL);
2112                 intr->flags &= ~NGBE_FLAG_MACSEC;
2113         }
2114
2115         /* restore original mask */
2116         intr->mask_misc = intr->mask_misc_orig;
2117         intr->mask_misc_orig = 0;
2118         intr->mask = intr->mask_orig;
2119         intr->mask_orig = 0;
2120
2121         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2122         ngbe_enable_intr(dev);
2123 }
2124
2125 /**
2126  * Interrupt handler triggered by the NIC for handling
2127  * a specific interrupt.
2128  *
2129  * @param param
2130  *  The address of parameter (struct rte_eth_dev *) registered before.
2131  */
2132 static void
2133 ngbe_dev_interrupt_handler(void *param)
2134 {
2135         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2136
2137         ngbe_dev_interrupt_get_status(dev);
2138         ngbe_dev_interrupt_action(dev);
2139 }
2140
2141 int
2142 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2143                           struct rte_eth_rss_reta_entry64 *reta_conf,
2144                           uint16_t reta_size)
2145 {
2146         uint8_t i, j, mask;
2147         uint32_t reta;
2148         uint16_t idx, shift;
2149         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2150         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2151
2152         PMD_INIT_FUNC_TRACE();
2153
2154         if (!hw->is_pf) {
2155                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2156                         "NIC.");
2157                 return -ENOTSUP;
2158         }
2159
2160         if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2161                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2162                         "(%d) doesn't match the number the hardware can support "
2163                         "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2164                 return -EINVAL;
2165         }
2166
2167         for (i = 0; i < reta_size; i += 4) {
2168                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
2169                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2170                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2171                 if (!mask)
2172                         continue;
2173
2174                 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2175                 for (j = 0; j < 4; j++) {
2176                         if (RS8(mask, j, 0x1)) {
2177                                 reta  &= ~(MS32(8 * j, 0xFF));
2178                                 reta |= LS32(reta_conf[idx].reta[shift + j],
2179                                                 8 * j, 0xFF);
2180                         }
2181                 }
2182                 wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2183         }
2184         adapter->rss_reta_updated = 1;
2185
2186         return 0;
2187 }
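
/*
 * Index math example (entry number chosen for illustration): for
 * redirection-table entry i = 68, idx = 68 / RTE_ETH_RETA_GROUP_SIZE = 1
 * and shift = 68 % RTE_ETH_RETA_GROUP_SIZE = 4, so bits [7:4] of
 * reta_conf[1].mask cover the four entries packed into byte lanes of
 * RSS table register 68 >> 2 = 17.
 */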
2188
2189 int
2190 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2191                          struct rte_eth_rss_reta_entry64 *reta_conf,
2192                          uint16_t reta_size)
2193 {
2194         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2195         uint8_t i, j, mask;
2196         uint32_t reta;
2197         uint16_t idx, shift;
2198
2199         PMD_INIT_FUNC_TRACE();
2200
2201         if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2202                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2203                         "(%d) doesn't match the number the hardware can support "
2204                         "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2205                 return -EINVAL;
2206         }
2207
2208         for (i = 0; i < reta_size; i += 4) {
2209                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
2210                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2211                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2212                 if (!mask)
2213                         continue;
2214
2215                 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2216                 for (j = 0; j < 4; j++) {
2217                         if (RS8(mask, j, 0x1))
2218                                 reta_conf[idx].reta[shift + j] =
2219                                         (uint16_t)RS32(reta, 8 * j, 0xFF);
2220                 }
2221         }
2222
2223         return 0;
2224 }
2225
2226 static int
2227 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2228                                 uint32_t index, uint32_t pool)
2229 {
2230         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2231         uint32_t enable_addr = 1;
2232
2233         return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2234                              pool, enable_addr);
2235 }
2236
2237 static void
2238 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2239 {
2240         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2241
2242         ngbe_clear_rar(hw, index);
2243 }
2244
2245 static int
2246 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2247 {
2248         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2249
2250         ngbe_remove_rar(dev, 0);
2251         ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2252
2253         return 0;
2254 }
2255
2256 static int
2257 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2258 {
2259         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2260         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
2261         struct rte_eth_dev_data *dev_data = dev->data;
2262
2263         /* If device is started, refuse mtu that requires the support of
2264          * scattered packets when this feature has not been enabled before.
2265          */
2266         if (dev_data->dev_started && !dev_data->scattered_rx &&
2267             (frame_size + 2 * NGBE_VLAN_TAG_SIZE >
2268              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2269                 PMD_INIT_LOG(ERR, "Stop port first.");
2270                 return -EINVAL;
2271         }
2272
2273         if (hw->mode)
2274                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2275                         NGBE_FRAME_SIZE_MAX);
2276         else
2277                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2278                         NGBE_FRMSZ_MAX(frame_size));
2279
2280         return 0;
2281 }
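
/*
 * Frame size example (standard MTU assumed): for mtu = 1500, frame_size
 * is 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) + 4 = 1522,
 * where the extra 4 bytes presumably leave room for a single VLAN tag.
 */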
2282
2283 static uint32_t
2284 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2285 {
2286         uint32_t vector = 0;
2287
2288         switch (hw->mac.mc_filter_type) {
2289         case 0:   /* use bits [47:36] of the address */
2290                 vector = ((uc_addr->addr_bytes[4] >> 4) |
2291                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2292                 break;
2293         case 1:   /* use bits [46:35] of the address */
2294                 vector = ((uc_addr->addr_bytes[4] >> 3) |
2295                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2296                 break;
2297         case 2:   /* use bits [45:34] of the address */
2298                 vector = ((uc_addr->addr_bytes[4] >> 2) |
2299                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2300                 break;
2301         case 3:   /* use bits [43:32] of the address */
2302                 vector = ((uc_addr->addr_bytes[4]) |
2303                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2304                 break;
2305         default:  /* Invalid mc_filter_type */
2306                 break;
2307         }
2308
2309         /* vector can only be 12-bits or boundary will be exceeded */
2310         /* vector can only be 12 bits, or the table boundary will be exceeded */
2311         return vector;
2312 }
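
/*
 * Worked example (address assumed): with mc_filter_type == 0 and a MAC
 * address ending in ...:AB:CD, addr_bytes[4] = 0xAB and
 * addr_bytes[5] = 0xCD, so vector = (0xAB >> 4) | (0xCD << 4) = 0xCDA,
 * i.e. bits [47:36] of the address.
 */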
2313
2314 static int
2315 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2316                         struct rte_ether_addr *mac_addr, uint8_t on)
2317 {
2318         uint32_t vector;
2319         uint32_t uta_idx;
2320         uint32_t reg_val;
2321         uint32_t uta_mask;
2322         uint32_t psrctl;
2323
2324         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2325         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2326
2327         vector = ngbe_uta_vector(hw, mac_addr);
2328         uta_idx = (vector >> 5) & 0x7F;
2329         uta_mask = 0x1UL << (vector & 0x1F);
2330
2331         if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2332                 return 0;
2333
2334         reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2335         if (on) {
2336                 uta_info->uta_in_use++;
2337                 reg_val |= uta_mask;
2338                 uta_info->uta_shadow[uta_idx] |= uta_mask;
2339         } else {
2340                 uta_info->uta_in_use--;
2341                 reg_val &= ~uta_mask;
2342                 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2343         }
2344
2345         wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2346
2347         psrctl = rd32(hw, NGBE_PSRCTL);
2348         if (uta_info->uta_in_use > 0)
2349                 psrctl |= NGBE_PSRCTL_UCHFENA;
2350         else
2351                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2352
2353         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2354         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2355         wr32(hw, NGBE_PSRCTL, psrctl);
2356
2357         return 0;
2358 }
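
/*
 * Continuing the example above (vector value assumed): for
 * vector = 0xCDA (3290), uta_idx = (3290 >> 5) & 0x7F = 102 and
 * uta_mask = 1 << (3290 & 0x1F) = bit 26, so the hash sets bit 26 of
 * NGBE_UCADDRTBL(102).
 */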
2359
2360 static int
2361 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2362 {
2363         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2364         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2365         uint32_t psrctl;
2366         int i;
2367
2368         if (on) {
2369                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2370                         uta_info->uta_shadow[i] = ~0;
2371                         wr32(hw, NGBE_UCADDRTBL(i), ~0);
2372                 }
2373         } else {
2374                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2375                         uta_info->uta_shadow[i] = 0;
2376                         wr32(hw, NGBE_UCADDRTBL(i), 0);
2377                 }
2378         }
2379
2380         psrctl = rd32(hw, NGBE_PSRCTL);
2381         if (on)
2382                 psrctl |= NGBE_PSRCTL_UCHFENA;
2383         else
2384                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2385
2386         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2387         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2388         wr32(hw, NGBE_PSRCTL, psrctl);
2389
2390         return 0;
2391 }
2392
2393 /**
2394  * Set the IVAR registers, mapping interrupt causes to vectors
2395  * @param hw
2396  *  pointer to ngbe_hw struct
2397  * @param direction
2398  *  0 for Rx, 1 for Tx, -1 for other causes
2399  * @param queue
2400  *  queue to map the corresponding interrupt to
2401  * @param msix_vector
2402  *  the vector to map to the corresponding queue
2403  */
2404 void
2405 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2406                    uint8_t queue, uint8_t msix_vector)
2407 {
2408         uint32_t tmp, idx;
2409
2410         if (direction == -1) {
2411                 /* other causes */
2412                 msix_vector |= NGBE_IVARMISC_VLD;
2413                 idx = 0;
2414                 tmp = rd32(hw, NGBE_IVARMISC);
2415                 tmp &= ~(0xFF << idx);
2416                 tmp |= (msix_vector << idx);
2417                 wr32(hw, NGBE_IVARMISC, tmp);
2418         } else {
2419                 /* rx or tx causes */
2420                 /* Workaround for lost ICR */
2421                 idx = ((16 * (queue & 1)) + (8 * direction));
2422                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2423                 tmp &= ~(0xFF << idx);
2424                 tmp |= (msix_vector << idx);
2425                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2426         }
2427 }
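
/*
 * Mapping example (queue number assumed): for an Rx cause on queue 3
 * (direction = 0), idx = 16 * (3 & 1) + 8 * 0 = 16, so the vector lands
 * in bits [23:16] of NGBE_IVAR(3 >> 1) = NGBE_IVAR(1); each IVAR
 * register thus holds the Rx and Tx entries for a pair of queues.
 */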
2428
2429 /**
2430  * Sets up the hardware to properly generate MSI-X interrupts
2431  * @param dev
2432  *  Pointer to struct rte_eth_dev.
2433  */
2434 static void
2435 ngbe_configure_msix(struct rte_eth_dev *dev)
2436 {
2437         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2438         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2439         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2440         uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2441         uint32_t vec = NGBE_MISC_VEC_ID;
2442         uint32_t gpie;
2443
2444         /*
2445          * The MSI-X register is not configured if no mapping has been
2446          * done between interrupt vectors and event fds, but if MSI-X has
2447          * already been enabled, auto clean, auto mask and throttling
2448          * still need to be configured.
2449          */
2450         gpie = rd32(hw, NGBE_GPIE);
2451         if (!rte_intr_dp_is_en(intr_handle) &&
2452             !(gpie & NGBE_GPIE_MSIX))
2453                 return;
2454
2455         if (rte_intr_allow_others(intr_handle)) {
2456                 base = NGBE_RX_VEC_START;
2457                 vec = base;
2458         }
2459
2460         /* setup GPIE for MSI-X mode */
2461         gpie = rd32(hw, NGBE_GPIE);
2462         gpie |= NGBE_GPIE_MSIX;
2463         wr32(hw, NGBE_GPIE, gpie);
2464
2465         /* Populate the IVAR table and set the ITR values to the
2466          * corresponding register.
2467          */
2468         if (rte_intr_dp_is_en(intr_handle)) {
2469                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2470                         queue_id++) {
2471                         /* by default, 1:1 mapping */
2472                         ngbe_set_ivar_map(hw, 0, queue_id, vec);
2473                         rte_intr_vec_list_index_set(intr_handle,
2474                                                            queue_id, vec);
2475                         if (vec < base + rte_intr_nb_efd_get(intr_handle)
2476                             - 1)
2477                                 vec++;
2478                 }
2479
2480                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2481         }
2482         wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2483                         NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2484                         | NGBE_ITR_WRDSA);
2485 }
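
/*
 * Vector assignment sketch (queue and efd counts assumed): with 4 Rx
 * queues and 2 event fds, vec starts at NGBE_RX_VEC_START; queue 0 gets
 * vec and queue 1 gets vec + 1, after which vec stops incrementing, so
 * queues 2 and 3 share vec + 1 with queue 1.
 */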
2486
2487 static u8 *
2488 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2489                         u8 **mc_addr_ptr, u32 *vmdq)
2490 {
2491         u8 *mc_addr;
2492
2493         *vmdq = 0;
2494         mc_addr = *mc_addr_ptr;
2495         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2496         return mc_addr;
2497 }
2498
2499 int
2500 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2501                           struct rte_ether_addr *mc_addr_set,
2502                           uint32_t nb_mc_addr)
2503 {
2504         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2505         u8 *mc_addr_list;
2506
2507         mc_addr_list = (u8 *)mc_addr_set;
2508         return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2509                                          ngbe_dev_addr_list_itr, TRUE);
2510 }
2511
2512 static const struct eth_dev_ops ngbe_eth_dev_ops = {
2513         .dev_configure              = ngbe_dev_configure,
2514         .dev_infos_get              = ngbe_dev_info_get,
2515         .dev_start                  = ngbe_dev_start,
2516         .dev_stop                   = ngbe_dev_stop,
2517         .dev_close                  = ngbe_dev_close,
2518         .dev_reset                  = ngbe_dev_reset,
2519         .promiscuous_enable         = ngbe_dev_promiscuous_enable,
2520         .promiscuous_disable        = ngbe_dev_promiscuous_disable,
2521         .allmulticast_enable        = ngbe_dev_allmulticast_enable,
2522         .allmulticast_disable       = ngbe_dev_allmulticast_disable,
2523         .link_update                = ngbe_dev_link_update,
2524         .stats_get                  = ngbe_dev_stats_get,
2525         .xstats_get                 = ngbe_dev_xstats_get,
2526         .xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
2527         .stats_reset                = ngbe_dev_stats_reset,
2528         .xstats_reset               = ngbe_dev_xstats_reset,
2529         .xstats_get_names           = ngbe_dev_xstats_get_names,
2530         .xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
2531         .fw_version_get             = ngbe_fw_version_get,
2532         .dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
2533         .mtu_set                    = ngbe_dev_mtu_set,
2534         .vlan_filter_set            = ngbe_vlan_filter_set,
2535         .vlan_tpid_set              = ngbe_vlan_tpid_set,
2536         .vlan_offload_set           = ngbe_vlan_offload_set,
2537         .vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
2538         .rx_queue_start             = ngbe_dev_rx_queue_start,
2539         .rx_queue_stop              = ngbe_dev_rx_queue_stop,
2540         .tx_queue_start             = ngbe_dev_tx_queue_start,
2541         .tx_queue_stop              = ngbe_dev_tx_queue_stop,
2542         .rx_queue_setup             = ngbe_dev_rx_queue_setup,
2543         .rx_queue_release           = ngbe_dev_rx_queue_release,
2544         .tx_queue_setup             = ngbe_dev_tx_queue_setup,
2545         .tx_queue_release           = ngbe_dev_tx_queue_release,
2546         .mac_addr_add               = ngbe_add_rar,
2547         .mac_addr_remove            = ngbe_remove_rar,
2548         .mac_addr_set               = ngbe_set_default_mac_addr,
2549         .uc_hash_table_set          = ngbe_uc_hash_table_set,
2550         .uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
2551         .reta_update                = ngbe_dev_rss_reta_update,
2552         .reta_query                 = ngbe_dev_rss_reta_query,
2553         .rss_hash_update            = ngbe_dev_rss_hash_update,
2554         .rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
2555         .set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
2556         .rx_burst_mode_get          = ngbe_rx_burst_mode_get,
2557         .tx_burst_mode_get          = ngbe_tx_burst_mode_get,
2558 };
2559
2560 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
2561 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
2562 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
2563
2564 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
2565 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
2566
2567 #ifdef RTE_ETHDEV_DEBUG_RX
2568         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
2569 #endif
2570 #ifdef RTE_ETHDEV_DEBUG_TX
2571         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
2572 #endif