/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
                                        uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
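
/*
 * Worked example of the bitmap math above (illustrative only): with
 * 32-bit bitmap words (sizeof((h)->bitmap[0]) * NBBY == 32), queue 35
 * maps to word idx = 35 / 32 = 1 and bit = 35 % 32 = 3, so
 * NGBE_SET_HWSTRIP(h, 35) sets bit 3 of h->bitmap[1].
 */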

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_TXD_ALIGN,
        .nb_seg_max = NGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
        /* MNG RxTx */
        HW_XSTAT(mng_bmc2host_packets),
        HW_XSTAT(mng_host2bmc_packets),
        /* Basic RxTx */
        HW_XSTAT(rx_packets),
        HW_XSTAT(tx_packets),
        HW_XSTAT(rx_bytes),
        HW_XSTAT(tx_bytes),
        HW_XSTAT(rx_total_bytes),
        HW_XSTAT(rx_total_packets),
        HW_XSTAT(tx_total_packets),
        HW_XSTAT(rx_total_missed_packets),
        HW_XSTAT(rx_broadcast_packets),
        HW_XSTAT(rx_multicast_packets),
        HW_XSTAT(rx_management_packets),
        HW_XSTAT(tx_management_packets),
        HW_XSTAT(rx_management_dropped),

        /* Basic Error */
        HW_XSTAT(rx_crc_errors),
        HW_XSTAT(rx_illegal_byte_errors),
        HW_XSTAT(rx_error_bytes),
        HW_XSTAT(rx_mac_short_packet_dropped),
        HW_XSTAT(rx_length_errors),
        HW_XSTAT(rx_undersize_errors),
        HW_XSTAT(rx_fragment_errors),
        HW_XSTAT(rx_oversize_errors),
        HW_XSTAT(rx_jabber_errors),
        HW_XSTAT(rx_l3_l4_xsum_error),
        HW_XSTAT(mac_local_errors),
        HW_XSTAT(mac_remote_errors),

        /* MACSEC */
        HW_XSTAT(tx_macsec_pkts_untagged),
        HW_XSTAT(tx_macsec_pkts_encrypted),
        HW_XSTAT(tx_macsec_pkts_protected),
        HW_XSTAT(tx_macsec_octets_encrypted),
        HW_XSTAT(tx_macsec_octets_protected),
        HW_XSTAT(rx_macsec_pkts_untagged),
        HW_XSTAT(rx_macsec_pkts_badtag),
        HW_XSTAT(rx_macsec_pkts_nosci),
        HW_XSTAT(rx_macsec_pkts_unknownsci),
        HW_XSTAT(rx_macsec_octets_decrypted),
        HW_XSTAT(rx_macsec_octets_validated),
        HW_XSTAT(rx_macsec_sc_pkts_unchecked),
        HW_XSTAT(rx_macsec_sc_pkts_delayed),
        HW_XSTAT(rx_macsec_sc_pkts_late),
        HW_XSTAT(rx_macsec_sa_pkts_ok),
        HW_XSTAT(rx_macsec_sa_pkts_invalid),
        HW_XSTAT(rx_macsec_sa_pkts_notvalid),
        HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
        HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

        /* MAC RxTx */
        HW_XSTAT(rx_size_64_packets),
        HW_XSTAT(rx_size_65_to_127_packets),
        HW_XSTAT(rx_size_128_to_255_packets),
        HW_XSTAT(rx_size_256_to_511_packets),
        HW_XSTAT(rx_size_512_to_1023_packets),
        HW_XSTAT(rx_size_1024_to_max_packets),
        HW_XSTAT(tx_size_64_packets),
        HW_XSTAT(tx_size_65_to_127_packets),
        HW_XSTAT(tx_size_128_to_255_packets),
        HW_XSTAT(tx_size_256_to_511_packets),
        HW_XSTAT(tx_size_512_to_1023_packets),
        HW_XSTAT(tx_size_1024_to_max_packets),

        /* Flow Control */
        HW_XSTAT(tx_xon_packets),
        HW_XSTAT(rx_xon_packets),
        HW_XSTAT(tx_xoff_packets),
        HW_XSTAT(rx_xoff_packets),

        HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
        HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
        HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
        HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
                           sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
        QP_XSTAT(rx_qp_packets),
        QP_XSTAT(tx_qp_packets),
        QP_XSTAT(rx_qp_bytes),
        QP_XSTAT(tx_qp_bytes),
        QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
                           sizeof(rte_ngbe_qp_strings[0]))
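
/*
 * Each table entry pairs an exported xstat name with the byte offset of
 * the corresponding counter inside struct ngbe_hw_stats, so values can
 * be fetched generically. A minimal sketch of that lookup, mirroring
 * what ngbe_dev_xstats_get() does further below:
 *
 *   uint32_t off = rte_ngbe_stats_strings[id].offset;
 *   uint64_t val = *(uint64_t *)((char *)hw_stats + off);
 */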
184
185 static inline int32_t
186 ngbe_pf_reset_hw(struct ngbe_hw *hw)
187 {
188         uint32_t ctrl_ext;
189         int32_t status;
190
191         status = hw->mac.reset_hw(hw);
192
193         ctrl_ext = rd32(hw, NGBE_PORTCTL);
194         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
195         ctrl_ext |= NGBE_PORTCTL_RSTDONE;
196         wr32(hw, NGBE_PORTCTL, ctrl_ext);
197         ngbe_flush(hw);
198
199         if (status == NGBE_ERR_SFP_NOT_PRESENT)
200                 status = 0;
201         return status;
202 }
203
204 static inline void
205 ngbe_enable_intr(struct rte_eth_dev *dev)
206 {
207         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
208         struct ngbe_hw *hw = ngbe_dev_hw(dev);
209
210         wr32(hw, NGBE_IENMISC, intr->mask_misc);
211         wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
212         ngbe_flush(hw);
213 }
214
215 static void
216 ngbe_disable_intr(struct ngbe_hw *hw)
217 {
218         PMD_INIT_FUNC_TRACE();
219
220         wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
221         ngbe_flush(hw);
222 }
223
224 /*
225  * Ensure that all locks are released before first NVM or PHY access
226  */
227 static void
228 ngbe_swfw_lock_reset(struct ngbe_hw *hw)
229 {
230         uint16_t mask;
231
        /*
         * These locks are trickier since they are common to all ports; but
         * swfw_sync retries long enough (1s) that if the lock cannot be
         * taken, it is almost certainly due to an improperly held semaphore.
         */
        mask = NGBE_MNGSEM_SWPHY |
               NGBE_MNGSEM_SWMBX |
               NGBE_MNGSEM_SWFLASH;
        if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");

        hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        int err;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ngbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * Rx and Tx function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ngbe_tx_queue *txq;
                /* The Tx function in the primary process is set by the last
                 * initialized queue; Tx queues may not have been initialized
                 * by the primary process yet.
                 */
                if (eth_dev->data->tx_queues) {
                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
                        ngbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default Tx function if we get here */
                        PMD_INIT_LOG(NOTICE,
                                "No Tx queues configured yet. Using default Tx function.");
                }

                ngbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->sub_system_id = pci_dev->id.subsystem_device_id;
        ngbe_map_device_id(hw);
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
                NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = ngbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, NULL);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* Reset the hw statistics */
        ngbe_dev_stats_reset(eth_dev);

        /* disable interrupt */
        ngbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
                        RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                return -ENOMEM;
        }

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* initialize the hw strip bitmap */
        memset(hwstrip, 0, sizeof(*hwstrip));

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                        (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   ngbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable support intr */
        ngbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ngbe_dev_close(eth_dev);

        return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct ngbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (ethdev == NULL)
                return 0;

        return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
        .id_table = pci_id_ngbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_ngbe_pci_probe,
        .remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

        vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
        vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
        vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;

        return 0;
}
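
/*
 * VFTA indexing sketch: the 12-bit VLAN ID is split into a table index
 * (bits 5..11) and a bit position (bits 0..4). For example, vlan_id 100
 * gives vid_idx = 100 >> 5 = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4,
 * i.e. bit 4 of NGBE_VLANTBL(3).
 */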

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_rx_queue *rxq;
        bool restart;
        uint32_t rxcfg, rxbal, rxbah;

        if (on)
                ngbe_vlan_hw_strip_enable(dev, queue);
        else
                ngbe_vlan_hw_strip_disable(dev, queue);

        rxq = dev->data->rx_queues[queue];
        rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
        if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        !(rxcfg & NGBE_RXCFG_VLAN);
                rxcfg |= NGBE_RXCFG_VLAN;
        } else {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        (rxcfg & NGBE_RXCFG_VLAN);
                rxcfg &= ~NGBE_RXCFG_VLAN;
        }
        rxcfg &= ~NGBE_RXCFG_ENA;

        if (restart) {
                /* set vlan strip for ring */
                ngbe_dev_rx_queue_stop(dev, queue);
                wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
                wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
                wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
                ngbe_dev_rx_queue_start(dev, queue);
        }
}
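
/*
 * Note: the NGBE_RXCFG_VLAN strip bit appears to take effect only while
 * the ring is disabled, which is why, when the flag actually toggles on
 * a running queue, the code stops the queue, rewrites the base-address
 * and config registers, and restarts it.
 */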

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
                    enum rte_vlan_type vlan_type,
                    uint16_t tpid)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        int ret = 0;
        uint32_t portctrl, vlan_ext, qinq;

        portctrl = rd32(hw, NGBE_PORTCTL);

        vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
        switch (vlan_type) {
        case RTE_ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                } else {
                        ret = -ENOTSUP;
                        PMD_DRV_LOG(ERR,
                                "Inner type is not supported by single VLAN");
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_LSB_MASK,
                                NGBE_TAGTPID_LSB(tpid));
                }
                break;
        case RTE_ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the high 16 bits are valid */
                        wr32m(hw, NGBE_EXTAG,
                                NGBE_EXTAG_VLAN_MASK,
                                NGBE_EXTAG_VLAN(tpid));
                } else {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_MSB_MASK,
                                NGBE_TAGTPID_MSB(tpid));
                }
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
                return -EINVAL;
        }

        return ret;
}
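
/*
 * This handler is presumably wired as the .vlan_tpid_set dev op backing
 * rte_eth_dev_set_vlan_ether_type(). An application would typically call
 * (illustrative values):
 *
 *   rte_eth_dev_set_vlan_ether_type(port_id, RTE_ETH_VLAN_TYPE_OUTER,
 *                                   RTE_ETHER_TYPE_QINQ);
 */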

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t vlnctrl;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Disable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vlnctrl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Enable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_CFIENA;
        vlnctrl |= NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);

        /* write whatever is in local vfta copy */
        for (i = 0; i < NGBE_VFTA_SIZE; i++)
                wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
        struct ngbe_rx_queue *rxq;

        if (queue >= NGBE_MAX_RX_QUEUE_NUM)
                return;

        if (on)
                NGBE_SET_HWSTRIP(hwstrip, queue);
        else
                NGBE_CLEAR_HWSTRIP(hwstrip, queue);

        if (queue >= dev->data->nb_rx_queues)
                return;

        rxq = dev->data->rx_queues[queue];

        if (on) {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        } else {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl &= ~NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);
        /* record the per-queue HW strip setting */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl |= NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);
        /* record the per-queue HW strip setting */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_VLANEXT;
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
        struct ngbe_rx_queue *rxq;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ngbe_vlan_hw_strip_enable(dev, i);
                else
                        ngbe_vlan_hw_strip_disable(dev, i);
        }
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
        uint16_t i;
        struct rte_eth_rxmode *rxmode;
        struct ngbe_rx_queue *rxq;

        if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;

        if (mask & RTE_ETH_VLAN_STRIP_MASK)
                ngbe_vlan_hw_strip_config(dev);

        if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ngbe_vlan_hw_filter_enable(dev);
                else
                        ngbe_vlan_hw_filter_disable(dev);
        }

        if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        ngbe_vlan_hw_extend_enable(dev);
                else
                        ngbe_vlan_hw_extend_disable(dev);
        }

        if (mask & RTE_ETH_QINQ_STRIP_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
                        ngbe_qinq_hw_strip_enable(dev);
                else
                        ngbe_qinq_hw_strip_disable(dev);
        }

        return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        ngbe_config_vlan_strip_on_all_queues(dev, mask);

        ngbe_vlan_offload_config(dev, mask);

        return 0;
}
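
/*
 * Sketch of how an application reaches this path through the generic
 * ethdev API (constant names from rte_ethdev.h; port_id illustrative):
 *
 *   rte_eth_dev_set_vlan_offload(port_id,
 *           RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD);
 *
 * ethdev updates dev_conf.rxmode.offloads, computes the changed-bits
 * mask (RTE_ETH_VLAN_STRIP_MASK etc.) and invokes this handler with it.
 */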

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

        PMD_INIT_FUNC_TRACE();

        /* set flag to update link status after init */
        intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue fails to meet the bulk
         * allocation preconditions, it will be reset.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

        wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
        wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
        wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
        if (hw->phy.type == ngbe_phy_yt8521s_sfi)
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
        else
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

        intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        int err;
        bool link_up = false, negotiate = false;
        uint32_t speed = 0;
        uint32_t allowed_speeds = 0;
        int mask = 0;
        int status;
        uint32_t *link_speeds;

        PMD_INIT_FUNC_TRACE();

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        /* stop adapter */
        hw->adapter_stopped = 0;
        ngbe_stop_hw(hw);

        /* reinitialize adapter, this calls reset and start */
        hw->nb_rx_queues = dev->data->nb_rx_queues;
        hw->nb_tx_queues = dev->data->nb_tx_queues;
        status = ngbe_pf_reset_hw(hw);
        if (status != 0)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;

        ngbe_dev_phy_intr_setup(dev);

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(dev).active) &&
            dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle)) {
                if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
                                                   dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate %d rx_queues intr_vec",
                                     dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        /* configure MSI-X for sleep until Rx interrupt */
        ngbe_configure_msix(dev);

        /* initialize transmission unit */
        ngbe_dev_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        err = ngbe_dev_rx_init(dev);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
                goto error;
        }

        mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
                RTE_ETH_VLAN_EXTEND_MASK;
        err = ngbe_vlan_offload_config(dev, mask);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
        }

        ngbe_configure_port(dev);

        err = ngbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
                goto error;
        }

        /* Skip link setup if loopback mode is enabled. */
        if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
                goto skip_link_setup;

        err = hw->mac.check_link(hw, &speed, &link_up, 0);
        if (err != 0)
                goto error;
        dev->data->dev_link.link_status = link_up;

        link_speeds = &dev->data->dev_conf.link_speeds;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
                negotiate = true;

        err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
        if (err != 0)
                goto error;

        allowed_speeds = 0;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

        if (*link_speeds & ~allowed_speeds) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }

        speed = 0x0;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                speed = hw->mac.default_speeds;
        } else {
                if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
                        speed |= NGBE_LINK_SPEED_1GB_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
                        speed |= NGBE_LINK_SPEED_100M_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
                        speed |= NGBE_LINK_SPEED_10M_FULL;
        }

        hw->phy.init_hw(hw);
        err = hw->mac.setup_link(hw, speed, link_up);
        if (err != 0)
                goto error;

skip_link_setup:

        if (rte_intr_allow_others(intr_handle)) {
                ngbe_dev_misc_interrupt_setup(dev);
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        ngbe_dev_lsc_interrupt_setup(dev, TRUE);
                else
                        ngbe_dev_lsc_interrupt_setup(dev, FALSE);
                ngbe_dev_macsec_interrupt_setup(dev);
                ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
        } else {
                rte_intr_callback_unregister(intr_handle,
                                             ngbe_dev_interrupt_handler, dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO,
                                     "LSC won't enable because of no intr multiplex");
        }

        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0 &&
            rte_intr_dp_is_en(intr_handle))
                ngbe_dev_rxq_interrupt_setup(dev);

        /* enable UIO/VFIO intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* resume enabled intr since HW reset */
        ngbe_enable_intr(dev);

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* GPIO 0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, 0);
        }

        /*
         * Update link status right before returning, because it may start
         * the link configuration process in a separate thread.
         */
        ngbe_dev_link_update(dev, 0);

        ngbe_read_stats_registers(hw, hw_stats);
        hw->offset_loaded = 1;

        return 0;

error:
        PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
        ngbe_dev_clear_queues(dev);
        return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

        if (hw->adapter_stopped)
                return 0;

        PMD_INIT_FUNC_TRACE();

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* GPIO 0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
        }

        /* disable interrupts */
        ngbe_disable_intr(hw);

        /* reset the NIC */
        ngbe_pf_reset_hw(hw);
        hw->adapter_stopped = 0;

        /* stop adapter */
        ngbe_stop_hw(hw);

        ngbe_dev_clear_queues(dev);

        /* Clear stored conf */
        dev->data->scattered_rx = 0;

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);

        if (!rte_intr_allow_others(intr_handle))
                /* resume to the default handler */
                rte_intr_callback_register(intr_handle,
                                           ngbe_dev_interrupt_handler,
                                           (void *)dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_intr_vec_list_free(intr_handle);

        hw->adapter_stopped = true;
        dev->data->dev_started = 0;

        return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        ngbe_pf_reset_hw(hw);

        ngbe_dev_stop(dev);

        ngbe_dev_free_queues(dev);

        /* reprogram the RAR[0] in case user changed it. */
        ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                ngbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + NGBE_LINK_UP_TIME));

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        ret = eth_ngbe_dev_uninit(dev);
        if (ret != 0)
                return ret;

        ret = eth_ngbe_dev_init(dev, NULL);

        return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
        {                                                       \
                uint32_t current_counter = rd32(hw, reg);       \
                if (current_counter < last_counter)             \
                        current_counter += 0x100000000LL;       \
                if (!hw->offset_loaded)                         \
                        last_counter = current_counter;         \
                counter = current_counter - last_counter;       \
                counter &= 0xFFFFFFFFLL;                        \
        }

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
        {                                                                \
                uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
                uint64_t current_counter_msb = rd32(hw, reg_msb);        \
                uint64_t current_counter = (current_counter_msb << 32) | \
                        current_counter_lsb;                             \
                if (current_counter < last_counter)                      \
                        current_counter += 0x1000000000LL;               \
                if (!hw->offset_loaded)                                  \
                        last_counter = current_counter;                  \
                counter = current_counter - last_counter;                \
                counter &= 0xFFFFFFFFFLL;                                \
        }

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
                           struct ngbe_hw_stats *hw_stats)
{
        unsigned int i;

        /* QP Stats */
        for (i = 0; i < hw->nb_rx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
                        hw->qp_last[i].rx_qp_packets,
                        hw_stats->qp[i].rx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
                        hw->qp_last[i].rx_qp_bytes,
                        hw_stats->qp[i].rx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
                        hw->qp_last[i].rx_qp_mc_packets,
                        hw_stats->qp[i].rx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
                        hw->qp_last[i].rx_qp_bc_packets,
                        hw_stats->qp[i].rx_qp_bc_packets);
        }

        for (i = 0; i < hw->nb_tx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
                        hw->qp_last[i].tx_qp_packets,
                        hw_stats->qp[i].tx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
                        hw->qp_last[i].tx_qp_bytes,
                        hw_stats->qp[i].tx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
                        hw->qp_last[i].tx_qp_mc_packets,
                        hw_stats->qp[i].tx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
                        hw->qp_last[i].tx_qp_bc_packets,
                        hw_stats->qp[i].tx_qp_bc_packets);
        }

        /* PB Stats */
        hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
        hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
        hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
        hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
        hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
        hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

        hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
        hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

        /* DMA Stats */
        hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
        hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
        hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
        hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
        hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
        hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

        /* MAC Stats */
        hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
        hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
        hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

        hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
        hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
        hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

        hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
        hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

        hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
        hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
        hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
        hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
        hw_stats->rx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACRX512TO1023L);
        hw_stats->rx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACRX1024TOMAXL);
        hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
        hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
        hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
        hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
        hw_stats->tx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACTX512TO1023L);
        hw_stats->tx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACTX1024TOMAXL);

        hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
        hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
        hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

        /* MNG Stats */
        hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
        hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
        hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
        hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

        /* MACsec Stats */
        hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
        hw_stats->tx_macsec_pkts_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCPKT);
        hw_stats->tx_macsec_pkts_protected +=
                        rd32(hw, NGBE_LSECTX_PROTPKT);
        hw_stats->tx_macsec_octets_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCOCT);
        hw_stats->tx_macsec_octets_protected +=
                        rd32(hw, NGBE_LSECTX_PROTOCT);
        hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
        hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
        hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
        hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
        hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
        hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
        hw_stats->rx_macsec_sc_pkts_unchecked +=
                        rd32(hw, NGBE_LSECRX_UNCHKPKT);
        hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
        hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
        for (i = 0; i < 2; i++) {
                hw_stats->rx_macsec_sa_pkts_ok +=
                        rd32(hw, NGBE_LSECRX_OKPKT(i));
                hw_stats->rx_macsec_sa_pkts_invalid +=
                        rd32(hw, NGBE_LSECRX_INVPKT(i));
                hw_stats->rx_macsec_sa_pkts_notvalid +=
                        rd32(hw, NGBE_LSECRX_BADPKT(i));
        }
        for (i = 0; i < 4; i++) {
                hw_stats->rx_macsec_sa_pkts_unusedsa +=
                        rd32(hw, NGBE_LSECRX_INVSAPKT(i));
                hw_stats->rx_macsec_sa_pkts_notusingsa +=
                        rd32(hw, NGBE_LSECRX_BADSAPKT(i));
        }
        hw_stats->rx_total_missed_packets =
                        hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct ngbe_stat_mappings *stat_mappings =
                        NGBE_DEV_STAT_MAPPINGS(dev);
        uint32_t i, j;

        ngbe_read_stats_registers(hw, hw_stats);

        if (stats == NULL)
                return -EINVAL;

        /* Fill out the rte_eth_stats statistics structure */
        stats->ipackets = hw_stats->rx_packets;
        stats->ibytes = hw_stats->rx_bytes;
        stats->opackets = hw_stats->tx_packets;
        stats->obytes = hw_stats->tx_bytes;

        memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
        memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
        memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
        memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
        memset(&stats->q_errors, 0, sizeof(stats->q_errors));
        for (i = 0; i < NGBE_MAX_QP; i++) {
                uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
                uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
                uint32_t q_map;

                q_map = (stat_mappings->rqsm[n] >> offset)
                                & QMAP_FIELD_RESERVED_BITS_MASK;
                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
                stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
                stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

                q_map = (stat_mappings->tqsm[n] >> offset)
                                & QMAP_FIELD_RESERVED_BITS_MASK;
                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
                stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
                stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
        }

        /* Rx Errors */
        stats->imissed  = hw_stats->rx_total_missed_packets +
                          hw_stats->rx_dma_drop;
        stats->ierrors  = hw_stats->rx_crc_errors +
                          hw_stats->rx_mac_short_packet_dropped +
                          hw_stats->rx_length_errors +
                          hw_stats->rx_undersize_errors +
                          hw_stats->rx_oversize_errors +
                          hw_stats->rx_illegal_byte_errors +
                          hw_stats->rx_error_bytes +
                          hw_stats->rx_fragment_errors;

        /* Tx Errors */
        stats->oerrors  = 0;
        return 0;
}
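
/*
 * A minimal usage sketch through the generic ethdev API (port_id is
 * illustrative):
 *
 *   struct rte_eth_stats st;
 *   if (rte_eth_stats_get(port_id, &st) == 0)
 *           printf("ipackets=%" PRIu64 "\n", st.ipackets);
 */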

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

        /* HW registers are cleared on read */
        hw->offset_loaded = 0;
        ngbe_dev_stats_get(dev, NULL);
        hw->offset_loaded = 1;

        /* Reset software totals */
        memset(hw_stats, 0, sizeof(*hw_stats));

        return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
        int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
        return NGBE_NB_HW_STATS +
               NGBE_NB_QP_STATS * nb_queues;
}
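
/*
 * For example, with 4 Rx and 2 Tx queues configured, nb_queues is 4 and
 * the total is NGBE_NB_HW_STATS + NGBE_NB_QP_STATS * 4 (five per-queue
 * counters per queue pair).
 */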

static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
        int nb, st;

        /* Extended stats from ngbe_hw_stats */
        if (id < NGBE_NB_HW_STATS) {
                snprintf(name, size, "[hw]%s",
                        rte_ngbe_stats_strings[id].name);
                return 0;
        }
        id -= NGBE_NB_HW_STATS;

        /* Queue Stats */
        if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
                nb = id / NGBE_NB_QP_STATS;
                st = id % NGBE_NB_QP_STATS;
                snprintf(name, size, "[q%u]%s", nb,
                        rte_ngbe_qp_strings[st].name);
                return 0;
        }
        id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

        return -(int)(id + 1);
}
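
/*
 * Resulting name format, for example: id 0 yields
 * "[hw]mng_bmc2host_packets", and the first per-queue id yields
 * "[q0]rx_qp_packets".
 */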
1425
1426 static inline int
1427 ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1428 {
1429         int nb, st;
1430
1431         /* Extended stats from ngbe_hw_stats */
1432         if (id < NGBE_NB_HW_STATS) {
1433                 *offset = rte_ngbe_stats_strings[id].offset;
1434                 return 0;
1435         }
1436         id -= NGBE_NB_HW_STATS;
1437
1438         /* Queue Stats */
1439         if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1440                 nb = id / NGBE_NB_QP_STATS;
1441                 st = id % NGBE_NB_QP_STATS;
1442                 *offset = rte_ngbe_qp_strings[st].offset +
1443                         nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1444                 return 0;
1445         }
1446
1447         return -1;
1448 }
1449
1450 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1451         struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1452 {
1453         unsigned int i, count;
1454
1455         count = ngbe_xstats_calc_num(dev);
1456         if (xstats_names == NULL)
1457                 return count;
1458
1459         /* Note: limit >= count is checked upstream
1460          * in rte_eth_xstats_get_names()
1461          */
1462         limit = min(limit, count);
1463
1464         /* Extended stats from ngbe_hw_stats */
1465         for (i = 0; i < limit; i++) {
1466                 if (ngbe_get_name_by_id(i, xstats_names[i].name,
1467                         sizeof(xstats_names[i].name))) {
1468                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1469                         break;
1470                 }
1471         }
1472
1473         return i;
1474 }
1475
1476 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1477         const uint64_t *ids,
1478         struct rte_eth_xstat_name *xstats_names,
1479         unsigned int limit)
1480 {
1481         unsigned int i;
1482
1483         if (ids == NULL)
1484                 return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1485
1486         for (i = 0; i < limit; i++) {
1487                 if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1488                                 sizeof(xstats_names[i].name))) {
1489                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", (unsigned int)ids[i]);
1490                         return -1;
1491                 }
1492         }
1493
1494         return i;
1495 }
1496
1497 static int
1498 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1499                                          unsigned int limit)
1500 {
1501         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1502         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1503         unsigned int i, count;
1504
1505         ngbe_read_stats_registers(hw, hw_stats);
1506
1507         /* If this is a reset, xstats is NULL and we have already
1508          * cleared the registers by reading them.
1509          */
1510         count = ngbe_xstats_calc_num(dev);
1511         if (xstats == NULL)
1512                 return count;
1513
1514         limit = min(limit, ngbe_xstats_calc_num(dev));
1515
1516         /* Extended stats from ngbe_hw_stats */
1517         for (i = 0; i < limit; i++) {
1518                 uint32_t offset = 0;
1519
1520                 if (ngbe_get_offset_by_id(i, &offset)) {
1521                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1522                         break;
1523                 }
1524                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1525                 xstats[i].id = i;
1526         }
1527
1528         return i;
1529 }
1530
1531 static int
1532 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1533                                          unsigned int limit)
1534 {
1535         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1536         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1537         unsigned int i, count;
1538
1539         ngbe_read_stats_registers(hw, hw_stats);
1540
1541         /* If this is a reset, values is NULL and we have already
1542          * cleared the registers by reading them.
1543          */
1544         count = ngbe_xstats_calc_num(dev);
1545         if (values == NULL)
1546                 return count;
1547
1548         limit = min(limit, ngbe_xstats_calc_num(dev));
1549
1550         /* Extended stats from ngbe_hw_stats */
1551         for (i = 0; i < limit; i++) {
1552                 uint32_t offset;
1553
1554                 if (ngbe_get_offset_by_id(i, &offset)) {
1555                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1556                         break;
1557                 }
1558                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1559         }
1560
1561         return i;
1562 }
1563
1564 static int
1565 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1566                 uint64_t *values, unsigned int limit)
1567 {
1568         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1569         unsigned int i;
1570
1571         if (ids == NULL)
1572                 return ngbe_dev_xstats_get_(dev, values, limit);
1573
1574         for (i = 0; i < limit; i++) {
1575                 uint32_t offset;
1576
1577                 if (ngbe_get_offset_by_id(ids[i], &offset)) {
1578                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", (unsigned int)ids[i]);
1579                         break;
1580                 }
1581                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1582         }
1583
1584         return i;
1585 }
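
/*
 * Usage sketch (hypothetical application code): resolve one extended
 * statistic by name, then poll it cheaply by id, which lands in
 * ngbe_dev_xstats_get_by_id() above. The stat name used here is an
 * assumption; real names follow the "[hw]..." / "[q<n>]..." formats
 * produced by ngbe_get_name_by_id().
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id,
 *			"[hw]rx_crc_errors", &id) == 0) {
 *		rte_eth_xstats_get_by_id(port_id, &id, &value, 1);
 *		printf("crc errors: %" PRIu64 "\n", value);
 *	}
 */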
1586
1587 static int
1588 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1589 {
1590         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1591         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1592
1593         /* HW registers are cleared on read */
1594         hw->offset_loaded = 0;
1595         ngbe_read_stats_registers(hw, hw_stats);
1596         hw->offset_loaded = 1;
1597
1598         /* Reset software totals */
1599         memset(hw_stats, 0, sizeof(*hw_stats));
1600
1601         return 0;
1602 }
1603
1604 static int
1605 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1606 {
1607         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1608         int ret;
1609
1610         ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1611
1612         if (ret < 0)
1613                 return -EINVAL;
1614
1615         ret += 1; /* add the size of '\0' */
1616         if (fw_size < (size_t)ret)
1617                 return ret;
1618
1619         return 0;
1620 }
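
/*
 * Usage sketch (hypothetical application code): the handler above
 * follows the ethdev contract of returning the required buffer size
 * (including the '\0') when the supplied buffer is too small, so a
 * caller can size its buffer in two calls:
 *
 *	char tiny[1];
 *	int len = rte_eth_dev_fw_version_get(port_id, tiny, sizeof(tiny));
 *
 *	if (len > 0) {		// too small: len is the size required
 *		char *buf = malloc(len);
 *		if (buf != NULL)
 *			rte_eth_dev_fw_version_get(port_id, buf, len);
 *	}
 */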
1621
1622 static int
1623 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1624 {
1625         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1626         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1627
1628         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1629         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1630         dev_info->min_rx_bufsize = 1024;
1631         dev_info->max_rx_pktlen = 15872;
1632         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1633         dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1634         dev_info->max_vfs = pci_dev->max_vfs;
1635         dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1636         dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1637                                      dev_info->rx_queue_offload_capa);
1638         dev_info->tx_queue_offload_capa = 0;
1639         dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1640
1641         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1642                 .rx_thresh = {
1643                         .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1644                         .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1645                         .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1646                 },
1647                 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1648                 .rx_drop_en = 0,
1649                 .offloads = 0,
1650         };
1651
1652         dev_info->default_txconf = (struct rte_eth_txconf) {
1653                 .tx_thresh = {
1654                         .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1655                         .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1656                         .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1657                 },
1658                 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1659                 .offloads = 0,
1660         };
1661
1662         dev_info->rx_desc_lim = rx_desc_lim;
1663         dev_info->tx_desc_lim = tx_desc_lim;
1664
1665         dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1666                                 RTE_ETH_LINK_SPEED_10M;
1667
1668         /* Driver-preferred Rx/Tx parameters */
1669         dev_info->default_rxportconf.burst_size = 32;
1670         dev_info->default_txportconf.burst_size = 32;
1671         dev_info->default_rxportconf.nb_queues = 1;
1672         dev_info->default_txportconf.nb_queues = 1;
1673         dev_info->default_rxportconf.ring_size = 256;
1674         dev_info->default_txportconf.ring_size = 256;
1675
1676         return 0;
1677 }
1678
1679 const uint32_t *
1680 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1681 {
1682         if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1683             dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1684             dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1685             dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1686                 return ngbe_get_supported_ptypes();
1687
1688         return NULL;
1689 }
1690
1691 /* return 0 means link status changed, -1 means not changed */
1692 int
1693 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1694                             int wait_to_complete)
1695 {
1696         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1697         struct rte_eth_link link;
1698         u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1699         u32 lan_speed = 0;
1700         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1701         bool link_up;
1702         int err;
1703         int wait = 1;
1704
1705         memset(&link, 0, sizeof(link));
1706         link.link_status = RTE_ETH_LINK_DOWN;
1707         link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1708         link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1709         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1710                         ~RTE_ETH_LINK_SPEED_AUTONEG);
1711
1712         hw->mac.get_link_status = true;
1713
1714         if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1715                 return rte_eth_linkstatus_set(dev, &link);
1716
1717         /* don't wait when polling (wait_to_complete == 0) or when the LSC interrupt is enabled */
1718         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1719                 wait = 0;
1720
1721         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1722         if (err != 0) {
1723                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1724                 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1725                 return rte_eth_linkstatus_set(dev, &link);
1726         }
1727
1728         if (!link_up)
1729                 return rte_eth_linkstatus_set(dev, &link);
1730
1731         intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1732         link.link_status = RTE_ETH_LINK_UP;
1733         link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1734
1735         switch (link_speed) {
1736         default:
1737         case NGBE_LINK_SPEED_UNKNOWN:
1738                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1739                 break;
1740
1741         case NGBE_LINK_SPEED_10M_FULL:
1742                 link.link_speed = RTE_ETH_SPEED_NUM_10M;
1743                 lan_speed = 0;
1744                 break;
1745
1746         case NGBE_LINK_SPEED_100M_FULL:
1747                 link.link_speed = RTE_ETH_SPEED_NUM_100M;
1748                 lan_speed = 1;
1749                 break;
1750
1751         case NGBE_LINK_SPEED_1GB_FULL:
1752                 link.link_speed = RTE_ETH_SPEED_NUM_1G;
1753                 lan_speed = 2;
1754                 break;
1755         }
1756
1757         if (hw->is_pf) {
1758                 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1759                 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1760                                 NGBE_LINK_SPEED_100M_FULL |
1761                                 NGBE_LINK_SPEED_10M_FULL)) {
1762                         wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1763                                 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1764                 }
1765         }
1766
1767         return rte_eth_linkstatus_set(dev, &link);
1768 }
1769
1770 static int
1771 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1772 {
1773         return ngbe_dev_link_update_share(dev, wait_to_complete);
1774 }
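
/*
 * Usage sketch (hypothetical application code): ngbe_dev_link_update()
 * backs both flavours of the generic link query; the no-wait variant
 * corresponds to wait_to_complete == 0.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == RTE_ETH_LINK_UP)
 *		printf("link up at %u Mbps\n", link.link_speed);
 */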
1775
1776 static int
1777 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1778 {
1779         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1780         uint32_t fctrl;
1781
1782         fctrl = rd32(hw, NGBE_PSRCTL);
1783         fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1784         wr32(hw, NGBE_PSRCTL, fctrl);
1785
1786         return 0;
1787 }
1788
1789 static int
1790 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1791 {
1792         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1793         uint32_t fctrl;
1794
1795         fctrl = rd32(hw, NGBE_PSRCTL);
1796         fctrl &= (~NGBE_PSRCTL_UCP);
1797         if (dev->data->all_multicast == 1)
1798                 fctrl |= NGBE_PSRCTL_MCP;
1799         else
1800                 fctrl &= (~NGBE_PSRCTL_MCP);
1801         wr32(hw, NGBE_PSRCTL, fctrl);
1802
1803         return 0;
1804 }
1805
1806 static int
1807 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1808 {
1809         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1810         uint32_t fctrl;
1811
1812         fctrl = rd32(hw, NGBE_PSRCTL);
1813         fctrl |= NGBE_PSRCTL_MCP;
1814         wr32(hw, NGBE_PSRCTL, fctrl);
1815
1816         return 0;
1817 }
1818
1819 static int
1820 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1821 {
1822         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1823         uint32_t fctrl;
1824
1825         if (dev->data->promiscuous == 1)
1826                 return 0; /* must remain in all_multicast mode */
1827
1828         fctrl = rd32(hw, NGBE_PSRCTL);
1829         fctrl &= (~NGBE_PSRCTL_MCP);
1830         wr32(hw, NGBE_PSRCTL, fctrl);
1831
1832         return 0;
1833 }
1834
1835 /**
1836  * It clears the interrupt causes and enables the interrupt.
1837  * It will be called only once during NIC initialization.
1838  *
1839  * @param dev
1840  *  Pointer to struct rte_eth_dev.
1841  * @param on
1842  *  Enable or Disable.
1843  *
1844  * @return
1845  *  - On success, zero.
1846  *  - On failure, a negative value.
1847  */
1848 static int
1849 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1850 {
1851         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1852
1853         ngbe_dev_link_status_print(dev);
1854         if (on != 0) {
1855                 intr->mask_misc |= NGBE_ICRMISC_PHY;
1856                 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1857         } else {
1858                 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1859                 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
1860         }
1861
1862         return 0;
1863 }
1864
1865 /**
1866  * It clears the interrupt causes and enables the interrupt.
1867  * It will be called only once during NIC initialization.
1868  *
1869  * @param dev
1870  *  Pointer to struct rte_eth_dev.
1871  *
1872  * @return
1873  *  - On success, zero.
1874  *  - On failure, a negative value.
1875  */
1876 static int
1877 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1878 {
1879         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1880         u64 mask;
1881
1882         mask = NGBE_ICR_MASK;
1883         mask &= (1ULL << NGBE_MISC_VEC_ID);
1884         intr->mask |= mask;
1885         intr->mask_misc |= NGBE_ICRMISC_GPIO;
1886
1887         return 0;
1888 }
1889
1890 /**
1891  * It clears the interrupt causes and enables the interrupt.
1892  * It will be called only once during NIC initialization.
1893  *
1894  * @param dev
1895  *  Pointer to struct rte_eth_dev.
1896  *
1897  * @return
1898  *  - On success, zero.
1899  *  - On failure, a negative value.
1900  */
1901 static int
1902 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1903 {
1904         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1905         u64 mask;
1906
1907         mask = NGBE_ICR_MASK;
1908         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
1909         intr->mask |= mask;
1910
1911         return 0;
1912 }
1913
1914 /**
1915  * It clears the interrupt causes and enables the interrupt.
1916  * It will be called only once during NIC initialization.
1917  *
1918  * @param dev
1919  *  Pointer to struct rte_eth_dev.
1920  *
1921  * @return
1922  *  - On success, zero.
1923  *  - On failure, a negative value.
1924  */
1925 static int
1926 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1927 {
1928         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1929
1930         intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
1931
1932         return 0;
1933 }
1934
1935 /*
1936  * It reads the ICR and sets flags for link_update.
1937  *
1938  * @param dev
1939  *  Pointer to struct rte_eth_dev.
1940  *
1941  * @return
1942  *  - On success, zero.
1943  *  - On failure, a negative value.
1944  */
1945 static int
1946 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1947 {
1948         uint32_t eicr;
1949         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1950         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1951
1952         /* clear all cause mask */
1953         ngbe_disable_intr(hw);
1954
1955         /* read the clear-on-read NIC registers here */
1956         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
1957         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
1958
1959         intr->flags = 0;
1960
1961         /* set flag for async link update */
1962         if (eicr & NGBE_ICRMISC_PHY)
1963                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1964
1965         if (eicr & NGBE_ICRMISC_VFMBX)
1966                 intr->flags |= NGBE_FLAG_MAILBOX;
1967
1968         if (eicr & NGBE_ICRMISC_LNKSEC)
1969                 intr->flags |= NGBE_FLAG_MACSEC;
1970
1971         if (eicr & NGBE_ICRMISC_GPIO)
1972                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1973
1974         return 0;
1975 }
1976
1977 /**
1978  * It gets and then prints the link status.
1979  *
1980  * @param dev
1981  *  Pointer to struct rte_eth_dev.
1986  */
1987 static void
1988 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
1989 {
1990         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1991         struct rte_eth_link link;
1992
1993         rte_eth_linkstatus_get(dev, &link);
1994
1995         if (link.link_status == RTE_ETH_LINK_UP) {
1996                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1997                                         (int)(dev->data->port_id),
1998                                         (unsigned int)link.link_speed,
1999                         link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2000                                         "full-duplex" : "half-duplex");
2001         } else {
2002                 PMD_INIT_LOG(INFO, "Port %d: Link Down",
2003                                 (int)(dev->data->port_id));
2004         }
2005         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2006                                 pci_dev->addr.domain,
2007                                 pci_dev->addr.bus,
2008                                 pci_dev->addr.devid,
2009                                 pci_dev->addr.function);
2010 }
2011
2012 /*
2013  * It executes link_update after knowing an interrupt occurred.
2014  *
2015  * @param dev
2016  *  Pointer to struct rte_eth_dev.
2017  *
2018  * @return
2019  *  - On success, zero.
2020  *  - On failure, a negative value.
2021  */
2022 static int
2023 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2024 {
2025         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2026         int64_t timeout;
2027
2028         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2029
2030         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2031                 struct rte_eth_link link;
2032
2033                 /* get the link status before link update, for predicting later */
2034                 rte_eth_linkstatus_get(dev, &link);
2035
2036                 ngbe_dev_link_update(dev, 0);
2037
2038                 /* link was down: it is likely coming up */
2039                 if (link.link_status != RTE_ETH_LINK_UP)
2040                         /* handle it 1 sec later, wait for it to be stable */
2041                         timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
2042                 /* link was up: it is likely going down */
2043                 else
2044                         /* handle it 4 sec later, wait for it to be stable */
2045                         timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
2046
2047                 ngbe_dev_link_status_print(dev);
2048                 if (rte_eal_alarm_set(timeout * 1000,
2049                                       ngbe_dev_interrupt_delayed_handler,
2050                                       (void *)dev) < 0) {
2051                         PMD_DRV_LOG(ERR, "Error setting alarm");
2052                 } else {
2053                         /* remember original mask */
2054                         intr->mask_misc_orig = intr->mask_misc;
2055                         /* only disable lsc interrupt */
2056                         intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2057
2058                         intr->mask_orig = intr->mask;
2059                         /* only disable all misc interrupts */
2060                         intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
2061                 }
2062         }
2063
2064         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2065         ngbe_enable_intr(dev);
2066
2067         return 0;
2068 }
2069
2070 /**
2071  * Interrupt handler registered as an alarm callback for delayed handling
2072  * of specific interrupts, to wait for a stable NIC state. Because the
2073  * ngbe interrupt state is not stable right after the link goes down,
2074  * the handler needs to wait 4 seconds to get a stable status.
2075  *
2076  * @param param
2077  *  The address of parameter (struct rte_eth_dev *) registered before.
2078  */
2079 static void
2080 ngbe_dev_interrupt_delayed_handler(void *param)
2081 {
2082         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2083         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2084         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2085         uint32_t eicr;
2086
2087         ngbe_disable_intr(hw);
2088
2089         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2090
2091         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2092                 ngbe_dev_link_update(dev, 0);
2093                 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2094                 ngbe_dev_link_status_print(dev);
2095                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2096                                               NULL);
2097         }
2098
2099         if (intr->flags & NGBE_FLAG_MACSEC) {
2100                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2101                                               NULL);
2102                 intr->flags &= ~NGBE_FLAG_MACSEC;
2103         }
2104
2105         /* restore original mask */
2106         intr->mask_misc = intr->mask_misc_orig;
2107         intr->mask_misc_orig = 0;
2108         intr->mask = intr->mask_orig;
2109         intr->mask_orig = 0;
2110
2111         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2112         ngbe_enable_intr(dev);
2113 }
2114
2115 /**
2116  * Interrupt handler triggered by the NIC for handling
2117  * a specific interrupt.
2118  *
2119  * @param param
2120  *  The address of parameter (struct rte_eth_dev *) registered before.
2121  */
2122 static void
2123 ngbe_dev_interrupt_handler(void *param)
2124 {
2125         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2126
2127         ngbe_dev_interrupt_get_status(dev);
2128         ngbe_dev_interrupt_action(dev);
2129 }
2130
2131 static int
2132 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2133                                 uint32_t index, uint32_t pool)
2134 {
2135         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2136         uint32_t enable_addr = 1;
2137
2138         return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2139                              pool, enable_addr);
2140 }
2141
2142 static void
2143 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2144 {
2145         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2146
2147         ngbe_clear_rar(hw, index);
2148 }
2149
2150 static int
2151 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2152 {
2153         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2154
2155         ngbe_remove_rar(dev, 0);
2156         ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2157
2158         return 0;
2159 }
2160
2161 static int
2162 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2163 {
2164         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2165         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4; /* 4 = VLAN tag */
2166         struct rte_eth_dev_data *dev_data = dev->data;
2167
2168         /* If device is started, refuse mtu that requires the support of
2169          * scattered packets when this feature has not been enabled before.
2170          */
2171         if (dev_data->dev_started && !dev_data->scattered_rx &&
2172             (frame_size + 2 * NGBE_VLAN_TAG_SIZE >
2173              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2174                 PMD_INIT_LOG(ERR, "Stop port first.");
2175                 return -EINVAL;
2176         }
2177
2178         if (hw->mode)
2179                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2180                         NGBE_FRAME_SIZE_MAX);
2181         else
2182                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2183                         NGBE_FRMSZ_MAX(frame_size));
2184
2185         return 0;
2186 }
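
/*
 * Worked example of the frame size computation above, for the common
 * MTU of 1500 bytes:
 *
 *	frame_size = 1500 + RTE_ETHER_HDR_LEN (14)
 *	                  + RTE_ETHER_CRC_LEN (4)
 *	                  + 4 (VLAN tag)
 *	           = 1522 bytes
 *
 * so a hypothetical rte_eth_dev_set_mtu(port_id, 1500) programs
 * NGBE_FRMSZ_MAX(1522), unless hw->mode is set, in which case the
 * register is forced to NGBE_FRAME_SIZE_MAX.
 */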
2187
2188 static uint32_t
2189 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2190 {
2191         uint32_t vector = 0;
2192
2193         switch (hw->mac.mc_filter_type) {
2194         case 0:   /* use bits [47:36] of the address */
2195                 vector = ((uc_addr->addr_bytes[4] >> 4) |
2196                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2197                 break;
2198         case 1:   /* use bits [46:35] of the address */
2199                 vector = ((uc_addr->addr_bytes[4] >> 3) |
2200                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2201                 break;
2202         case 2:   /* use bits [45:34] of the address */
2203                 vector = ((uc_addr->addr_bytes[4] >> 2) |
2204                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2205                 break;
2206         case 3:   /* use bits [43:32] of the address */
2207                 vector = ((uc_addr->addr_bytes[4]) |
2208                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2209                 break;
2210         default:  /* Invalid mc_filter_type */
2211                 break;
2212         }
2213
2214         /* vector can only be 12 bits wide or the table boundary will be exceeded */
2215         vector &= 0xFFF;
2216         return vector;
2217 }
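
/*
 * Worked example for ngbe_uta_vector() with mc_filter_type == 0
 * (bits [47:36] of the address): for a hypothetical address with
 * addr_bytes[4] == 0x12 and addr_bytes[5] == 0x34,
 *
 *	vector = (0x12 >> 4) | (0x34 << 4) = 0x001 | 0x340 = 0x341
 *
 * ngbe_uc_hash_table_set() below then locates the filter bit:
 *
 *	uta_idx  = (0x341 >> 5) & 0x7F = 26	// which 32-bit register
 *	uta_mask = 1u << (0x341 & 0x1F) = 0x2	// which bit inside it
 */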
2218
2219 static int
2220 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2221                         struct rte_ether_addr *mac_addr, uint8_t on)
2222 {
2223         uint32_t vector;
2224         uint32_t uta_idx;
2225         uint32_t reg_val;
2226         uint32_t uta_mask;
2227         uint32_t psrctl;
2228
2229         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2230         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2231
2232         vector = ngbe_uta_vector(hw, mac_addr);
2233         uta_idx = (vector >> 5) & 0x7F;
2234         uta_mask = 0x1UL << (vector & 0x1F);
2235
2236         if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2237                 return 0;
2238
2239         reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2240         if (on) {
2241                 uta_info->uta_in_use++;
2242                 reg_val |= uta_mask;
2243                 uta_info->uta_shadow[uta_idx] |= uta_mask;
2244         } else {
2245                 uta_info->uta_in_use--;
2246                 reg_val &= ~uta_mask;
2247                 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2248         }
2249
2250         wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2251
2252         psrctl = rd32(hw, NGBE_PSRCTL);
2253         if (uta_info->uta_in_use > 0)
2254                 psrctl |= NGBE_PSRCTL_UCHFENA;
2255         else
2256                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2257
2258         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2259         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2260         wr32(hw, NGBE_PSRCTL, psrctl);
2261
2262         return 0;
2263 }
2264
2265 static int
2266 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2267 {
2268         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2269         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2270         uint32_t psrctl;
2271         int i;
2272
2273         if (on) {
2274                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2275                         uta_info->uta_shadow[i] = ~0;
2276                         wr32(hw, NGBE_UCADDRTBL(i), ~0);
2277                 }
2278         } else {
2279                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2280                         uta_info->uta_shadow[i] = 0;
2281                         wr32(hw, NGBE_UCADDRTBL(i), 0);
2282                 }
2283         }
2284
2285         psrctl = rd32(hw, NGBE_PSRCTL);
2286         if (on)
2287                 psrctl |= NGBE_PSRCTL_UCHFENA;
2288         else
2289                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2290
2291         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2292         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2293         wr32(hw, NGBE_PSRCTL, psrctl);
2294
2295         return 0;
2296 }
2297
2298 /**
2299  * Set the IVAR registers, mapping interrupt causes to vectors
2300  * @param hw
2301  *  pointer to ngbe_hw struct
2302  * @param direction
2303  *  0 for Rx, 1 for Tx, -1 for other causes
2304  * @param queue
2305  *  queue to map the corresponding interrupt to
2306  * @param msix_vector
2307  *  the vector to map to the corresponding queue
2308  */
2309 void
2310 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2311                    uint8_t queue, uint8_t msix_vector)
2312 {
2313         uint32_t tmp, idx;
2314
2315         if (direction == -1) {
2316                 /* other causes */
2317                 msix_vector |= NGBE_IVARMISC_VLD;
2318                 idx = 0;
2319                 tmp = rd32(hw, NGBE_IVARMISC);
2320                 tmp &= ~(0xFF << idx);
2321                 tmp |= (msix_vector << idx);
2322                 wr32(hw, NGBE_IVARMISC, tmp);
2323         } else {
2324                 /* rx or tx causes */
2325                 /* Workaround for ICR lost */
2326                 idx = ((16 * (queue & 1)) + (8 * direction));
2327                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2328                 tmp &= ~(0xFF << idx);
2329                 tmp |= (msix_vector << idx);
2330                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2331         }
2332 }
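
/*
 * Worked example of the IVAR layout handled above: each 32-bit
 * NGBE_IVAR(n) register carries four 8-bit vector fields for queue
 * pair n: byte 0 = Rx of the even queue, byte 1 = Tx of the even
 * queue, byte 2 = Rx of the odd queue, byte 3 = Tx of the odd queue.
 * Mapping Rx of queue 3 to MSI-X vector 2 therefore resolves to:
 *
 *	ngbe_set_ivar_map(hw, 0, 3, 2);
 *	// idx = 16 * (3 & 1) + 8 * 0 = 16
 *	// -> 2 is written to bits [23:16] of NGBE_IVAR(1)
 */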
2333
2334 /**
2335  * Sets up the hardware to properly generate MSI-X interrupts
2336  * @param dev
2337  *  Pointer to struct rte_eth_dev
2338  */
2339 static void
2340 ngbe_configure_msix(struct rte_eth_dev *dev)
2341 {
2342         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2343         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2344         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2345         uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2346         uint32_t vec = NGBE_MISC_VEC_ID;
2347         uint32_t gpie;
2348
2349         /*
2350          * Don't configure the MSI-X register if no mapping is done
2351          * between intr vector and event fd. But if MSI-X has already
2352          * been enabled, auto clean, auto mask and throttling still
2353          * need to be configured.
2354          */
2355         gpie = rd32(hw, NGBE_GPIE);
2356         if (!rte_intr_dp_is_en(intr_handle) &&
2357             !(gpie & NGBE_GPIE_MSIX))
2358                 return;
2359
2360         if (rte_intr_allow_others(intr_handle)) {
2361                 base = NGBE_RX_VEC_START;
2362                 vec = base;
2363         }
2364
2365         /* setup GPIE for MSI-X mode */
2366         gpie = rd32(hw, NGBE_GPIE);
2367         gpie |= NGBE_GPIE_MSIX;
2368         wr32(hw, NGBE_GPIE, gpie);
2369
2370         /* Populate the IVAR table and set the ITR values to the
2371          * corresponding register.
2372          */
2373         if (rte_intr_dp_is_en(intr_handle)) {
2374                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2375                         queue_id++) {
2376                         /* by default, 1:1 mapping */
2377                         ngbe_set_ivar_map(hw, 0, queue_id, vec);
2378                         rte_intr_vec_list_index_set(intr_handle,
2379                                                            queue_id, vec);
2380                         if (vec < base + rte_intr_nb_efd_get(intr_handle)
2381                             - 1)
2382                                 vec++;
2383                 }
2384
2385                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2386         }
2387         wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2388                         NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2389                         | NGBE_ITR_WRDSA);
2390 }
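
/*
 * Resulting mapping sketch for a hypothetical port with three Rx
 * queues, when rte_intr_allow_others() is true and enough event fds
 * were set up:
 *
 *	misc/link causes -> vector NGBE_MISC_VEC_ID
 *	Rx queue 0       -> vector NGBE_RX_VEC_START
 *	Rx queue 1       -> vector NGBE_RX_VEC_START + 1
 *	Rx queue 2       -> vector NGBE_RX_VEC_START + 2
 *
 * With fewer event fds, the trailing queues share the last vector
 * (the `vec < base + nb_efd - 1` check above stops incrementing).
 */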
2391
2392 static u8 *
2393 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2394                         u8 **mc_addr_ptr, u32 *vmdq)
2395 {
2396         u8 *mc_addr;
2397
2398         *vmdq = 0;
2399         mc_addr = *mc_addr_ptr;
2400         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2401         return mc_addr;
2402 }
2403
2404 int
2405 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2406                           struct rte_ether_addr *mc_addr_set,
2407                           uint32_t nb_mc_addr)
2408 {
2409         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2410         u8 *mc_addr_list;
2411
2412         mc_addr_list = (u8 *)mc_addr_set;
2413         return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2414                                          ngbe_dev_addr_list_itr, TRUE);
2415 }
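
/*
 * Usage sketch (hypothetical application code): program a multicast
 * filter list; the array is walked by ngbe_dev_addr_list_itr() above
 * on behalf of hw->mac.update_mc_addr_list().
 *
 *	struct rte_ether_addr mc[2] = {
 *		{{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }},	// 224.0.0.1
 *		{{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }},	// mDNS 224.0.0.251
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
 */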
2416
2417 static const struct eth_dev_ops ngbe_eth_dev_ops = {
2418         .dev_configure              = ngbe_dev_configure,
2419         .dev_infos_get              = ngbe_dev_info_get,
2420         .dev_start                  = ngbe_dev_start,
2421         .dev_stop                   = ngbe_dev_stop,
2422         .dev_close                  = ngbe_dev_close,
2423         .dev_reset                  = ngbe_dev_reset,
2424         .promiscuous_enable         = ngbe_dev_promiscuous_enable,
2425         .promiscuous_disable        = ngbe_dev_promiscuous_disable,
2426         .allmulticast_enable        = ngbe_dev_allmulticast_enable,
2427         .allmulticast_disable       = ngbe_dev_allmulticast_disable,
2428         .link_update                = ngbe_dev_link_update,
2429         .stats_get                  = ngbe_dev_stats_get,
2430         .xstats_get                 = ngbe_dev_xstats_get,
2431         .xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
2432         .stats_reset                = ngbe_dev_stats_reset,
2433         .xstats_reset               = ngbe_dev_xstats_reset,
2434         .xstats_get_names           = ngbe_dev_xstats_get_names,
2435         .xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
2436         .fw_version_get             = ngbe_fw_version_get,
2437         .dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
2438         .mtu_set                    = ngbe_dev_mtu_set,
2439         .vlan_filter_set            = ngbe_vlan_filter_set,
2440         .vlan_tpid_set              = ngbe_vlan_tpid_set,
2441         .vlan_offload_set           = ngbe_vlan_offload_set,
2442         .vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
2443         .rx_queue_start             = ngbe_dev_rx_queue_start,
2444         .rx_queue_stop              = ngbe_dev_rx_queue_stop,
2445         .tx_queue_start             = ngbe_dev_tx_queue_start,
2446         .tx_queue_stop              = ngbe_dev_tx_queue_stop,
2447         .rx_queue_setup             = ngbe_dev_rx_queue_setup,
2448         .rx_queue_release           = ngbe_dev_rx_queue_release,
2449         .tx_queue_setup             = ngbe_dev_tx_queue_setup,
2450         .tx_queue_release           = ngbe_dev_tx_queue_release,
2451         .mac_addr_add               = ngbe_add_rar,
2452         .mac_addr_remove            = ngbe_remove_rar,
2453         .mac_addr_set               = ngbe_set_default_mac_addr,
2454         .uc_hash_table_set          = ngbe_uc_hash_table_set,
2455         .uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
2456         .set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
2457         .rx_burst_mode_get          = ngbe_rx_burst_mode_get,
2458         .tx_burst_mode_get          = ngbe_tx_burst_mode_get,
2459 };
2460
2461 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
2462 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
2463 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
2464
2465 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
2466 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
2467
2468 #ifdef RTE_ETHDEV_DEBUG_RX
2469         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
2470 #endif
2471 #ifdef RTE_ETHDEV_DEBUG_TX
2472         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
2473 #endif