dpdk.git: drivers/net/txgbe/txgbe_ethdev.c (commit 999bf27e2edbf9f5ed84e434f9e6a42303ee966d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020
3  */
4
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
11
12 #include <rte_interrupts.h>
13 #include <rte_log.h>
14 #include <rte_debug.h>
15 #include <rte_pci.h>
16 #include <rte_memory.h>
17 #include <rte_eal.h>
18 #include <rte_alarm.h>
19
20 #include "txgbe_logs.h"
21 #include "base/txgbe.h"
22 #include "txgbe_ethdev.h"
23 #include "txgbe_rxtx.h"
24
25 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
26 static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
27 static int txgbe_dev_close(struct rte_eth_dev *dev);
28 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
29                                 int wait_to_complete);
30 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
31 static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
32 static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
33                                         uint16_t queue);
34
35 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
36 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
37 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
38 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
39 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
40 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
41                                       struct rte_intr_handle *handle);
42 static void txgbe_dev_interrupt_handler(void *param);
43 static void txgbe_dev_interrupt_delayed_handler(void *param);
44 static void txgbe_configure_msix(struct rte_eth_dev *dev);
45
46 #define TXGBE_SET_HWSTRIP(h, q) do {\
47                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
48                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
49                 (h)->bitmap[idx] |= 1 << bit;\
50         } while (0)
51
52 #define TXGBE_CLEAR_HWSTRIP(h, q) do {\
53                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
54                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
55                 (h)->bitmap[idx] &= ~(1 << bit);\
56         } while (0)
57
58 #define TXGBE_GET_HWSTRIP(h, q, r) do {\
59                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
60                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
61                 (r) = (h)->bitmap[idx] >> bit & 1;\
62         } while (0)
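/*
 * Worked example (assuming 32-bit bitmap words, i.e. NBBY == 8 and
 * sizeof((h)->bitmap[0]) == 4): queue 37 maps to idx = 37 / 32 = 1 and
 * bit = 37 % 32 = 5, so TXGBE_SET_HWSTRIP() sets bit 5 of bitmap[1].
 */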
63
64 /*
65  * The set of PCI devices this driver supports
66  */
67 static const struct rte_pci_id pci_id_txgbe_map[] = {
68         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
69         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
70         { .vendor_id = 0, /* sentinel */ },
71 };
72
73 static const struct rte_eth_desc_lim rx_desc_lim = {
74         .nb_max = TXGBE_RING_DESC_MAX,
75         .nb_min = TXGBE_RING_DESC_MIN,
76         .nb_align = TXGBE_RXD_ALIGN,
77 };
78
79 static const struct rte_eth_desc_lim tx_desc_lim = {
80         .nb_max = TXGBE_RING_DESC_MAX,
81         .nb_min = TXGBE_RING_DESC_MIN,
82         .nb_align = TXGBE_TXD_ALIGN,
83         .nb_seg_max = TXGBE_TX_MAX_SEG,
84         .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
85 };
86
87 static const struct eth_dev_ops txgbe_eth_dev_ops;
88
89 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
90 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
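/*
 * Example expansion: HW_XSTAT(rx_packets) produces the table entry
 * {"rx_packets", offsetof(struct txgbe_hw_stats, rx_packets)}.
 */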
91 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
92         /* MNG RxTx */
93         HW_XSTAT(mng_bmc2host_packets),
94         HW_XSTAT(mng_host2bmc_packets),
95         /* Basic RxTx */
96         HW_XSTAT(rx_packets),
97         HW_XSTAT(tx_packets),
98         HW_XSTAT(rx_bytes),
99         HW_XSTAT(tx_bytes),
100         HW_XSTAT(rx_total_bytes),
101         HW_XSTAT(rx_total_packets),
102         HW_XSTAT(tx_total_packets),
103         HW_XSTAT(rx_total_missed_packets),
104         HW_XSTAT(rx_broadcast_packets),
105         HW_XSTAT(rx_multicast_packets),
106         HW_XSTAT(rx_management_packets),
107         HW_XSTAT(tx_management_packets),
108         HW_XSTAT(rx_management_dropped),
109
110         /* Basic Error */
111         HW_XSTAT(rx_crc_errors),
112         HW_XSTAT(rx_illegal_byte_errors),
113         HW_XSTAT(rx_error_bytes),
114         HW_XSTAT(rx_mac_short_packet_dropped),
115         HW_XSTAT(rx_length_errors),
116         HW_XSTAT(rx_undersize_errors),
117         HW_XSTAT(rx_fragment_errors),
118         HW_XSTAT(rx_oversize_errors),
119         HW_XSTAT(rx_jabber_errors),
120         HW_XSTAT(rx_l3_l4_xsum_error),
121         HW_XSTAT(mac_local_errors),
122         HW_XSTAT(mac_remote_errors),
123
124         /* Flow Director */
125         HW_XSTAT(flow_director_added_filters),
126         HW_XSTAT(flow_director_removed_filters),
127         HW_XSTAT(flow_director_filter_add_errors),
128         HW_XSTAT(flow_director_filter_remove_errors),
129         HW_XSTAT(flow_director_matched_filters),
130         HW_XSTAT(flow_director_missed_filters),
131
132         /* FCoE */
133         HW_XSTAT(rx_fcoe_crc_errors),
134         HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
135         HW_XSTAT(rx_fcoe_dropped),
136         HW_XSTAT(rx_fcoe_packets),
137         HW_XSTAT(tx_fcoe_packets),
138         HW_XSTAT(rx_fcoe_bytes),
139         HW_XSTAT(tx_fcoe_bytes),
140         HW_XSTAT(rx_fcoe_no_ddp),
141         HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
142
143         /* MACSEC */
144         HW_XSTAT(tx_macsec_pkts_untagged),
145         HW_XSTAT(tx_macsec_pkts_encrypted),
146         HW_XSTAT(tx_macsec_pkts_protected),
147         HW_XSTAT(tx_macsec_octets_encrypted),
148         HW_XSTAT(tx_macsec_octets_protected),
149         HW_XSTAT(rx_macsec_pkts_untagged),
150         HW_XSTAT(rx_macsec_pkts_badtag),
151         HW_XSTAT(rx_macsec_pkts_nosci),
152         HW_XSTAT(rx_macsec_pkts_unknownsci),
153         HW_XSTAT(rx_macsec_octets_decrypted),
154         HW_XSTAT(rx_macsec_octets_validated),
155         HW_XSTAT(rx_macsec_sc_pkts_unchecked),
156         HW_XSTAT(rx_macsec_sc_pkts_delayed),
157         HW_XSTAT(rx_macsec_sc_pkts_late),
158         HW_XSTAT(rx_macsec_sa_pkts_ok),
159         HW_XSTAT(rx_macsec_sa_pkts_invalid),
160         HW_XSTAT(rx_macsec_sa_pkts_notvalid),
161         HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
162         HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
163
164         /* MAC RxTx */
165         HW_XSTAT(rx_size_64_packets),
166         HW_XSTAT(rx_size_65_to_127_packets),
167         HW_XSTAT(rx_size_128_to_255_packets),
168         HW_XSTAT(rx_size_256_to_511_packets),
169         HW_XSTAT(rx_size_512_to_1023_packets),
170         HW_XSTAT(rx_size_1024_to_max_packets),
171         HW_XSTAT(tx_size_64_packets),
172         HW_XSTAT(tx_size_65_to_127_packets),
173         HW_XSTAT(tx_size_128_to_255_packets),
174         HW_XSTAT(tx_size_256_to_511_packets),
175         HW_XSTAT(tx_size_512_to_1023_packets),
176         HW_XSTAT(tx_size_1024_to_max_packets),
177
178         /* Flow Control */
179         HW_XSTAT(tx_xon_packets),
180         HW_XSTAT(rx_xon_packets),
181         HW_XSTAT(tx_xoff_packets),
182         HW_XSTAT(rx_xoff_packets),
183
184         HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
185         HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
186         HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
187         HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
188 };
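/*
 * Note: the HW_XSTAT_NAME() entries above re-export the pause-frame counters
 * under the generic rx/tx_flow_control_* names, so those offsets intentionally
 * appear twice in the table.
 */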
189
190 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
191                            sizeof(rte_txgbe_stats_strings[0]))
192
193 /* Per-priority statistics */
194 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
195 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
196         UP_XSTAT(rx_up_packets),
197         UP_XSTAT(tx_up_packets),
198         UP_XSTAT(rx_up_bytes),
199         UP_XSTAT(tx_up_bytes),
200         UP_XSTAT(rx_up_drop_packets),
201
202         UP_XSTAT(tx_up_xon_packets),
203         UP_XSTAT(rx_up_xon_packets),
204         UP_XSTAT(tx_up_xoff_packets),
205         UP_XSTAT(rx_up_xoff_packets),
206         UP_XSTAT(rx_up_dropped),
207         UP_XSTAT(rx_up_mbuf_alloc_errors),
208         UP_XSTAT(tx_up_xon2off_packets),
209 };
210
211 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
212                            sizeof(rte_txgbe_up_strings[0]))
213
214 /* Per-queue statistics */
215 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
216 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
217         QP_XSTAT(rx_qp_packets),
218         QP_XSTAT(tx_qp_packets),
219         QP_XSTAT(rx_qp_bytes),
220         QP_XSTAT(tx_qp_bytes),
221         QP_XSTAT(rx_qp_mc_packets),
222 };
223
224 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
225                            sizeof(rte_txgbe_qp_strings[0]))
226
227 static inline int
228 txgbe_is_sfp(struct txgbe_hw *hw)
229 {
230         switch (hw->phy.type) {
231         case txgbe_phy_sfp_avago:
232         case txgbe_phy_sfp_ftl:
233         case txgbe_phy_sfp_intel:
234         case txgbe_phy_sfp_unknown:
235         case txgbe_phy_sfp_tyco_passive:
236         case txgbe_phy_sfp_unknown_passive:
237                 return 1;
238         default:
239                 return 0;
240         }
241 }
242
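/*
 * Reset the PF and set the PF Reset Done bit so PF/VF mailbox operations can
 * resume; a missing SFP module is not treated as a failure here.
 */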
243 static inline int32_t
244 txgbe_pf_reset_hw(struct txgbe_hw *hw)
245 {
246         uint32_t ctrl_ext;
247         int32_t status;
248
249         status = hw->mac.reset_hw(hw);
250
251         ctrl_ext = rd32(hw, TXGBE_PORTCTL);
252         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
253         ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
254         wr32(hw, TXGBE_PORTCTL, ctrl_ext);
255         txgbe_flush(hw);
256
257         if (status == TXGBE_ERR_SFP_NOT_PRESENT)
258                 status = 0;
259         return status;
260 }
261
262 static inline void
263 txgbe_enable_intr(struct rte_eth_dev *dev)
264 {
265         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
266         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
267
268         wr32(hw, TXGBE_IENMISC, intr->mask_misc);
269         wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
270         wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
271         txgbe_flush(hw);
272 }
273
274 static void
275 txgbe_disable_intr(struct txgbe_hw *hw)
276 {
277         PMD_INIT_FUNC_TRACE();
278
279         wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
280         wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
281         wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
282         txgbe_flush(hw);
283 }
284
285 static int
286 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
287                                   uint16_t queue_id,
288                                   uint8_t stat_idx,
289                                   uint8_t is_rx)
290 {
291         struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
292         struct txgbe_stat_mappings *stat_mappings =
293                 TXGBE_DEV_STAT_MAPPINGS(eth_dev);
294         uint32_t qsmr_mask = 0;
295         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
296         uint32_t q_map;
297         uint8_t n, offset;
298
299         if (hw->mac.type != txgbe_mac_raptor)
300                 return -ENOSYS;
301
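        /* reject stat indexes with bits set outside the queue-map field */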
302         if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
303                 return -EIO;
304
305         PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
306                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
307                      queue_id, stat_idx);
308
309         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
310         if (n >= TXGBE_NB_STAT_MAPPING) {
311                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
312                 return -EIO;
313         }
314         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
315
316         /* Now clear any previous stat_idx set */
317         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
318         if (!is_rx)
319                 stat_mappings->tqsm[n] &= ~clearing_mask;
320         else
321                 stat_mappings->rqsm[n] &= ~clearing_mask;
322
323         q_map = (uint32_t)stat_idx;
324         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
325         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
326         if (!is_rx)
327                 stat_mappings->tqsm[n] |= qsmr_mask;
328         else
329                 stat_mappings->rqsm[n] |= qsmr_mask;
330
331         PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
332                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
333                      queue_id, stat_idx);
334         PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
335                      is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
336         return 0;
337 }
338
339 /*
340  * Ensure that all locks are released before first NVM or PHY access
341  */
342 static void
343 txgbe_swfw_lock_reset(struct txgbe_hw *hw)
344 {
345         uint16_t mask;
346
347         /*
348          * These locks are trickier since they are common to all ports; but
349          * swfw_sync retries long enough (1s) to be almost sure that if the
350          * lock cannot be taken, it is due to an improper lock of the
351          * semaphore.
352          */
353         mask = TXGBE_MNGSEM_SWPHY |
354                TXGBE_MNGSEM_SWMBX |
355                TXGBE_MNGSEM_SWFLASH;
356         if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
357                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
358
359         hw->mac.release_swfw_sync(hw, mask);
360 }
361
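/*
 * eth_txgbe_dev_init() brings up one PF port: it wires up the Rx/Tx burst
 * handlers, maps the interrupt status block, initializes the base code,
 * EEPROM and MAC, allocates the MAC address tables, and finally registers
 * and enables the PCI interrupt handler.
 */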
362 static int
363 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
364 {
365         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
366         struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
367         struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
368         struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
369         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
370         const struct rte_memzone *mz;
371         uint16_t csum;
372         int err;
373
374         PMD_INIT_FUNC_TRACE();
375
376         eth_dev->dev_ops = &txgbe_eth_dev_ops;
377         eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
378         eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
379         eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
380
381         /*
382          * For secondary processes, we don't initialise any further as primary
383          * has already done this work. Only check we don't need a different
384          * RX and TX function.
385          */
386         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
387                 struct txgbe_tx_queue *txq;
388                 /* TX queue function in primary, set by last queue initialized;
389                  * Tx queues may not have been initialized by the primary process.
390                  */
391                 if (eth_dev->data->tx_queues) {
392                         uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
393                         txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
394                         txgbe_set_tx_function(eth_dev, txq);
395                 } else {
396                         /* Use default TX function if we get here */
397                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
398                                      "Using default TX function.");
399                 }
400
401                 txgbe_set_rx_function(eth_dev);
402
403                 return 0;
404         }
405
406         rte_eth_copy_pci_info(eth_dev, pci_dev);
407
408         /* Vendor and Device ID need to be set before init of shared code */
409         hw->device_id = pci_dev->id.device_id;
410         hw->vendor_id = pci_dev->id.vendor_id;
411         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
412         hw->allow_unsupported_sfp = 1;
413
414         /* Reserve memory for interrupt status block */
415         mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
416                 16, TXGBE_ALIGN, SOCKET_ID_ANY);
417         if (mz == NULL)
418                 return -ENOMEM;
419
420         hw->isb_dma = TMZ_PADDR(mz);
421         hw->isb_mem = TMZ_VADDR(mz);
422
423         /* Initialize the shared code (base driver) */
424         err = txgbe_init_shared_code(hw);
425         if (err != 0) {
426                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
427                 return -EIO;
428         }
429
430         /* Unlock any pending hardware semaphore */
431         txgbe_swfw_lock_reset(hw);
432
433         err = hw->rom.init_params(hw);
434         if (err != 0) {
435                 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
436                 return -EIO;
437         }
438
439         /* Make sure we have a good EEPROM before we read from it */
440         err = hw->rom.validate_checksum(hw, &csum);
441         if (err != 0) {
442                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
443                 return -EIO;
444         }
445
446         err = hw->mac.init_hw(hw);
447
448         /*
449          * Devices with copper phys will fail to initialise if txgbe_init_hw()
450          * is called too soon after the kernel driver unbinding/binding occurs.
451          * The failure occurs in txgbe_identify_phy() for all devices,
452          * but for non-copper devices, txgbe_identify_sfp_module() is
453          * also called. See txgbe_identify_phy(). The reason for the
454          * failure is not known, and only occurs when virtualisation features
455          * are disabled in the BIOS. A delay of 200ms was found to be enough by
456          * trial-and-error, and is doubled to be safe.
457          */
458         if (err && hw->phy.media_type == txgbe_media_type_copper) {
459                 rte_delay_ms(200);
460                 err = hw->mac.init_hw(hw);
461         }
462
463         if (err == TXGBE_ERR_SFP_NOT_PRESENT)
464                 err = 0;
465
466         if (err == TXGBE_ERR_EEPROM_VERSION) {
467                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
468                              "LOM.  Please be aware there may be issues associated "
469                              "with your hardware.");
470                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
471                              "please contact your hardware representative "
472                              "who provided you with this hardware.");
473         } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
474                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
475         }
476         if (err) {
477                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
478                 return -EIO;
479         }
480
481         /* Reset the hw statistics */
482         txgbe_dev_stats_reset(eth_dev);
483
484         /* disable interrupt */
485         txgbe_disable_intr(hw);
486
487         /* Allocate memory for storing MAC addresses */
488         eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
489                                                hw->mac.num_rar_entries, 0);
490         if (eth_dev->data->mac_addrs == NULL) {
491                 PMD_INIT_LOG(ERR,
492                              "Failed to allocate %u bytes needed to store "
493                              "MAC addresses",
494                              RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
495                 return -ENOMEM;
496         }
497
498         /* Copy the permanent MAC address */
499         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
500                         &eth_dev->data->mac_addrs[0]);
501
502         /* Allocate memory for storing hash filter MAC addresses */
503         eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
504                         RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
505         if (eth_dev->data->hash_mac_addrs == NULL) {
506                 PMD_INIT_LOG(ERR,
507                              "Failed to allocate %d bytes needed to store MAC addresses",
508                              RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
509                 return -ENOMEM;
510         }
511
512         /* initialize the vfta */
513         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
514
515         /* initialize the hw strip bitmap */
516         memset(hwstrip, 0, sizeof(*hwstrip));
517
518         if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
519                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
520                              (int)hw->mac.type, (int)hw->phy.type,
521                              (int)hw->phy.sfp_type);
522         else
523                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
524                              (int)hw->mac.type, (int)hw->phy.type);
525
526         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
527                      eth_dev->data->port_id, pci_dev->id.vendor_id,
528                      pci_dev->id.device_id);
529
530         rte_intr_callback_register(intr_handle,
531                                    txgbe_dev_interrupt_handler, eth_dev);
532
533         /* enable uio/vfio intr/eventfd mapping */
534         rte_intr_enable(intr_handle);
535
536         /* enable support intr */
537         txgbe_enable_intr(eth_dev);
538
539         return 0;
540 }
541
542 static int
543 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
544 {
545         PMD_INIT_FUNC_TRACE();
546
547         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
548                 return 0;
549
550         txgbe_dev_close(eth_dev);
551
552         return 0;
553 }
554
555 static int
556 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
557                 struct rte_pci_device *pci_dev)
558 {
559         struct rte_eth_dev *pf_ethdev;
560         struct rte_eth_devargs eth_da;
561         int retval;
562
563         if (pci_dev->device.devargs) {
564                 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
565                                 &eth_da);
566                 if (retval)
567                         return retval;
568         } else {
569                 memset(&eth_da, 0, sizeof(eth_da));
570         }
571
572         retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
573                         sizeof(struct txgbe_adapter),
574                         eth_dev_pci_specific_init, pci_dev,
575                         eth_txgbe_dev_init, NULL);
576
577         if (retval || eth_da.nb_representor_ports < 1)
578                 return retval;
579
580         pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
581         if (pf_ethdev == NULL)
582                 return -ENODEV;
583
584         return 0;
585 }
586
587 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
588 {
589         struct rte_eth_dev *ethdev;
590
591         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
592         if (!ethdev)
593                 return -ENODEV;
594
595         return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
596 }
597
598 static struct rte_pci_driver rte_txgbe_pmd = {
599         .id_table = pci_id_txgbe_map,
600         .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
601                      RTE_PCI_DRV_INTR_LSC,
602         .probe = eth_txgbe_pci_probe,
603         .remove = eth_txgbe_pci_remove,
604 };
605
606 static int
607 txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
608 {
609         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
610         struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
611         uint32_t vfta;
612         uint32_t vid_idx;
613         uint32_t vid_bit;
614
615         vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
616         vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
617         vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
618         if (on)
619                 vfta |= vid_bit;
620         else
621                 vfta &= ~vid_bit;
622         wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
623
624         /* update local VFTA copy */
625         shadow_vfta->vfta[vid_idx] = vfta;
626
627         return 0;
628 }
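/*
 * Worked example: vlan_id 100 gives vid_idx = (100 >> 5) & 0x7F = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4, so bit 4 of VLANTBL register 3 is
 * set or cleared and mirrored into the shadow VFTA.
 */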
629
630 static void
631 txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
632 {
633         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
634         struct txgbe_rx_queue *rxq;
635         bool restart;
636         uint32_t rxcfg, rxbal, rxbah;
637
638         if (on)
639                 txgbe_vlan_hw_strip_enable(dev, queue);
640         else
641                 txgbe_vlan_hw_strip_disable(dev, queue);
642
643         rxq = dev->data->rx_queues[queue];
644         rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
645         rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
646         rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
647         if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
648                 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
649                         !(rxcfg & TXGBE_RXCFG_VLAN);
650                 rxcfg |= TXGBE_RXCFG_VLAN;
651         } else {
652                 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
653                         (rxcfg & TXGBE_RXCFG_VLAN);
654                 rxcfg &= ~TXGBE_RXCFG_VLAN;
655         }
656         rxcfg &= ~TXGBE_RXCFG_ENA;
657
658         if (restart) {
659                 /* set vlan strip for ring */
660                 txgbe_dev_rx_queue_stop(dev, queue);
661                 wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
662                 wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
663                 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
664                 txgbe_dev_rx_queue_start(dev, queue);
665         }
666 }
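/*
 * Note: the ring is stopped and restarted only when it is currently enabled
 * and the VLAN-strip bit actually changes; the base-address registers are
 * saved beforehand and rewritten together with the new RXCFG value.
 */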
667
668 static int
669 txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
670                     enum rte_vlan_type vlan_type,
671                     uint16_t tpid)
672 {
673         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
674         int ret = 0;
675         uint32_t portctrl, vlan_ext, qinq;
676
677         portctrl = rd32(hw, TXGBE_PORTCTL);
678
679         vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
680         qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
681         switch (vlan_type) {
682         case ETH_VLAN_TYPE_INNER:
683                 if (vlan_ext) {
684                         wr32m(hw, TXGBE_VLANCTL,
685                                 TXGBE_VLANCTL_TPID_MASK,
686                                 TXGBE_VLANCTL_TPID(tpid));
687                         wr32m(hw, TXGBE_DMATXCTRL,
688                                 TXGBE_DMATXCTRL_TPID_MASK,
689                                 TXGBE_DMATXCTRL_TPID(tpid));
690                 } else {
691                         ret = -ENOTSUP;
692                         PMD_DRV_LOG(ERR, "Inner type is not supported"
693                                     " by single VLAN");
694                 }
695
696                 if (qinq) {
697                         wr32m(hw, TXGBE_TAGTPID(0),
698                                 TXGBE_TAGTPID_LSB_MASK,
699                                 TXGBE_TAGTPID_LSB(tpid));
700                 }
701                 break;
702         case ETH_VLAN_TYPE_OUTER:
703                 if (vlan_ext) {
704                         /* Only the high 16 bits are valid */
705                         wr32m(hw, TXGBE_EXTAG,
706                                 TXGBE_EXTAG_VLAN_MASK,
707                                 TXGBE_EXTAG_VLAN(tpid));
708                 } else {
709                         wr32m(hw, TXGBE_VLANCTL,
710                                 TXGBE_VLANCTL_TPID_MASK,
711                                 TXGBE_VLANCTL_TPID(tpid));
712                         wr32m(hw, TXGBE_DMATXCTRL,
713                                 TXGBE_DMATXCTRL_TPID_MASK,
714                                 TXGBE_DMATXCTRL_TPID(tpid));
715                 }
716
717                 if (qinq) {
718                         wr32m(hw, TXGBE_TAGTPID(0),
719                                 TXGBE_TAGTPID_MSB_MASK,
720                                 TXGBE_TAGTPID_MSB(tpid));
721                 }
722                 break;
723         default:
724                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
725                 return -EINVAL;
726         }
727
728         return ret;
729 }
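/*
 * Usage sketch (illustrative): an application selecting the 802.1ad S-TAG
 * reaches this handler through something like
 * rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER, 0x88A8);
 * the inner type can only be changed once VLAN extend mode is enabled.
 */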
730
731 void
732 txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
733 {
734         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
735         uint32_t vlnctrl;
736
737         PMD_INIT_FUNC_TRACE();
738
739         /* Filter Table Disable */
740         vlnctrl = rd32(hw, TXGBE_VLANCTL);
741         vlnctrl &= ~TXGBE_VLANCTL_VFE;
742         wr32(hw, TXGBE_VLANCTL, vlnctrl);
743 }
744
745 void
746 txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
747 {
748         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
749         struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
750         uint32_t vlnctrl;
751         uint16_t i;
752
753         PMD_INIT_FUNC_TRACE();
754
755         /* Filter Table Enable */
756         vlnctrl = rd32(hw, TXGBE_VLANCTL);
757         vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
758         vlnctrl |= TXGBE_VLANCTL_VFE;
759         wr32(hw, TXGBE_VLANCTL, vlnctrl);
760
761         /* write whatever is in local vfta copy */
762         for (i = 0; i < TXGBE_VFTA_SIZE; i++)
763                 wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
764 }
765
766 void
767 txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
768 {
769         struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
770         struct txgbe_rx_queue *rxq;
771
772         if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
773                 return;
774
775         if (on)
776                 TXGBE_SET_HWSTRIP(hwstrip, queue);
777         else
778                 TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
779
780         if (queue >= dev->data->nb_rx_queues)
781                 return;
782
783         rxq = dev->data->rx_queues[queue];
784
785         if (on) {
786                 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
787                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
788         } else {
789                 rxq->vlan_flags = PKT_RX_VLAN;
790                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
791         }
792 }
793
794 static void
795 txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
796 {
797         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
798         uint32_t ctrl;
799
800         PMD_INIT_FUNC_TRACE();
801
802         ctrl = rd32(hw, TXGBE_RXCFG(queue));
803         ctrl &= ~TXGBE_RXCFG_VLAN;
804         wr32(hw, TXGBE_RXCFG(queue), ctrl);
805
806         /* record this setting in the per-queue HW strip bitmap */
807         txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
808 }
809
810 static void
811 txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
812 {
813         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
814         uint32_t ctrl;
815
816         PMD_INIT_FUNC_TRACE();
817
818         ctrl = rd32(hw, TXGBE_RXCFG(queue));
819         ctrl |= TXGBE_RXCFG_VLAN;
820         wr32(hw, TXGBE_RXCFG(queue), ctrl);
821
822         /* record this setting in the per-queue HW strip bitmap */
823         txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
824 }
825
826 static void
827 txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
828 {
829         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
830         uint32_t ctrl;
831
832         PMD_INIT_FUNC_TRACE();
833
834         ctrl = rd32(hw, TXGBE_PORTCTL);
835         ctrl &= ~TXGBE_PORTCTL_VLANEXT;
836         ctrl &= ~TXGBE_PORTCTL_QINQ;
837         wr32(hw, TXGBE_PORTCTL, ctrl);
838 }
839
840 static void
841 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
842 {
843         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
844         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
845         struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
846         uint32_t ctrl;
847
848         PMD_INIT_FUNC_TRACE();
849
850         ctrl  = rd32(hw, TXGBE_PORTCTL);
851         ctrl |= TXGBE_PORTCTL_VLANEXT;
852         if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
853             txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
854                 ctrl |= TXGBE_PORTCTL_QINQ;
855         wr32(hw, TXGBE_PORTCTL, ctrl);
856 }
857
858 void
859 txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
860 {
861         struct txgbe_rx_queue *rxq;
862         uint16_t i;
863
864         PMD_INIT_FUNC_TRACE();
865
866         for (i = 0; i < dev->data->nb_rx_queues; i++) {
867                 rxq = dev->data->rx_queues[i];
868
869                 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
870                         txgbe_vlan_strip_queue_set(dev, i, 1);
871                 else
872                         txgbe_vlan_strip_queue_set(dev, i, 0);
873         }
874 }
875
876 void
877 txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
878 {
879         uint16_t i;
880         struct rte_eth_rxmode *rxmode;
881         struct txgbe_rx_queue *rxq;
882
883         if (mask & ETH_VLAN_STRIP_MASK) {
884                 rxmode = &dev->data->dev_conf.rxmode;
885                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
886                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
887                                 rxq = dev->data->rx_queues[i];
888                                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
889                         }
890                 else
891                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
892                                 rxq = dev->data->rx_queues[i];
893                                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
894                         }
895         }
896 }
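/*
 * Note: this helper only mirrors the port-level VLAN_STRIP offload into each
 * queue's offload flags; the hardware itself is reprogrammed afterwards by
 * txgbe_vlan_offload_config() via txgbe_vlan_hw_strip_config().
 */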
897
898 static int
899 txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
900 {
901         struct rte_eth_rxmode *rxmode;
902         rxmode = &dev->data->dev_conf.rxmode;
903
904         if (mask & ETH_VLAN_STRIP_MASK)
905                 txgbe_vlan_hw_strip_config(dev);
906
907         if (mask & ETH_VLAN_FILTER_MASK) {
908                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
909                         txgbe_vlan_hw_filter_enable(dev);
910                 else
911                         txgbe_vlan_hw_filter_disable(dev);
912         }
913
914         if (mask & ETH_VLAN_EXTEND_MASK) {
915                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
916                         txgbe_vlan_hw_extend_enable(dev);
917                 else
918                         txgbe_vlan_hw_extend_disable(dev);
919         }
920
921         return 0;
922 }
923
924 static int
925 txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
926 {
927         txgbe_config_vlan_strip_on_all_queues(dev, mask);
928
929         txgbe_vlan_offload_config(dev, mask);
930
931         return 0;
932 }
933
934 static int
935 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
936 {
937         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
938
939         switch (nb_rx_q) {
940         case 1:
941         case 2:
942                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
943                 break;
944         case 4:
945                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
946                 break;
947         default:
948                 return -EINVAL;
949         }
950
951         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
952                 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
953         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
954                 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
955         return 0;
956 }
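/*
 * Example: a VF asking for 4 RSS queues selects ETH_32_POOLS, so each pool
 * gets TXGBE_MAX_RX_QUEUE_NUM / 32 queues and the PF's default pool starts
 * right after the max_vfs * nb_q_per_pool queues reserved for the VFs.
 */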
957
958 static int
959 txgbe_check_mq_mode(struct rte_eth_dev *dev)
960 {
961         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
962         uint16_t nb_rx_q = dev->data->nb_rx_queues;
963         uint16_t nb_tx_q = dev->data->nb_tx_queues;
964
965         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
966                 /* check multi-queue mode */
967                 switch (dev_conf->rxmode.mq_mode) {
968                 case ETH_MQ_RX_VMDQ_DCB:
969                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
970                         break;
971                 case ETH_MQ_RX_VMDQ_DCB_RSS:
972                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
973                         PMD_INIT_LOG(ERR, "SRIOV active,"
974                                         " unsupported mq_mode rx %d.",
975                                         dev_conf->rxmode.mq_mode);
976                         return -EINVAL;
977                 case ETH_MQ_RX_RSS:
978                 case ETH_MQ_RX_VMDQ_RSS:
979                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
980                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
981                                 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
982                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
983                                                 " invalid queue number"
984                                                 " for VMDQ RSS, allowed"
985                                                 " value are 1, 2 or 4.");
986                                         return -EINVAL;
987                                 }
988                         break;
989                 case ETH_MQ_RX_VMDQ_ONLY:
990                 case ETH_MQ_RX_NONE:
991                         /* if no mq mode is configured, use the default scheme */
992                         dev->data->dev_conf.rxmode.mq_mode =
993                                 ETH_MQ_RX_VMDQ_ONLY;
994                         break;
995                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
996                         /* SRIOV only works in VMDq enable mode */
997                         PMD_INIT_LOG(ERR, "SRIOV is active,"
998                                         " wrong mq_mode rx %d.",
999                                         dev_conf->rxmode.mq_mode);
1000                         return -EINVAL;
1001                 }
1002
1003                 switch (dev_conf->txmode.mq_mode) {
1004                 case ETH_MQ_TX_VMDQ_DCB:
1005                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1006                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1007                         break;
1008                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1009                         dev->data->dev_conf.txmode.mq_mode =
1010                                 ETH_MQ_TX_VMDQ_ONLY;
1011                         break;
1012                 }
1013
1014                 /* check valid queue number */
1015                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1016                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1017                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1018                                         " nb_rx_q=%d nb_tx_q=%d queue number"
1019                                         " must be less than or equal to %d.",
1020                                         nb_rx_q, nb_tx_q,
1021                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1022                         return -EINVAL;
1023                 }
1024         } else {
1025                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1026                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1027                                           " not supported.");
1028                         return -EINVAL;
1029                 }
1030                 /* check configuration for vmdq+dcb mode */
1031                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1032                         const struct rte_eth_vmdq_dcb_conf *conf;
1033
1034                         if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1035                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1036                                                 TXGBE_VMDQ_DCB_NB_QUEUES);
1037                                 return -EINVAL;
1038                         }
1039                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1040                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1041                                conf->nb_queue_pools == ETH_32_POOLS)) {
1042                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1043                                                 " nb_queue_pools must be %d or %d.",
1044                                                 ETH_16_POOLS, ETH_32_POOLS);
1045                                 return -EINVAL;
1046                         }
1047                 }
1048                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1049                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
1050
1051                         if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1052                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1053                                                  TXGBE_VMDQ_DCB_NB_QUEUES);
1054                                 return -EINVAL;
1055                         }
1056                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1057                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1058                                conf->nb_queue_pools == ETH_32_POOLS)) {
1059                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1060                                                 " nb_queue_pools != %d and"
1061                                                 " nb_queue_pools != %d.",
1062                                                 ETH_16_POOLS, ETH_32_POOLS);
1063                                 return -EINVAL;
1064                         }
1065                 }
1066
1067                 /* For DCB mode check our configuration before we go further */
1068                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1069                         const struct rte_eth_dcb_rx_conf *conf;
1070
1071                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1072                         if (!(conf->nb_tcs == ETH_4_TCS ||
1073                                conf->nb_tcs == ETH_8_TCS)) {
1074                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1075                                                 " and nb_tcs != %d.",
1076                                                 ETH_4_TCS, ETH_8_TCS);
1077                                 return -EINVAL;
1078                         }
1079                 }
1080
1081                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1082                         const struct rte_eth_dcb_tx_conf *conf;
1083
1084                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1085                         if (!(conf->nb_tcs == ETH_4_TCS ||
1086                                conf->nb_tcs == ETH_8_TCS)) {
1087                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1088                                                 " and nb_tcs != %d.",
1089                                                 ETH_4_TCS, ETH_8_TCS);
1090                                 return -EINVAL;
1091                         }
1092                 }
1093         }
1094         return 0;
1095 }
1096
1097 static int
1098 txgbe_dev_configure(struct rte_eth_dev *dev)
1099 {
1100         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1101         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1102         int ret;
1103
1104         PMD_INIT_FUNC_TRACE();
1105
1106         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1107                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1108
1109         /* multiple queue mode checking */
1110         ret  = txgbe_check_mq_mode(dev);
1111         if (ret != 0) {
1112                 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1113                             ret);
1114                 return ret;
1115         }
1116
1117         /* set flag to update link status after init */
1118         intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1119
1120         /*
1121          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
1122          * allocation preconditions, it will be reset.
1123          */
1124         adapter->rx_bulk_alloc_allowed = true;
1125
1126         return 0;
1127 }
1128
1129 static void
1130 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1131 {
1132         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1133         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1134         uint32_t gpie;
1135
1136         gpie = rd32(hw, TXGBE_GPIOINTEN);
1137         gpie |= TXGBE_GPIOBIT_6;
1138         wr32(hw, TXGBE_GPIOINTEN, gpie);
1139         intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1140 }
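/*
 * Unmask GPIO interrupt bit 6 and add the GPIO source to the misc interrupt
 * mask so that PHY-related events are delivered through the misc interrupt.
 */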
1141
1142 /*
1143  * Configure device link speed and setup link.
1144  * It returns 0 on success.
1145  */
1146 static int
1147 txgbe_dev_start(struct rte_eth_dev *dev)
1148 {
1149         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1150         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1151         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1152         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1153         uint32_t intr_vector = 0;
1154         int err;
1155         bool link_up = false, negotiate = 0;
1156         uint32_t speed = 0;
1157         uint32_t allowed_speeds = 0;
1158         int mask = 0;
1159         int status;
1160         uint32_t *link_speeds;
1161
1162         PMD_INIT_FUNC_TRACE();
1163
1164         /* TXGBE devices don't support:
1165          *    - half duplex (checked afterwards for valid speeds)
1166          *    - fixed speed: TODO implement
1167          */
1168         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1169                 PMD_INIT_LOG(ERR,
1170                 "Invalid link_speeds for port %u, fixed speed not supported",
1171                                 dev->data->port_id);
1172                 return -EINVAL;
1173         }
1174
1175         /* Stop the link setup handler before resetting the HW. */
1176         rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1177
1178         /* disable uio/vfio intr/eventfd mapping */
1179         rte_intr_disable(intr_handle);
1180
1181         /* stop adapter */
1182         hw->adapter_stopped = 0;
1183         txgbe_stop_hw(hw);
1184
1185         /* reinitialize adapter
1186          * this calls reset and start
1187          */
1188         hw->nb_rx_queues = dev->data->nb_rx_queues;
1189         hw->nb_tx_queues = dev->data->nb_tx_queues;
1190         status = txgbe_pf_reset_hw(hw);
1191         if (status != 0)
1192                 return -1;
1193         hw->mac.start_hw(hw);
1194         hw->mac.get_link_status = true;
1195
1196         txgbe_dev_phy_intr_setup(dev);
1197
1198         /* check and configure queue intr-vector mapping */
1199         if ((rte_intr_cap_multiple(intr_handle) ||
1200              !RTE_ETH_DEV_SRIOV(dev).active) &&
1201             dev->data->dev_conf.intr_conf.rxq != 0) {
1202                 intr_vector = dev->data->nb_rx_queues;
1203                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1204                         return -1;
1205         }
1206
1207         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1208                 intr_handle->intr_vec =
1209                         rte_zmalloc("intr_vec",
1210                                     dev->data->nb_rx_queues * sizeof(int), 0);
1211                 if (intr_handle->intr_vec == NULL) {
1212                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1213                                      " intr_vec", dev->data->nb_rx_queues);
1214                         return -ENOMEM;
1215                 }
1216         }
1217
1218         /* configure MSI-X for sleep until Rx interrupt */
1219         txgbe_configure_msix(dev);
1220
1221         /* initialize transmission unit */
1222         txgbe_dev_tx_init(dev);
1223
1224         /* This can fail when allocating mbufs for descriptor rings */
1225         err = txgbe_dev_rx_init(dev);
1226         if (err) {
1227                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1228                 goto error;
1229         }
1230
1231         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1232                 ETH_VLAN_EXTEND_MASK;
1233         err = txgbe_vlan_offload_config(dev, mask);
1234         if (err) {
1235                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1236                 goto error;
1237         }
1238
1239         err = txgbe_dev_rxtx_start(dev);
1240         if (err < 0) {
1241                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1242                 goto error;
1243         }
1244
1245         /* Skip link setup if loopback mode is enabled. */
1246         if (hw->mac.type == txgbe_mac_raptor &&
1247             dev->data->dev_conf.lpbk_mode)
1248                 goto skip_link_setup;
1249
1250         if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1251                 err = hw->mac.setup_sfp(hw);
1252                 if (err)
1253                         goto error;
1254         }
1255
1256         if (hw->phy.media_type == txgbe_media_type_copper) {
1257                 /* Turn on the copper */
1258                 hw->phy.set_phy_power(hw, true);
1259         } else {
1260                 /* Turn on the laser */
1261                 hw->mac.enable_tx_laser(hw);
1262         }
1263
1264         err = hw->mac.check_link(hw, &speed, &link_up, 0);
1265         if (err)
1266                 goto error;
1267         dev->data->dev_link.link_status = link_up;
1268
1269         err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1270         if (err)
1271                 goto error;
1272
1273         allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
1274                         ETH_LINK_SPEED_10G;
1275
1276         link_speeds = &dev->data->dev_conf.link_speeds;
1277         if (*link_speeds & ~allowed_speeds) {
1278                 PMD_INIT_LOG(ERR, "Invalid link setting");
1279                 goto error;
1280         }
1281
1282         speed = 0x0;
1283         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
1284                 speed = (TXGBE_LINK_SPEED_100M_FULL |
1285                          TXGBE_LINK_SPEED_1GB_FULL |
1286                          TXGBE_LINK_SPEED_10GB_FULL);
1287         } else {
1288                 if (*link_speeds & ETH_LINK_SPEED_10G)
1289                         speed |= TXGBE_LINK_SPEED_10GB_FULL;
1290                 if (*link_speeds & ETH_LINK_SPEED_5G)
1291                         speed |= TXGBE_LINK_SPEED_5GB_FULL;
1292                 if (*link_speeds & ETH_LINK_SPEED_2_5G)
1293                         speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1294                 if (*link_speeds & ETH_LINK_SPEED_1G)
1295                         speed |= TXGBE_LINK_SPEED_1GB_FULL;
1296                 if (*link_speeds & ETH_LINK_SPEED_100M)
1297                         speed |= TXGBE_LINK_SPEED_100M_FULL;
1298         }
1299
1300         err = hw->mac.setup_link(hw, speed, link_up);
1301         if (err)
1302                 goto error;
1303
1304 skip_link_setup:
1305
1306         if (rte_intr_allow_others(intr_handle)) {
1307                 /* check if lsc interrupt is enabled */
1308                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1309                         txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1310                 else
1311                         txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1312                 txgbe_dev_macsec_interrupt_setup(dev);
1313                 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1314         } else {
1315                 rte_intr_callback_unregister(intr_handle,
1316                                              txgbe_dev_interrupt_handler, dev);
1317                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1318                         PMD_INIT_LOG(INFO, "lsc won't be enabled because"
1319                                      " there is no intr multiplexing");
1320         }
1321
1322         /* check if rxq interrupt is enabled */
1323         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1324             rte_intr_dp_is_en(intr_handle))
1325                 txgbe_dev_rxq_interrupt_setup(dev);
1326
1327         /* enable uio/vfio intr/eventfd mapping */
1328         rte_intr_enable(intr_handle);
1329
1330         /* resume enabled intr since hw reset */
1331         txgbe_enable_intr(dev);
1332
1333         /*
1334          * Update link status right before return, because it may
1335          * start link configuration process in a separate thread.
1336          */
1337         txgbe_dev_link_update(dev, 0);
1338
1339         wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1340
1341         txgbe_read_stats_registers(hw, hw_stats);
1342         hw->offset_loaded = 1;
1343
1344         return 0;
1345
1346 error:
1347         PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1348         txgbe_dev_clear_queues(dev);
1349         return -EIO;
1350 }
1351
1352 /*
1353  * Stop device: disable rx and tx functions to allow for reconfiguring.
1354  */
1355 static int
1356 txgbe_dev_stop(struct rte_eth_dev *dev)
1357 {
1358         struct rte_eth_link link;
1359         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1360         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1361         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1362
1363         if (hw->adapter_stopped)
1364                 return 0;
1365
1366         PMD_INIT_FUNC_TRACE();
1367
1368         rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1369
1370         /* disable interrupts */
1371         txgbe_disable_intr(hw);
1372
1373         /* reset the NIC */
1374         txgbe_pf_reset_hw(hw);
1375         hw->adapter_stopped = 0;
1376
1377         /* stop adapter */
1378         txgbe_stop_hw(hw);
1379
1380         if (hw->phy.media_type == txgbe_media_type_copper) {
1381                 /* Turn off the copper */
1382                 hw->phy.set_phy_power(hw, false);
1383         } else {
1384                 /* Turn off the laser */
1385                 hw->mac.disable_tx_laser(hw);
1386         }
1387
1388         txgbe_dev_clear_queues(dev);
1389
1390         /* Clear stored conf */
1391         dev->data->scattered_rx = 0;
1392         dev->data->lro = 0;
1393
1394         /* Clear recorded link status */
1395         memset(&link, 0, sizeof(link));
1396         rte_eth_linkstatus_set(dev, &link);
1397
1398         if (!rte_intr_allow_others(intr_handle))
1399                 /* resume to the default handler */
1400                 rte_intr_callback_register(intr_handle,
1401                                            txgbe_dev_interrupt_handler,
1402                                            (void *)dev);
1403
1404         /* Clean datapath event and queue/vec mapping */
1405         rte_intr_efd_disable(intr_handle);
1406         if (intr_handle->intr_vec != NULL) {
1407                 rte_free(intr_handle->intr_vec);
1408                 intr_handle->intr_vec = NULL;
1409         }
1410
1411         wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1412
1413         hw->adapter_stopped = true;
1414         dev->data->dev_started = 0;
1415
1416         return 0;
1417 }
1418
1419 /*
1420  * Set device link up: enable tx.
1421  */
1422 static int
1423 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1424 {
1425         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1426
1427         if (hw->phy.media_type == txgbe_media_type_copper) {
1428                 /* Turn on the copper */
1429                 hw->phy.set_phy_power(hw, true);
1430         } else {
1431                 /* Turn on the laser */
1432                 hw->mac.enable_tx_laser(hw);
1433                 txgbe_dev_link_update(dev, 0);
1434         }
1435
1436         return 0;
1437 }
1438
1439 /*
1440  * Set device link down: disable tx.
1441  */
1442 static int
1443 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1444 {
1445         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1446
1447         if (hw->phy.media_type == txgbe_media_type_copper) {
1448                 /* Turn off the copper */
1449                 hw->phy.set_phy_power(hw, false);
1450         } else {
1451                 /* Turn off the laser */
1452                 hw->mac.disable_tx_laser(hw);
1453                 txgbe_dev_link_update(dev, 0);
1454         }
1455
1456         return 0;
1457 }
1458
1459 /*
1460  * Reset and stop device.
1461  */
1462 static int
1463 txgbe_dev_close(struct rte_eth_dev *dev)
1464 {
1465         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1466         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1467         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1468         int retries = 0;
1469         int ret;
1470
1471         PMD_INIT_FUNC_TRACE();
1472
1473         txgbe_pf_reset_hw(hw);
1474
1475         ret = txgbe_dev_stop(dev);
1476
1477         txgbe_dev_free_queues(dev);
1478
1479         /* reprogram the RAR[0] in case user changed it. */
1480         txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1481
1482         /* Unlock any pending hardware semaphore */
1483         txgbe_swfw_lock_reset(hw);
1484
1485         /* disable uio intr before callback unregister */
1486         rte_intr_disable(intr_handle);
1487
1488         do {
1489                 ret = rte_intr_callback_unregister(intr_handle,
1490                                 txgbe_dev_interrupt_handler, dev);
1491                 if (ret >= 0 || ret == -ENOENT) {
1492                         break;
1493                 } else if (ret != -EAGAIN) {
1494                         PMD_INIT_LOG(ERR,
1495                                 "intr callback unregister failed: %d",
1496                                 ret);
1497                 }
1498                 rte_delay_ms(100);
1499         } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1500
1501         /* cancel the delayed handler before removing the device */
1502         rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1503
1504         rte_free(dev->data->mac_addrs);
1505         dev->data->mac_addrs = NULL;
1506
1507         rte_free(dev->data->hash_mac_addrs);
1508         dev->data->hash_mac_addrs = NULL;
1509
1510         return ret;
1511 }
1512
1513 /*
1514  * Reset PF device.
1515  */
1516 static int
1517 txgbe_dev_reset(struct rte_eth_dev *dev)
1518 {
1519         int ret;
1520
1521         /* When a DPDK PMD PF begins to reset the PF port, it should notify
1522          * all its VFs to make them align with it. The detailed notification
1523          * mechanism is PMD specific. For the txgbe PF it is rather complex,
1524          * so to avoid unexpected behavior in the VFs, reset of the PF with
1525          * SR-IOV active is currently not supported. It might be supported later.
1526          */
1527         if (dev->data->sriov.active)
1528                 return -ENOTSUP;
1529
1530         ret = eth_txgbe_dev_uninit(dev);
1531         if (ret)
1532                 return ret;
1533
1534         ret = eth_txgbe_dev_init(dev, NULL);
1535
1536         return ret;
1537 }
1538
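/*
 * The per-queue hardware counters read below are rolling counters (32-bit
 * packet counts, 36-bit byte counts split across two registers). The macros
 * accumulate the delta since the last raw reading and mask the result to the
 * counter width, so a single rollover between two polls is still counted
 * correctly. When hw->offset_loaded is 0 (e.g. right after a stats reset),
 * the current reading becomes the new baseline and the delta is zero.
 */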
1539 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1540         {                                                       \
1541                 uint32_t current_counter = rd32(hw, reg);       \
1542                 if (current_counter < last_counter)             \
1543                         current_counter += 0x100000000LL;       \
1544                 if (!hw->offset_loaded)                         \
1545                         last_counter = current_counter;         \
1546                 counter = current_counter - last_counter;       \
1547                 counter &= 0xFFFFFFFFLL;                        \
1548         }
1549
1550 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1551         {                                                                \
1552                 uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1553                 uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1554                 uint64_t current_counter = (current_counter_msb << 32) | \
1555                         current_counter_lsb;                             \
1556                 if (current_counter < last_counter)                      \
1557                         current_counter += 0x1000000000LL;               \
1558                 if (!hw->offset_loaded)                                  \
1559                         last_counter = current_counter;                  \
1560                 counter = current_counter - last_counter;                \
1561                 counter &= 0xFFFFFFFFFLL;                                \
1562         }
1563
1564 void
1565 txgbe_read_stats_registers(struct txgbe_hw *hw,
1566                            struct txgbe_hw_stats *hw_stats)
1567 {
1568         unsigned int i;
1569
1570         /* QP Stats */
1571         for (i = 0; i < hw->nb_rx_queues; i++) {
1572                 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
1573                         hw->qp_last[i].rx_qp_packets,
1574                         hw_stats->qp[i].rx_qp_packets);
1575                 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
1576                         hw->qp_last[i].rx_qp_bytes,
1577                         hw_stats->qp[i].rx_qp_bytes);
1578                 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
1579                         hw->qp_last[i].rx_qp_mc_packets,
1580                         hw_stats->qp[i].rx_qp_mc_packets);
1581         }
1582
1583         for (i = 0; i < hw->nb_tx_queues; i++) {
1584                 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
1585                         hw->qp_last[i].tx_qp_packets,
1586                         hw_stats->qp[i].tx_qp_packets);
1587                 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
1588                         hw->qp_last[i].tx_qp_bytes,
1589                         hw_stats->qp[i].tx_qp_bytes);
1590         }
1591         /* PB Stats */
1592         for (i = 0; i < TXGBE_MAX_UP; i++) {
1593                 hw_stats->up[i].rx_up_xon_packets +=
1594                                 rd32(hw, TXGBE_PBRXUPXON(i));
1595                 hw_stats->up[i].rx_up_xoff_packets +=
1596                                 rd32(hw, TXGBE_PBRXUPXOFF(i));
1597                 hw_stats->up[i].tx_up_xon_packets +=
1598                                 rd32(hw, TXGBE_PBTXUPXON(i));
1599                 hw_stats->up[i].tx_up_xoff_packets +=
1600                                 rd32(hw, TXGBE_PBTXUPXOFF(i));
1601                 hw_stats->up[i].tx_up_xon2off_packets +=
1602                                 rd32(hw, TXGBE_PBTXUPOFF(i));
1603                 hw_stats->up[i].rx_up_dropped +=
1604                                 rd32(hw, TXGBE_PBRXMISS(i));
1605         }
1606         hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
1607         hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
1608         hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
1609         hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
1610
1611         /* DMA Stats */
1612         hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
1613         hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
1614
1615         hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
1616         hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
1617         hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
1618
1619         /* MAC Stats */
1620         hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
1621         hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
1622         hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
1623
1624         hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
1625         hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
1626         hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
1627
1628         hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
1629         hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
1630
1631         hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
1632         hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
1633         hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
1634         hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
1635         hw_stats->rx_size_512_to_1023_packets +=
1636                         rd64(hw, TXGBE_MACRX512TO1023L);
1637         hw_stats->rx_size_1024_to_max_packets +=
1638                         rd64(hw, TXGBE_MACRX1024TOMAXL);
1639         hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
1640         hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
1641         hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
1642         hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
1643         hw_stats->tx_size_512_to_1023_packets +=
1644                         rd64(hw, TXGBE_MACTX512TO1023L);
1645         hw_stats->tx_size_1024_to_max_packets +=
1646                         rd64(hw, TXGBE_MACTX1024TOMAXL);
1647
1648         hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
1649         hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
1650         hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
1651
1652         /* MNG Stats */
1653         hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
1654         hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
1655         hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
1656         hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
1657
1658         /* FCoE Stats */
1659         hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
1660         hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
1661         hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
1662         hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
1663         hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
1664         hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
1665         hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
1666
1667         /* Flow Director Stats */
1668         hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
1669         hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
1670         hw_stats->flow_director_added_filters +=
1671                 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
1672         hw_stats->flow_director_removed_filters +=
1673                 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
1674         hw_stats->flow_director_filter_add_errors +=
1675                 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
1676         hw_stats->flow_director_filter_remove_errors +=
1677                 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
1678
1679         /* MACsec Stats */
1680         hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
1681         hw_stats->tx_macsec_pkts_encrypted +=
1682                         rd32(hw, TXGBE_LSECTX_ENCPKT);
1683         hw_stats->tx_macsec_pkts_protected +=
1684                         rd32(hw, TXGBE_LSECTX_PROTPKT);
1685         hw_stats->tx_macsec_octets_encrypted +=
1686                         rd32(hw, TXGBE_LSECTX_ENCOCT);
1687         hw_stats->tx_macsec_octets_protected +=
1688                         rd32(hw, TXGBE_LSECTX_PROTOCT);
1689         hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
1690         hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
1691         hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
1692         hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
1693         hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
1694         hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
1695         hw_stats->rx_macsec_sc_pkts_unchecked +=
1696                         rd32(hw, TXGBE_LSECRX_UNCHKPKT);
1697         hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
1698         hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
1699         for (i = 0; i < 2; i++) {
1700                 hw_stats->rx_macsec_sa_pkts_ok +=
1701                         rd32(hw, TXGBE_LSECRX_OKPKT(i));
1702                 hw_stats->rx_macsec_sa_pkts_invalid +=
1703                         rd32(hw, TXGBE_LSECRX_INVPKT(i));
1704                 hw_stats->rx_macsec_sa_pkts_notvalid +=
1705                         rd32(hw, TXGBE_LSECRX_BADPKT(i));
1706         }
1707         hw_stats->rx_macsec_sa_pkts_unusedsa +=
1708                         rd32(hw, TXGBE_LSECRX_INVSAPKT);
1709         hw_stats->rx_macsec_sa_pkts_notusingsa +=
1710                         rd32(hw, TXGBE_LSECRX_BADSAPKT);
1711
1712         hw_stats->rx_total_missed_packets = 0;
1713         for (i = 0; i < TXGBE_MAX_UP; i++) {
1714                 hw_stats->rx_total_missed_packets +=
1715                         hw_stats->up[i].rx_up_dropped;
1716         }
1717 }
1718
1719 static int
1720 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1721 {
1722         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1723         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1724         struct txgbe_stat_mappings *stat_mappings =
1725                         TXGBE_DEV_STAT_MAPPINGS(dev);
1726         uint32_t i, j;
1727
1728         txgbe_read_stats_registers(hw, hw_stats);
1729
1730         if (stats == NULL)
1731                 return -EINVAL;
1732
1733         /* Fill out the rte_eth_stats statistics structure */
1734         stats->ipackets = hw_stats->rx_packets;
1735         stats->ibytes = hw_stats->rx_bytes;
1736         stats->opackets = hw_stats->tx_packets;
1737         stats->obytes = hw_stats->tx_bytes;
1738
1739         memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1740         memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1741         memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1742         memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1743         memset(&stats->q_errors, 0, sizeof(stats->q_errors));
1744         for (i = 0; i < TXGBE_MAX_QP; i++) {
1745                 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1746                 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1747                 uint32_t q_map;
1748
1749                 q_map = (stat_mappings->rqsm[n] >> offset)
1750                                 & QMAP_FIELD_RESERVED_BITS_MASK;
1751                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1752                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1753                 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1754                 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1755
1756                 q_map = (stat_mappings->tqsm[n] >> offset)
1757                                 & QMAP_FIELD_RESERVED_BITS_MASK;
1758                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1759                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1760                 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1761                 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1762         }
1763
1764         /* Rx Errors */
1765         stats->imissed  = hw_stats->rx_total_missed_packets;
1766         stats->ierrors  = hw_stats->rx_crc_errors +
1767                           hw_stats->rx_mac_short_packet_dropped +
1768                           hw_stats->rx_length_errors +
1769                           hw_stats->rx_undersize_errors +
1770                           hw_stats->rx_oversize_errors +
1771                           hw_stats->rx_drop_packets +
1772                           hw_stats->rx_illegal_byte_errors +
1773                           hw_stats->rx_error_bytes +
1774                           hw_stats->rx_fragment_errors +
1775                           hw_stats->rx_fcoe_crc_errors +
1776                           hw_stats->rx_fcoe_mbuf_allocation_errors;
1777
1778         /* Tx Errors */
1779         stats->oerrors  = 0;
1780         return 0;
1781 }
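
/*
 * Illustrative usage note (not part of the driver): applications consume the
 * counters filled in above through the generic ethdev API, for example
 *
 *     struct rte_eth_stats stats;
 *     if (rte_eth_stats_get(port_id, &stats) == 0)
 *         printf("ipackets=%" PRIu64 "\n", stats.ipackets);
 *
 * which ends up in txgbe_dev_stats_get() for txgbe ports; port_id here is
 * just a placeholder for the application's port identifier.
 */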
1782
1783 static int
1784 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
1785 {
1786         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1787         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1788
1789         /* HW registers are cleared on read */
1790         hw->offset_loaded = 0;
1791         txgbe_dev_stats_get(dev, NULL);
1792         hw->offset_loaded = 1;
1793
1794         /* Reset software totals */
1795         memset(hw_stats, 0, sizeof(*hw_stats));
1796
1797         return 0;
1798 }
1799
1800 /* This function calculates the number of xstats based on the current config */
1801 static unsigned
1802 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
1803 {
1804         int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1805         return TXGBE_NB_HW_STATS +
1806                TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
1807                TXGBE_NB_QP_STATS * nb_queues;
1808 }
1809
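/*
 * The xstats id space used by the helpers below is laid out flat: first the
 * TXGBE_NB_HW_STATS device-wide counters, then TXGBE_NB_UP_STATS counters per
 * user priority (TXGBE_MAX_UP priorities), then TXGBE_NB_QP_STATS counters
 * per queue pair (TXGBE_MAX_QP queues). txgbe_get_name_by_id() turns an id
 * into a printable name and txgbe_get_offset_by_id() turns it into a byte
 * offset inside struct txgbe_hw_stats.
 */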
1810 static inline int
1811 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1812 {
1813         int nb, st;
1814
1815         /* Extended stats from txgbe_hw_stats */
1816         if (id < TXGBE_NB_HW_STATS) {
1817                 snprintf(name, size, "[hw]%s",
1818                         rte_txgbe_stats_strings[id].name);
1819                 return 0;
1820         }
1821         id -= TXGBE_NB_HW_STATS;
1822
1823         /* Priority Stats */
1824         if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1825                 nb = id / TXGBE_NB_UP_STATS;
1826                 st = id % TXGBE_NB_UP_STATS;
1827                 snprintf(name, size, "[p%u]%s", nb,
1828                         rte_txgbe_up_strings[st].name);
1829                 return 0;
1830         }
1831         id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1832
1833         /* Queue Stats */
1834         if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
1835                 nb = id / TXGBE_NB_QP_STATS;
1836                 st = id % TXGBE_NB_QP_STATS;
1837                 snprintf(name, size, "[q%u]%s", nb,
1838                         rte_txgbe_qp_strings[st].name);
1839                 return 0;
1840         }
1841         id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
1842
1843         return -(int)(id + 1);
1844 }
1845
1846 static inline int
1847 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1848 {
1849         int nb, st;
1850
1851         /* Extended stats from txgbe_hw_stats */
1852         if (id < TXGBE_NB_HW_STATS) {
1853                 *offset = rte_txgbe_stats_strings[id].offset;
1854                 return 0;
1855         }
1856         id -= TXGBE_NB_HW_STATS;
1857
1858         /* Priority Stats */
1859         if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1860                 nb = id / TXGBE_NB_UP_STATS;
1861                 st = id % TXGBE_NB_UP_STATS;
1862                 *offset = rte_txgbe_up_strings[st].offset +
1863                         nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
1864                 return 0;
1865         }
1866         id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1867
1868         /* Queue Stats */
1869         if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
1870                 nb = id / TXGBE_NB_QP_STATS;
1871                 st = id % TXGBE_NB_QP_STATS;
1872                 *offset = rte_txgbe_qp_strings[st].offset +
1873                         nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
1874                 return 0;
1875         }
1876         id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
1877
1878         return -(int)(id + 1);
1879 }
1880
1881 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1882         struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1883 {
1884         unsigned int i, count;
1885
1886         count = txgbe_xstats_calc_num(dev);
1887         if (xstats_names == NULL)
1888                 return count;
1889
1890         /* Note: limit >= cnt_stats checked upstream
1891          * in rte_eth_xstats_get_names()
1892          */
1893         limit = min(limit, count);
1894
1895         /* Extended stats from txgbe_hw_stats */
1896         for (i = 0; i < limit; i++) {
1897                 if (txgbe_get_name_by_id(i, xstats_names[i].name,
1898                         sizeof(xstats_names[i].name))) {
1899                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1900                         break;
1901                 }
1902         }
1903
1904         return i;
1905 }
1906
1907 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1908         struct rte_eth_xstat_name *xstats_names,
1909         const uint64_t *ids,
1910         unsigned int limit)
1911 {
1912         unsigned int i;
1913
1914         if (ids == NULL)
1915                 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
1916
1917         for (i = 0; i < limit; i++) {
1918                 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
1919                                 sizeof(xstats_names[i].name))) {
1920                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1921                         return -1;
1922                 }
1923         }
1924
1925         return i;
1926 }
1927
1928 static int
1929 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1930                                          unsigned int limit)
1931 {
1932         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1933         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1934         unsigned int i, count;
1935
1936         txgbe_read_stats_registers(hw, hw_stats);
1937
1938         /* If this is a reset, xstats is NULL and we have already cleared the
1939          * registers by reading them.
1940          */
1941         count = txgbe_xstats_calc_num(dev);
1942         if (xstats == NULL)
1943                 return count;
1944
1945         limit = min(limit, txgbe_xstats_calc_num(dev));
1946
1947         /* Extended stats from txgbe_hw_stats */
1948         for (i = 0; i < limit; i++) {
1949                 uint32_t offset = 0;
1950
1951                 if (txgbe_get_offset_by_id(i, &offset)) {
1952                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1953                         break;
1954                 }
1955                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1956                 xstats[i].id = i;
1957         }
1958
1959         return i;
1960 }
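
/*
 * Illustrative only: the usual two-call pattern an application uses to read
 * these extended statistics, relying on the "xstats == NULL" branch above to
 * report the required array size:
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
 *     if (xs != NULL)
 *         rte_eth_xstats_get(port_id, xs, n);
 *
 * port_id and xs are placeholders for the application's own variables.
 */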
1961
1962 static int
1963 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1964                                          unsigned int limit)
1965 {
1966         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1967         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1968         unsigned int i, count;
1969
1970         txgbe_read_stats_registers(hw, hw_stats);
1971
1972         /* If this is a reset, values is NULL and we have already cleared the
1973          * registers by reading them.
1974          */
1975         count = txgbe_xstats_calc_num(dev);
1976         if (values == NULL)
1977                 return count;
1978
1979         limit = min(limit, txgbe_xstats_calc_num(dev));
1980
1981         /* Extended stats from txgbe_hw_stats */
1982         for (i = 0; i < limit; i++) {
1983                 uint32_t offset;
1984
1985                 if (txgbe_get_offset_by_id(i, &offset)) {
1986                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1987                         break;
1988                 }
1989                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1990         }
1991
1992         return i;
1993 }
1994
1995 static int
1996 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1997                 uint64_t *values, unsigned int limit)
1998 {
1999         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2000         unsigned int i;
2001
2002         if (ids == NULL)
2003                 return txgbe_dev_xstats_get_(dev, values, limit);
2004
2005         for (i = 0; i < limit; i++) {
2006                 uint32_t offset;
2007
2008                 if (txgbe_get_offset_by_id(ids[i], &offset)) {
2009                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2010                         break;
2011                 }
2012                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2013         }
2014
2015         return i;
2016 }
2017
2018 static int
2019 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2020 {
2021         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2022         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2023
2024         /* HW registers are cleared on read */
2025         hw->offset_loaded = 0;
2026         txgbe_read_stats_registers(hw, hw_stats);
2027         hw->offset_loaded = 1;
2028
2029         /* Reset software totals */
2030         memset(hw_stats, 0, sizeof(*hw_stats));
2031
2032         return 0;
2033 }
2034
2035 static int
2036 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2037 {
2038         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2039         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2040
2041         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2042         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2043         dev_info->min_rx_bufsize = 1024;
2044         dev_info->max_rx_pktlen = 15872;
2045         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2046         dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2047         dev_info->max_vfs = pci_dev->max_vfs;
2048         dev_info->max_vmdq_pools = ETH_64_POOLS;
2049         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2050         dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2051         dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2052                                      dev_info->rx_queue_offload_capa);
2053         dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2054         dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2055
2056         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2057                 .rx_thresh = {
2058                         .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2059                         .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2060                         .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2061                 },
2062                 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2063                 .rx_drop_en = 0,
2064                 .offloads = 0,
2065         };
2066
2067         dev_info->default_txconf = (struct rte_eth_txconf) {
2068                 .tx_thresh = {
2069                         .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2070                         .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2071                         .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2072                 },
2073                 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2074                 .offloads = 0,
2075         };
2076
2077         dev_info->rx_desc_lim = rx_desc_lim;
2078         dev_info->tx_desc_lim = tx_desc_lim;
2079
2080         dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2081         dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2082         dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2083
2084         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2085         dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2086
2087         /* Driver-preferred Rx/Tx parameters */
2088         dev_info->default_rxportconf.burst_size = 32;
2089         dev_info->default_txportconf.burst_size = 32;
2090         dev_info->default_rxportconf.nb_queues = 1;
2091         dev_info->default_txportconf.nb_queues = 1;
2092         dev_info->default_rxportconf.ring_size = 256;
2093         dev_info->default_txportconf.ring_size = 256;
2094
2095         return 0;
2096 }
2097
2098 const uint32_t *
2099 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2100 {
2101         if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2102             dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2103             dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2104             dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2105                 return txgbe_get_supported_ptypes();
2106
2107         return NULL;
2108 }
2109
2110 void
2111 txgbe_dev_setup_link_alarm_handler(void *param)
2112 {
2113         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2114         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2115         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2116         u32 speed;
2117         bool autoneg = false;
2118
2119         speed = hw->phy.autoneg_advertised;
2120         if (!speed)
2121                 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2122
2123         hw->mac.setup_link(hw, speed, true);
2124
2125         intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2126 }
2127
2128 /* return 0 means link status changed, -1 means not changed */
2129 int
2130 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2131                             int wait_to_complete)
2132 {
2133         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2134         struct rte_eth_link link;
2135         u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2136         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2137         bool link_up;
2138         int err;
2139         int wait = 1;
2140
2141         memset(&link, 0, sizeof(link));
2142         link.link_status = ETH_LINK_DOWN;
2143         link.link_speed = ETH_SPEED_NUM_NONE;
2144         link.link_duplex = ETH_LINK_HALF_DUPLEX;
2145         link.link_autoneg = ETH_LINK_AUTONEG;
2146
2147         hw->mac.get_link_status = true;
2148
2149         if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2150                 return rte_eth_linkstatus_set(dev, &link);
2151
2152         /* check if it needs to wait to complete, if lsc interrupt is enabled */
2153         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2154                 wait = 0;
2155
2156         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2157
2158         if (err != 0) {
2159                 link.link_speed = ETH_SPEED_NUM_100M;
2160                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2161                 return rte_eth_linkstatus_set(dev, &link);
2162         }
2163
2164         if (link_up == 0) {
2165                 if (hw->phy.media_type == txgbe_media_type_fiber) {
2166                         intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2167                         rte_eal_alarm_set(10,
2168                                 txgbe_dev_setup_link_alarm_handler, dev);
2169                 }
2170                 return rte_eth_linkstatus_set(dev, &link);
2171         }
2172
2173         intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2174         link.link_status = ETH_LINK_UP;
2175         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2176
2177         switch (link_speed) {
2178         default:
2179         case TXGBE_LINK_SPEED_UNKNOWN:
2180                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2181                 link.link_speed = ETH_SPEED_NUM_100M;
2182                 break;
2183
2184         case TXGBE_LINK_SPEED_100M_FULL:
2185                 link.link_speed = ETH_SPEED_NUM_100M;
2186                 break;
2187
2188         case TXGBE_LINK_SPEED_1GB_FULL:
2189                 link.link_speed = ETH_SPEED_NUM_1G;
2190                 break;
2191
2192         case TXGBE_LINK_SPEED_2_5GB_FULL:
2193                 link.link_speed = ETH_SPEED_NUM_2_5G;
2194                 break;
2195
2196         case TXGBE_LINK_SPEED_5GB_FULL:
2197                 link.link_speed = ETH_SPEED_NUM_5G;
2198                 break;
2199
2200         case TXGBE_LINK_SPEED_10GB_FULL:
2201                 link.link_speed = ETH_SPEED_NUM_10G;
2202                 break;
2203         }
2204
2205         return rte_eth_linkstatus_set(dev, &link);
2206 }
2207
2208 static int
2209 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2210 {
2211         return txgbe_dev_link_update_share(dev, wait_to_complete);
2212 }
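
/*
 * Note (illustrative, not driver code): rte_eth_link_get() reaches this op
 * with wait_to_complete = 1 and rte_eth_link_get_nowait() with
 * wait_to_complete = 0; both then return the snapshot stored via
 * rte_eth_linkstatus_set() in txgbe_dev_link_update_share().
 */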
2213
2214 /**
2215  * It clears the interrupt causes and enables the interrupt.
2216  * It is called only once, when the NIC is initialized.
2217  *
2218  * @param dev
2219  *  Pointer to struct rte_eth_dev.
2220  * @param on
2221  *  Enable or Disable.
2222  *
2223  * @return
2224  *  - On success, zero.
2225  *  - On failure, a negative value.
2226  */
2227 static int
2228 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2229 {
2230         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2231
2232         txgbe_dev_link_status_print(dev);
2233         if (on)
2234                 intr->mask_misc |= TXGBE_ICRMISC_LSC;
2235         else
2236                 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2237
2238         return 0;
2239 }
2240
2241 /**
2242  * It clears the interrupt causes and enables the interrupt.
2243  * It is called only once, when the NIC is initialized.
2244  *
2245  * @param dev
2246  *  Pointer to struct rte_eth_dev.
2247  *
2248  * @return
2249  *  - On success, zero.
2250  *  - On failure, a negative value.
2251  */
2252 static int
2253 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2254 {
2255         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2256
2257         intr->mask[0] |= TXGBE_ICR_MASK;
2258         intr->mask[1] |= TXGBE_ICR_MASK;
2259
2260         return 0;
2261 }
2262
2263 /**
2264  * It clears the interrupt causes and enables the interrupt.
2265  * It is called only once, when the NIC is initialized.
2266  *
2267  * @param dev
2268  *  Pointer to struct rte_eth_dev.
2269  *
2270  * @return
2271  *  - On success, zero.
2272  *  - On failure, a negative value.
2273  */
2274 static int
2275 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2276 {
2277         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2278
2279         intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2280
2281         return 0;
2282 }
2283
2284 /*
2285  * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
2286  *
2287  * @param dev
2288  *  Pointer to struct rte_eth_dev.
2289  *
2290  * @return
2291  *  - On success, zero.
2292  *  - On failure, a negative value.
2293  */
2294 static int
2295 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2296 {
2297         uint32_t eicr;
2298         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2299         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2300
2301         /* clear all cause mask */
2302         txgbe_disable_intr(hw);
2303
2304         /* read-on-clear nic registers here */
2305         eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2306         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2307
2308         intr->flags = 0;
2309
2310         /* set flag for async link update */
2311         if (eicr & TXGBE_ICRMISC_LSC)
2312                 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2313
2314         if (eicr & TXGBE_ICRMISC_VFMBX)
2315                 intr->flags |= TXGBE_FLAG_MAILBOX;
2316
2317         if (eicr & TXGBE_ICRMISC_LNKSEC)
2318                 intr->flags |= TXGBE_FLAG_MACSEC;
2319
2320         if (eicr & TXGBE_ICRMISC_GPIO)
2321                 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2322
2323         return 0;
2324 }
2325
2326 /**
2327  * It gets and then prints the link status.
2328  *
2329  * @param dev
2330  *  Pointer to struct rte_eth_dev.
2331  *
2332  * @return
2333  *  void
2335  */
2336 static void
2337 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2338 {
2339         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2340         struct rte_eth_link link;
2341
2342         rte_eth_linkstatus_get(dev, &link);
2343
2344         if (link.link_status) {
2345                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2346                                         (int)(dev->data->port_id),
2347                                         (unsigned int)link.link_speed,
2348                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2349                                         "full-duplex" : "half-duplex");
2350         } else {
2351                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2352                                 (int)(dev->data->port_id));
2353         }
2354         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2355                                 pci_dev->addr.domain,
2356                                 pci_dev->addr.bus,
2357                                 pci_dev->addr.devid,
2358                                 pci_dev->addr.function);
2359 }
2360
2361 /*
2362  * It executes link_update after knowing an interrupt occurred.
2363  *
2364  * @param dev
2365  *  Pointer to struct rte_eth_dev.
2366  *
2367  * @return
2368  *  - On success, zero.
2369  *  - On failure, a negative value.
2370  */
2371 static int
2372 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
2373                            struct rte_intr_handle *intr_handle)
2374 {
2375         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2376         int64_t timeout;
2377         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2378
2379         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2380
2381         if (intr->flags & TXGBE_FLAG_MAILBOX)
2382                 intr->flags &= ~TXGBE_FLAG_MAILBOX;
2383
2384         if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2385                 hw->phy.handle_lasi(hw);
2386                 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2387         }
2388
2389         if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2390                 struct rte_eth_link link;
2391
2392                 /* get the link status before link update, for predicting later */
2393                 rte_eth_linkstatus_get(dev, &link);
2394
2395                 txgbe_dev_link_update(dev, 0);
2396
2397                 /* link is likely to come up */
2398                 if (!link.link_status)
2399                         /* handle it 1 sec later, wait for it to become stable */
2400                         timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
2401                 /* link is likely to go down */
2402                 else
2403                         /* handle it 4 sec later, wait for it to become stable */
2404                         timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
2405
2406                 txgbe_dev_link_status_print(dev);
2407                 if (rte_eal_alarm_set(timeout * 1000,
2408                                       txgbe_dev_interrupt_delayed_handler,
2409                                       (void *)dev) < 0) {
2410                         PMD_DRV_LOG(ERR, "Error setting alarm");
2411                 } else {
2412                         /* remember original mask */
2413                         intr->mask_misc_orig = intr->mask_misc;
2414                         /* only disable lsc interrupt */
2415                         intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2416                 }
2417         }
2418
2419         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2420         txgbe_enable_intr(dev);
2421         rte_intr_enable(intr_handle);
2422
2423         return 0;
2424 }
2425
2426 /**
2427  * Interrupt handler which shall be registered for alarm callback for delayed
2428  * handling of a specific interrupt, to wait for a stable NIC state. As the
2429  * NIC interrupt state is not stable for txgbe right after the link goes down,
2430  * it needs to wait 4 seconds to get a stable status.
2431  *
2432  * @param handle
2433  *  Pointer to interrupt handle.
2434  * @param param
2435  *  The address of parameter (struct rte_eth_dev *) registered before.
2436  *
2437  * @return
2438  *  void
2439  */
2440 static void
2441 txgbe_dev_interrupt_delayed_handler(void *param)
2442 {
2443         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2444         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2445         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2446         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2447         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2448         uint32_t eicr;
2449
2450         txgbe_disable_intr(hw);
2451
2452         eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2453
2454         if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2455                 hw->phy.handle_lasi(hw);
2456                 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2457         }
2458
2459         if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2460                 txgbe_dev_link_update(dev, 0);
2461                 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
2462                 txgbe_dev_link_status_print(dev);
2463                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2464                                               NULL);
2465         }
2466
2467         if (intr->flags & TXGBE_FLAG_MACSEC) {
2468                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2469                                               NULL);
2470                 intr->flags &= ~TXGBE_FLAG_MACSEC;
2471         }
2472
2473         /* restore original mask */
2474         intr->mask_misc = intr->mask_misc_orig;
2475         intr->mask_misc_orig = 0;
2476
2477         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2478         txgbe_enable_intr(dev);
2479         rte_intr_enable(intr_handle);
2480 }
2481
2482 /**
2483  * Interrupt handler triggered by the NIC for handling a
2484  * specific interrupt.
2485  *
2486  * @param handle
2487  *  Pointer to interrupt handle.
2488  * @param param
2489  *  The address of parameter (struct rte_eth_dev *) registered before.
2490  *
2491  * @return
2492  *  void
2493  */
2494 static void
2495 txgbe_dev_interrupt_handler(void *param)
2496 {
2497         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2498
2499         txgbe_dev_interrupt_get_status(dev);
2500         txgbe_dev_interrupt_action(dev, dev->intr_handle);
2501 }
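
/*
 * Illustrative only: an application that wants the link-state-change events
 * raised above (via rte_eth_dev_callback_process(..., RTE_ETH_EVENT_INTR_LSC))
 * enables dev_conf.intr_conf.lsc = 1 when configuring the port and registers
 * a callback, e.g.
 *
 *     static int
 *     lsc_event_cb(uint16_t port_id, enum rte_eth_event_type type,
 *                  void *param, void *ret_param)
 *     {
 *             RTE_SET_USED(param);
 *             RTE_SET_USED(ret_param);
 *             printf("port %u: event %d\n", port_id, type);
 *             return 0;
 *     }
 *
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                    lsc_event_cb, NULL);
 *
 * lsc_event_cb and port_id are placeholders, not part of this driver.
 */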
2502
2503 static int
2504 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2505                                 uint32_t index, uint32_t pool)
2506 {
2507         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2508         uint32_t enable_addr = 1;
2509
2510         return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
2511                              pool, enable_addr);
2512 }
2513
2514 static void
2515 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2516 {
2517         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2518
2519         txgbe_clear_rar(hw, index);
2520 }
2521
2522 static int
2523 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2524 {
2525         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2526
2527         txgbe_remove_rar(dev, 0);
2528         txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2529
2530         return 0;
2531 }
2532
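/*
 * Compute the 12-bit hash used to index the unicast hash table (UTA).
 * Which bits of the MAC address feed the hash depends on
 * hw->mac.mc_filter_type, mirroring the multicast filter configuration.
 */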
2533 static uint32_t
2534 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
2535 {
2536         uint32_t vector = 0;
2537
2538         switch (hw->mac.mc_filter_type) {
2539         case 0:   /* use bits [47:36] of the address */
2540                 vector = ((uc_addr->addr_bytes[4] >> 4) |
2541                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2542                 break;
2543         case 1:   /* use bits [46:35] of the address */
2544                 vector = ((uc_addr->addr_bytes[4] >> 3) |
2545                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2546                 break;
2547         case 2:   /* use bits [45:34] of the address */
2548                 vector = ((uc_addr->addr_bytes[4] >> 2) |
2549                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2550                 break;
2551         case 3:   /* use bits [43:32] of the address */
2552                 vector = ((uc_addr->addr_bytes[4]) |
2553                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2554                 break;
2555         default:  /* Invalid mc_filter_type */
2556                 break;
2557         }
2558
2559         /* vector can only be 12-bits or boundary will be exceeded */
2560         vector &= 0xFFF;
2561         return vector;
2562 }
2563
2564 static int
2565 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
2566                         struct rte_ether_addr *mac_addr, uint8_t on)
2567 {
2568         uint32_t vector;
2569         uint32_t uta_idx;
2570         uint32_t reg_val;
2571         uint32_t uta_mask;
2572         uint32_t psrctl;
2573
2574         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2575         struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
2576
2577         /* The UTA table only exists on pf hardware */
2578         if (hw->mac.type < txgbe_mac_raptor)
2579                 return -ENOTSUP;
2580
2581         vector = txgbe_uta_vector(hw, mac_addr);
2582         uta_idx = (vector >> 5) & 0x7F;
2583         uta_mask = 0x1UL << (vector & 0x1F);
2584
2585         if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2586                 return 0;
2587
2588         reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
2589         if (on) {
2590                 uta_info->uta_in_use++;
2591                 reg_val |= uta_mask;
2592                 uta_info->uta_shadow[uta_idx] |= uta_mask;
2593         } else {
2594                 uta_info->uta_in_use--;
2595                 reg_val &= ~uta_mask;
2596                 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2597         }
2598
2599         wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
2600
2601         psrctl = rd32(hw, TXGBE_PSRCTL);
2602         if (uta_info->uta_in_use > 0)
2603                 psrctl |= TXGBE_PSRCTL_UCHFENA;
2604         else
2605                 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
2606
2607         psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
2608         psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2609         wr32(hw, TXGBE_PSRCTL, psrctl);
2610
2611         return 0;
2612 }
2613
2614 static int
2615 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2616 {
2617         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2618         struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
2619         uint32_t psrctl;
2620         int i;
2621
2622         /* The UTA table only exists on pf hardware */
2623         if (hw->mac.type < txgbe_mac_raptor)
2624                 return -ENOTSUP;
2625
2626         if (on) {
2627                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2628                         uta_info->uta_shadow[i] = ~0;
2629                         wr32(hw, TXGBE_UCADDRTBL(i), ~0);
2630                 }
2631         } else {
2632                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2633                         uta_info->uta_shadow[i] = 0;
2634                         wr32(hw, TXGBE_UCADDRTBL(i), 0);
2635                 }
2636         }
2637
2638         psrctl = rd32(hw, TXGBE_PSRCTL);
2639         if (on)
2640                 psrctl |= TXGBE_PSRCTL_UCHFENA;
2641         else
2642                 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
2643
2644         psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
2645         psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2646         wr32(hw, TXGBE_PSRCTL, psrctl);
2647
2648         return 0;
2649 }
2650
2651 static int
2652 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2653 {
2654         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2655         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2656         uint32_t mask;
2657         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2658
2659         if (queue_id < 32) {
2660                 mask = rd32(hw, TXGBE_IMS(0));
2661                 mask &= (1 << queue_id);
2662                 wr32(hw, TXGBE_IMS(0), mask);
2663         } else if (queue_id < 64) {
2664                 mask = rd32(hw, TXGBE_IMS(1));
2665                 mask &= (1 << (queue_id - 32));
2666                 wr32(hw, TXGBE_IMS(1), mask);
2667         }
2668         rte_intr_enable(intr_handle);
2669
2670         return 0;
2671 }
2672
2673 static int
2674 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2675 {
2676         uint32_t mask;
2677         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2678
2679         if (queue_id < 32) {
2680                 mask = rd32(hw, TXGBE_IMS(0));
2681                 mask &= ~(1 << queue_id);
2682                 wr32(hw, TXGBE_IMS(0), mask);
2683         } else if (queue_id < 64) {
2684                 mask = rd32(hw, TXGBE_IMS(1));
2685                 mask &= ~(1 << (queue_id - 32));
2686                 wr32(hw, TXGBE_IMS(1), mask);
2687         }
2688
2689         return 0;
2690 }
2691
2692 /**
2693  * set the IVAR registers, mapping interrupt causes to vectors
2694  * @param hw
2695  *  pointer to txgbe_hw struct
2696  * @direction
2697  *  0 for Rx, 1 for Tx, -1 for other causes
2698  * @queue
2699  *  queue to map the corresponding interrupt to
2700  * @msix_vector
2701  *  the vector to map to the corresponding queue
2702  */
2703 void
2704 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
2705                    uint8_t queue, uint8_t msix_vector)
2706 {
2707         uint32_t tmp, idx;
2708
2709         if (direction == -1) {
2710                 /* other causes */
2711                 msix_vector |= TXGBE_IVARMISC_VLD;
2712                 idx = 0;
2713                 tmp = rd32(hw, TXGBE_IVARMISC);
2714                 tmp &= ~(0xFF << idx);
2715                 tmp |= (msix_vector << idx);
2716                 wr32(hw, TXGBE_IVARMISC, tmp);
2717         } else {
2718                 /* rx or tx causes */
2719                 /* Workaround for ICR lost */
2720                 idx = ((16 * (queue & 1)) + (8 * direction));
2721                 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
2722                 tmp &= ~(0xFF << idx);
2723                 tmp |= (msix_vector << idx);
2724                 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
2725         }
2726 }
2727
2728 /**
2729  * Sets up the hardware to properly generate MSI-X interrupts
2730  * @hw
2731  *  board private structure
2732  */
2733 static void
2734 txgbe_configure_msix(struct rte_eth_dev *dev)
2735 {
2736         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2737         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2738         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2739         uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
2740         uint32_t vec = TXGBE_MISC_VEC_ID;
2741         uint32_t gpie;
2742
2743         /* Don't configure the MSI-X registers if no mapping is done
2744          * between intr vector and event fd.
2745          * But if MSI-X has been enabled already, we need to configure
2746          * auto clean, auto mask and throttling.
2747          */
2748         gpie = rd32(hw, TXGBE_GPIE);
2749         if (!rte_intr_dp_is_en(intr_handle) &&
2750             !(gpie & TXGBE_GPIE_MSIX))
2751                 return;
2752
2753         if (rte_intr_allow_others(intr_handle)) {
2754                 base = TXGBE_RX_VEC_START;
2755                 vec = base;
2756         }
2757
2758         /* setup GPIE for MSI-X mode */
2759         gpie = rd32(hw, TXGBE_GPIE);
2760         gpie |= TXGBE_GPIE_MSIX;
2761         wr32(hw, TXGBE_GPIE, gpie);
2762
2763         /* Populate the IVAR table and set the ITR values to the
2764          * corresponding register.
2765          */
2766         if (rte_intr_dp_is_en(intr_handle)) {
2767                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2768                         queue_id++) {
2769                         /* by default, 1:1 mapping */
2770                         txgbe_set_ivar_map(hw, 0, queue_id, vec);
2771                         intr_handle->intr_vec[queue_id] = vec;
2772                         if (vec < base + intr_handle->nb_efd - 1)
2773                                 vec++;
2774                 }
2775
2776                 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
2777         }
2778         wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
2779                         TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2780                         | TXGBE_ITR_WRDSA);
2781 }
2782
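/*
 * Iterator callback handed to txgbe_update_mc_addr_list(): it returns the
 * current multicast address and advances the cursor by one rte_ether_addr;
 * the VMDq pool index is always reported as 0 here.
 */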
2783 static u8 *
2784 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
2785                         u8 **mc_addr_ptr, u32 *vmdq)
2786 {
2787         u8 *mc_addr;
2788
2789         *vmdq = 0;
2790         mc_addr = *mc_addr_ptr;
2791         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2792         return mc_addr;
2793 }
2794
2795 int
2796 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2797                           struct rte_ether_addr *mc_addr_set,
2798                           uint32_t nb_mc_addr)
2799 {
2800         struct txgbe_hw *hw;
2801         u8 *mc_addr_list;
2802
2803         hw = TXGBE_DEV_HW(dev);
2804         mc_addr_list = (u8 *)mc_addr_set;
2805         return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2806                                          txgbe_dev_addr_list_itr, TRUE);
2807 }
2808
2809 static const struct eth_dev_ops txgbe_eth_dev_ops = {
2810         .dev_configure              = txgbe_dev_configure,
2811         .dev_infos_get              = txgbe_dev_info_get,
2812         .dev_start                  = txgbe_dev_start,
2813         .dev_stop                   = txgbe_dev_stop,
2814         .dev_set_link_up            = txgbe_dev_set_link_up,
2815         .dev_set_link_down          = txgbe_dev_set_link_down,
2816         .dev_close                  = txgbe_dev_close,
2817         .dev_reset                  = txgbe_dev_reset,
2818         .link_update                = txgbe_dev_link_update,
2819         .stats_get                  = txgbe_dev_stats_get,
2820         .xstats_get                 = txgbe_dev_xstats_get,
2821         .xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
2822         .stats_reset                = txgbe_dev_stats_reset,
2823         .xstats_reset               = txgbe_dev_xstats_reset,
2824         .xstats_get_names           = txgbe_dev_xstats_get_names,
2825         .xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
2826         .queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
2827         .dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
2828         .vlan_filter_set            = txgbe_vlan_filter_set,
2829         .vlan_tpid_set              = txgbe_vlan_tpid_set,
2830         .vlan_offload_set           = txgbe_vlan_offload_set,
2831         .vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
2832         .rx_queue_start             = txgbe_dev_rx_queue_start,
2833         .rx_queue_stop              = txgbe_dev_rx_queue_stop,
2834         .tx_queue_start             = txgbe_dev_tx_queue_start,
2835         .tx_queue_stop              = txgbe_dev_tx_queue_stop,
2836         .rx_queue_setup             = txgbe_dev_rx_queue_setup,
2837         .rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
2838         .rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
2839         .rx_queue_release           = txgbe_dev_rx_queue_release,
2840         .tx_queue_setup             = txgbe_dev_tx_queue_setup,
2841         .tx_queue_release           = txgbe_dev_tx_queue_release,
2842         .mac_addr_add               = txgbe_add_rar,
2843         .mac_addr_remove            = txgbe_remove_rar,
2844         .mac_addr_set               = txgbe_set_default_mac_addr,
2845         .uc_hash_table_set          = txgbe_uc_hash_table_set,
2846         .uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
2847         .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
2848         .rxq_info_get               = txgbe_rxq_info_get,
2849         .txq_info_get               = txgbe_txq_info_get,
2850 };
2851
2852 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
2853 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
2854 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
2855
2856 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
2857 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
2858
2859 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
2860         RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
2861 #endif
2862 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
2863         RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
2864 #endif
2865
2866 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
2867         RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
2868 #endif