net/txgbe: add process mailbox operation
[dpdk.git] / drivers / net / txgbe / txgbe_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020
3  */
4
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
11
12 #include <rte_interrupts.h>
13 #include <rte_log.h>
14 #include <rte_debug.h>
15 #include <rte_pci.h>
16 #include <rte_memory.h>
17 #include <rte_eal.h>
18 #include <rte_alarm.h>
19
20 #include "txgbe_logs.h"
21 #include "base/txgbe.h"
22 #include "txgbe_ethdev.h"
23 #include "txgbe_rxtx.h"
24
25 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
26 static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
27 static int txgbe_dev_close(struct rte_eth_dev *dev);
28 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
29                                 int wait_to_complete);
30 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
31 static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
32 static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
33                                         uint16_t queue);
34
35 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
36 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
37 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
38 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
39 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
40 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
41                                       struct rte_intr_handle *handle);
42 static void txgbe_dev_interrupt_handler(void *param);
43 static void txgbe_dev_interrupt_delayed_handler(void *param);
44 static void txgbe_configure_msix(struct rte_eth_dev *dev);
45
46 #define TXGBE_SET_HWSTRIP(h, q) do {\
47                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
48                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
49                 (h)->bitmap[idx] |= 1 << bit;\
50         } while (0)
51
52 #define TXGBE_CLEAR_HWSTRIP(h, q) do {\
53                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
54                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
55                 (h)->bitmap[idx] &= ~(1 << bit);\
56         } while (0)
57
58 #define TXGBE_GET_HWSTRIP(h, q, r) do {\
59                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
60                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
61                 (r) = (h)->bitmap[idx] >> bit & 1;\
62         } while (0)
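/*
 * Example (illustration only, assuming NBBY is 8 and bitmap[] holds 32-bit
 * words): for queue q = 40, idx = 40 / 32 = 1 and bit = 40 % 32 = 8, so
 * TXGBE_SET_HWSTRIP sets bit 8 of bitmap[1].
 */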
63
64 /*
65  * The set of PCI devices this driver supports
66  */
67 static const struct rte_pci_id pci_id_txgbe_map[] = {
68         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
69         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
70         { .vendor_id = 0, /* sentinel */ },
71 };
72
73 static const struct rte_eth_desc_lim rx_desc_lim = {
74         .nb_max = TXGBE_RING_DESC_MAX,
75         .nb_min = TXGBE_RING_DESC_MIN,
76         .nb_align = TXGBE_RXD_ALIGN,
77 };
78
79 static const struct rte_eth_desc_lim tx_desc_lim = {
80         .nb_max = TXGBE_RING_DESC_MAX,
81         .nb_min = TXGBE_RING_DESC_MIN,
82         .nb_align = TXGBE_TXD_ALIGN,
83         .nb_seg_max = TXGBE_TX_MAX_SEG,
84         .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
85 };
86
87 static const struct eth_dev_ops txgbe_eth_dev_ops;
88
89 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
90 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
91 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
92         /* MNG RxTx */
93         HW_XSTAT(mng_bmc2host_packets),
94         HW_XSTAT(mng_host2bmc_packets),
95         /* Basic RxTx */
96         HW_XSTAT(rx_packets),
97         HW_XSTAT(tx_packets),
98         HW_XSTAT(rx_bytes),
99         HW_XSTAT(tx_bytes),
100         HW_XSTAT(rx_total_bytes),
101         HW_XSTAT(rx_total_packets),
102         HW_XSTAT(tx_total_packets),
103         HW_XSTAT(rx_total_missed_packets),
104         HW_XSTAT(rx_broadcast_packets),
105         HW_XSTAT(rx_multicast_packets),
106         HW_XSTAT(rx_management_packets),
107         HW_XSTAT(tx_management_packets),
108         HW_XSTAT(rx_management_dropped),
109
110         /* Basic Error */
111         HW_XSTAT(rx_crc_errors),
112         HW_XSTAT(rx_illegal_byte_errors),
113         HW_XSTAT(rx_error_bytes),
114         HW_XSTAT(rx_mac_short_packet_dropped),
115         HW_XSTAT(rx_length_errors),
116         HW_XSTAT(rx_undersize_errors),
117         HW_XSTAT(rx_fragment_errors),
118         HW_XSTAT(rx_oversize_errors),
119         HW_XSTAT(rx_jabber_errors),
120         HW_XSTAT(rx_l3_l4_xsum_error),
121         HW_XSTAT(mac_local_errors),
122         HW_XSTAT(mac_remote_errors),
123
124         /* Flow Director */
125         HW_XSTAT(flow_director_added_filters),
126         HW_XSTAT(flow_director_removed_filters),
127         HW_XSTAT(flow_director_filter_add_errors),
128         HW_XSTAT(flow_director_filter_remove_errors),
129         HW_XSTAT(flow_director_matched_filters),
130         HW_XSTAT(flow_director_missed_filters),
131
132         /* FCoE */
133         HW_XSTAT(rx_fcoe_crc_errors),
134         HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
135         HW_XSTAT(rx_fcoe_dropped),
136         HW_XSTAT(rx_fcoe_packets),
137         HW_XSTAT(tx_fcoe_packets),
138         HW_XSTAT(rx_fcoe_bytes),
139         HW_XSTAT(tx_fcoe_bytes),
140         HW_XSTAT(rx_fcoe_no_ddp),
141         HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
142
143         /* MACSEC */
144         HW_XSTAT(tx_macsec_pkts_untagged),
145         HW_XSTAT(tx_macsec_pkts_encrypted),
146         HW_XSTAT(tx_macsec_pkts_protected),
147         HW_XSTAT(tx_macsec_octets_encrypted),
148         HW_XSTAT(tx_macsec_octets_protected),
149         HW_XSTAT(rx_macsec_pkts_untagged),
150         HW_XSTAT(rx_macsec_pkts_badtag),
151         HW_XSTAT(rx_macsec_pkts_nosci),
152         HW_XSTAT(rx_macsec_pkts_unknownsci),
153         HW_XSTAT(rx_macsec_octets_decrypted),
154         HW_XSTAT(rx_macsec_octets_validated),
155         HW_XSTAT(rx_macsec_sc_pkts_unchecked),
156         HW_XSTAT(rx_macsec_sc_pkts_delayed),
157         HW_XSTAT(rx_macsec_sc_pkts_late),
158         HW_XSTAT(rx_macsec_sa_pkts_ok),
159         HW_XSTAT(rx_macsec_sa_pkts_invalid),
160         HW_XSTAT(rx_macsec_sa_pkts_notvalid),
161         HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
162         HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
163
164         /* MAC RxTx */
165         HW_XSTAT(rx_size_64_packets),
166         HW_XSTAT(rx_size_65_to_127_packets),
167         HW_XSTAT(rx_size_128_to_255_packets),
168         HW_XSTAT(rx_size_256_to_511_packets),
169         HW_XSTAT(rx_size_512_to_1023_packets),
170         HW_XSTAT(rx_size_1024_to_max_packets),
171         HW_XSTAT(tx_size_64_packets),
172         HW_XSTAT(tx_size_65_to_127_packets),
173         HW_XSTAT(tx_size_128_to_255_packets),
174         HW_XSTAT(tx_size_256_to_511_packets),
175         HW_XSTAT(tx_size_512_to_1023_packets),
176         HW_XSTAT(tx_size_1024_to_max_packets),
177
178         /* Flow Control */
179         HW_XSTAT(tx_xon_packets),
180         HW_XSTAT(rx_xon_packets),
181         HW_XSTAT(tx_xoff_packets),
182         HW_XSTAT(rx_xoff_packets),
183
184         HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
185         HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
186         HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
187         HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
188 };
189
190 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
191                            sizeof(rte_txgbe_stats_strings[0]))
192
193 /* Per-priority statistics */
194 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
195 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
196         UP_XSTAT(rx_up_packets),
197         UP_XSTAT(tx_up_packets),
198         UP_XSTAT(rx_up_bytes),
199         UP_XSTAT(tx_up_bytes),
200         UP_XSTAT(rx_up_drop_packets),
201
202         UP_XSTAT(tx_up_xon_packets),
203         UP_XSTAT(rx_up_xon_packets),
204         UP_XSTAT(tx_up_xoff_packets),
205         UP_XSTAT(rx_up_xoff_packets),
206         UP_XSTAT(rx_up_dropped),
207         UP_XSTAT(rx_up_mbuf_alloc_errors),
208         UP_XSTAT(tx_up_xon2off_packets),
209 };
210
211 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
212                            sizeof(rte_txgbe_up_strings[0]))
213
214 /* Per-queue statistics */
215 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
216 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
217         QP_XSTAT(rx_qp_packets),
218         QP_XSTAT(tx_qp_packets),
219         QP_XSTAT(rx_qp_bytes),
220         QP_XSTAT(tx_qp_bytes),
221         QP_XSTAT(rx_qp_mc_packets),
222 };
223
224 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
225                            sizeof(rte_txgbe_qp_strings[0]))
226
227 static inline int
228 txgbe_is_sfp(struct txgbe_hw *hw)
229 {
230         switch (hw->phy.type) {
231         case txgbe_phy_sfp_avago:
232         case txgbe_phy_sfp_ftl:
233         case txgbe_phy_sfp_intel:
234         case txgbe_phy_sfp_unknown:
235         case txgbe_phy_sfp_tyco_passive:
236         case txgbe_phy_sfp_unknown_passive:
237                 return 1;
238         default:
239                 return 0;
240         }
241 }
242
243 static inline int32_t
244 txgbe_pf_reset_hw(struct txgbe_hw *hw)
245 {
246         uint32_t ctrl_ext;
247         int32_t status;
248
249         status = hw->mac.reset_hw(hw);
250
251         ctrl_ext = rd32(hw, TXGBE_PORTCTL);
252         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
253         ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
254         wr32(hw, TXGBE_PORTCTL, ctrl_ext);
255         txgbe_flush(hw);
256
257         if (status == TXGBE_ERR_SFP_NOT_PRESENT)
258                 status = 0;
259         return status;
260 }
261
262 static inline void
263 txgbe_enable_intr(struct rte_eth_dev *dev)
264 {
265         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
266         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
267
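        /*
         * Illustration of the assumed register semantics: writing IMC
         * (interrupt mask clear) un-masks the queue vectors here, while
         * txgbe_disable_intr() below writes IMS (interrupt mask set) to
         * mask them again.
         */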
268         wr32(hw, TXGBE_IENMISC, intr->mask_misc);
269         wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
270         wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
271         txgbe_flush(hw);
272 }
273
274 static void
275 txgbe_disable_intr(struct txgbe_hw *hw)
276 {
277         PMD_INIT_FUNC_TRACE();
278
279         wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
280         wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
281         wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
282         txgbe_flush(hw);
283 }
284
285 static int
286 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
287                                   uint16_t queue_id,
288                                   uint8_t stat_idx,
289                                   uint8_t is_rx)
290 {
291         struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
292         struct txgbe_stat_mappings *stat_mappings =
293                 TXGBE_DEV_STAT_MAPPINGS(eth_dev);
294         uint32_t qsmr_mask = 0;
295         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
296         uint32_t q_map;
297         uint8_t n, offset;
298
299         if (hw->mac.type != txgbe_mac_raptor)
300                 return -ENOSYS;
301
302         if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
303                 return -EIO;
304
305         PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
306                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
307                      queue_id, stat_idx);
308
309         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
310         if (n >= TXGBE_NB_STAT_MAPPING) {
311                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
312                 return -EIO;
313         }
314         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
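        /*
         * Illustration (assuming 4 fields of 8 bits per mapping register):
         * queue_id 5 gives n = 1 and offset = 1, so bits 8..15 of
         * RQSMR[1]/TQSM[1] hold its stat index.
         */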
315
316         /* Now clear any previous stat_idx set */
317         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
318         if (!is_rx)
319                 stat_mappings->tqsm[n] &= ~clearing_mask;
320         else
321                 stat_mappings->rqsm[n] &= ~clearing_mask;
322
323         q_map = (uint32_t)stat_idx;
324         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
325         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
326         if (!is_rx)
327                 stat_mappings->tqsm[n] |= qsmr_mask;
328         else
329                 stat_mappings->rqsm[n] |= qsmr_mask;
330
331         PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
332                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
333                      queue_id, stat_idx);
334         PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
335                      is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
336         return 0;
337 }
338
339 /*
340  * Ensure that all locks are released before first NVM or PHY access
341  */
342 static void
343 txgbe_swfw_lock_reset(struct txgbe_hw *hw)
344 {
345         uint16_t mask;
346
347         /*
348          * These ones are trickier since they are common to all ports; but
349          * the swfw_sync retry lasts long enough (1s) to be almost sure that,
350          * if the lock cannot be taken, it is due to an improper lock of the
351          * semaphore.
352          */
353         mask = TXGBE_MNGSEM_SWPHY |
354                TXGBE_MNGSEM_SWMBX |
355                TXGBE_MNGSEM_SWFLASH;
356         if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
357                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
358
359         hw->mac.release_swfw_sync(hw, mask);
360 }
361
362 static int
363 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
364 {
365         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
366         struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
367         struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
368         struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
369         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
370         const struct rte_memzone *mz;
371         uint32_t ctrl_ext;
372         uint16_t csum;
373         int err;
374
375         PMD_INIT_FUNC_TRACE();
376
377         eth_dev->dev_ops = &txgbe_eth_dev_ops;
378         eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
379         eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
380         eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
381
382         /*
383          * For secondary processes, we don't initialise any further as primary
384          * has already done this work. Only check we don't need a different
385          * RX and TX function.
386          */
387         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
388                 struct txgbe_tx_queue *txq;
389                 /* The TX function in the primary is set by the last queue
390                  * initialized; Tx queues may not be initialized by the primary yet.
391                  */
392                 if (eth_dev->data->tx_queues) {
393                         uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
394                         txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
395                         txgbe_set_tx_function(eth_dev, txq);
396                 } else {
397                         /* Use default TX function if we get here */
398                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
399                                      "Using default TX function.");
400                 }
401
402                 txgbe_set_rx_function(eth_dev);
403
404                 return 0;
405         }
406
407         rte_eth_copy_pci_info(eth_dev, pci_dev);
408
409         /* Vendor and Device ID need to be set before init of shared code */
410         hw->device_id = pci_dev->id.device_id;
411         hw->vendor_id = pci_dev->id.vendor_id;
412         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
413         hw->allow_unsupported_sfp = 1;
414
415         /* Reserve memory for interrupt status block */
416         mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
417                 16, TXGBE_ALIGN, SOCKET_ID_ANY);
418         if (mz == NULL)
419                 return -ENOMEM;
420
421         hw->isb_dma = TMZ_PADDR(mz);
422         hw->isb_mem = TMZ_VADDR(mz);
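        /* The small zone reserved above serves as the interrupt status block
         * (ISB) that the device DMA-writes interrupt cause data into;
         * isb_dma/isb_mem are its bus and virtual addresses (description is
         * an assumption for illustration).
         */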
423
424         /* Initialize the shared code (base driver) */
425         err = txgbe_init_shared_code(hw);
426         if (err != 0) {
427                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
428                 return -EIO;
429         }
430
431         /* Unlock any pending hardware semaphore */
432         txgbe_swfw_lock_reset(hw);
433
434         err = hw->rom.init_params(hw);
435         if (err != 0) {
436                 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
437                 return -EIO;
438         }
439
440         /* Make sure we have a good EEPROM before we read from it */
441         err = hw->rom.validate_checksum(hw, &csum);
442         if (err != 0) {
443                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
444                 return -EIO;
445         }
446
447         err = hw->mac.init_hw(hw);
448
449         /*
450          * Devices with copper phys will fail to initialise if txgbe_init_hw()
451          * is called too soon after the kernel driver unbinding/binding occurs.
452          * The failure occurs in txgbe_identify_phy() for all devices,
453          * but for non-copper devices, txgbe_identify_sfp_module() is
454          * also called. See txgbe_identify_phy(). The reason for the
455          * failure is not known, and only occurs when virtualisation features
456          * are disabled in the BIOS. A delay of 200ms was found to be enough by
457          * trial-and-error, and is doubled to be safe.
458          */
459         if (err && hw->phy.media_type == txgbe_media_type_copper) {
460                 rte_delay_ms(200);
461                 err = hw->mac.init_hw(hw);
462         }
463
464         if (err == TXGBE_ERR_SFP_NOT_PRESENT)
465                 err = 0;
466
467         if (err == TXGBE_ERR_EEPROM_VERSION) {
468                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
469                              "LOM.  Please be aware there may be issues associated "
470                              "with your hardware.");
471                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
472                              "please contact your hardware representative "
473                              "who provided you with this hardware.");
474         } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
475                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
476         }
477         if (err) {
478                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
479                 return -EIO;
480         }
481
482         /* Reset the hw statistics */
483         txgbe_dev_stats_reset(eth_dev);
484
485         /* disable interrupt */
486         txgbe_disable_intr(hw);
487
488         /* Allocate memory for storing MAC addresses */
489         eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
490                                                hw->mac.num_rar_entries, 0);
491         if (eth_dev->data->mac_addrs == NULL) {
492                 PMD_INIT_LOG(ERR,
493                              "Failed to allocate %u bytes needed to store "
494                              "MAC addresses",
495                              RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
496                 return -ENOMEM;
497         }
498
499         /* Copy the permanent MAC address */
500         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
501                         &eth_dev->data->mac_addrs[0]);
502
503         /* Allocate memory for storing hash filter MAC addresses */
504         eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
505                         RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
506         if (eth_dev->data->hash_mac_addrs == NULL) {
507                 PMD_INIT_LOG(ERR,
508                              "Failed to allocate %d bytes needed to store MAC addresses",
509                              RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
510                 return -ENOMEM;
511         }
512
513         /* initialize the vfta */
514         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
515
516         /* initialize the hw strip bitmap*/
517         memset(hwstrip, 0, sizeof(*hwstrip));
518
519         /* initialize PF if max_vfs not zero */
520         txgbe_pf_host_init(eth_dev);
521
522         ctrl_ext = rd32(hw, TXGBE_PORTCTL);
523         /* let hardware know driver is loaded */
524         ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
525         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
526         ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
527         wr32(hw, TXGBE_PORTCTL, ctrl_ext);
528         txgbe_flush(hw);
529
530         if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
531                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
532                              (int)hw->mac.type, (int)hw->phy.type,
533                              (int)hw->phy.sfp_type);
534         else
535                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
536                              (int)hw->mac.type, (int)hw->phy.type);
537
538         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
539                      eth_dev->data->port_id, pci_dev->id.vendor_id,
540                      pci_dev->id.device_id);
541
542         rte_intr_callback_register(intr_handle,
543                                    txgbe_dev_interrupt_handler, eth_dev);
544
545         /* enable uio/vfio intr/eventfd mapping */
546         rte_intr_enable(intr_handle);
547
548         /* enable support intr */
549         txgbe_enable_intr(eth_dev);
550
551         return 0;
552 }
553
554 static int
555 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
556 {
557         PMD_INIT_FUNC_TRACE();
558
559         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
560                 return 0;
561
562         txgbe_dev_close(eth_dev);
563
564         return 0;
565 }
566
567 static int
568 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
569                 struct rte_pci_device *pci_dev)
570 {
571         struct rte_eth_dev *pf_ethdev;
572         struct rte_eth_devargs eth_da;
573         int retval;
574
575         if (pci_dev->device.devargs) {
576                 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
577                                 &eth_da);
578                 if (retval)
579                         return retval;
580         } else {
581                 memset(&eth_da, 0, sizeof(eth_da));
582         }
583
584         retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
585                         sizeof(struct txgbe_adapter),
586                         eth_dev_pci_specific_init, pci_dev,
587                         eth_txgbe_dev_init, NULL);
588
589         if (retval || eth_da.nb_representor_ports < 1)
590                 return retval;
591
592         pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
593         if (pf_ethdev == NULL)
594                 return -ENODEV;
595
596         return 0;
597 }
598
599 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
600 {
601         struct rte_eth_dev *ethdev;
602
603         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
604         if (!ethdev)
605                 return -ENODEV;
606
607         return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
608 }
609
610 static struct rte_pci_driver rte_txgbe_pmd = {
611         .id_table = pci_id_txgbe_map,
612         .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
613                      RTE_PCI_DRV_INTR_LSC,
614         .probe = eth_txgbe_pci_probe,
615         .remove = eth_txgbe_pci_remove,
616 };
617
618 static int
619 txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
620 {
621         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
622         struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
623         uint32_t vfta;
624         uint32_t vid_idx;
625         uint32_t vid_bit;
626
627         vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
628         vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
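        /*
         * Example: for vlan_id 100, vid_idx = (100 >> 5) & 0x7F = 3 and
         * vid_bit = 1 << (100 & 0x1F) = 1 << 4, so bit 4 of the fourth
         * 32-bit VLAN table entry controls this VLAN.
         */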
629         vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
630         if (on)
631                 vfta |= vid_bit;
632         else
633                 vfta &= ~vid_bit;
634         wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
635
636         /* update local VFTA copy */
637         shadow_vfta->vfta[vid_idx] = vfta;
638
639         return 0;
640 }
641
642 static void
643 txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
644 {
645         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
646         struct txgbe_rx_queue *rxq;
647         bool restart;
648         uint32_t rxcfg, rxbal, rxbah;
649
650         if (on)
651                 txgbe_vlan_hw_strip_enable(dev, queue);
652         else
653                 txgbe_vlan_hw_strip_disable(dev, queue);
654
655         rxq = dev->data->rx_queues[queue];
656         rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
657         rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
658         rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
659         if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
660                 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
661                         !(rxcfg & TXGBE_RXCFG_VLAN);
662                 rxcfg |= TXGBE_RXCFG_VLAN;
663         } else {
664                 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
665                         (rxcfg & TXGBE_RXCFG_VLAN);
666                 rxcfg &= ~TXGBE_RXCFG_VLAN;
667         }
668         rxcfg &= ~TXGBE_RXCFG_ENA;
669
670         if (restart) {
671                 /* set vlan strip for ring */
672                 txgbe_dev_rx_queue_stop(dev, queue);
673                 wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
674                 wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
675                 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
676                 txgbe_dev_rx_queue_start(dev, queue);
677         }
678 }
679
680 static int
681 txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
682                     enum rte_vlan_type vlan_type,
683                     uint16_t tpid)
684 {
685         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
686         int ret = 0;
687         uint32_t portctrl, vlan_ext, qinq;
688
689         portctrl = rd32(hw, TXGBE_PORTCTL);
690
691         vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
692         qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
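        /*
         * Note: the inner TPID can only be programmed once VLAN extend is
         * enabled in PORTCTL; a typical QinQ setup uses 0x88A8 as the outer
         * TPID and 0x8100 as the inner TPID (values given for illustration).
         */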
693         switch (vlan_type) {
694         case ETH_VLAN_TYPE_INNER:
695                 if (vlan_ext) {
696                         wr32m(hw, TXGBE_VLANCTL,
697                                 TXGBE_VLANCTL_TPID_MASK,
698                                 TXGBE_VLANCTL_TPID(tpid));
699                         wr32m(hw, TXGBE_DMATXCTRL,
700                                 TXGBE_DMATXCTRL_TPID_MASK,
701                                 TXGBE_DMATXCTRL_TPID(tpid));
702                 } else {
703                         ret = -ENOTSUP;
704                         PMD_DRV_LOG(ERR, "Inner type is not supported"
705                                     " by single VLAN");
706                 }
707
708                 if (qinq) {
709                         wr32m(hw, TXGBE_TAGTPID(0),
710                                 TXGBE_TAGTPID_LSB_MASK,
711                                 TXGBE_TAGTPID_LSB(tpid));
712                 }
713                 break;
714         case ETH_VLAN_TYPE_OUTER:
715                 if (vlan_ext) {
716                         /* Only the high 16 bits are valid */
717                         wr32m(hw, TXGBE_EXTAG,
718                                 TXGBE_EXTAG_VLAN_MASK,
719                                 TXGBE_EXTAG_VLAN(tpid));
720                 } else {
721                         wr32m(hw, TXGBE_VLANCTL,
722                                 TXGBE_VLANCTL_TPID_MASK,
723                                 TXGBE_VLANCTL_TPID(tpid));
724                         wr32m(hw, TXGBE_DMATXCTRL,
725                                 TXGBE_DMATXCTRL_TPID_MASK,
726                                 TXGBE_DMATXCTRL_TPID(tpid));
727                 }
728
729                 if (qinq) {
730                         wr32m(hw, TXGBE_TAGTPID(0),
731                                 TXGBE_TAGTPID_MSB_MASK,
732                                 TXGBE_TAGTPID_MSB(tpid));
733                 }
734                 break;
735         default:
736                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
737                 return -EINVAL;
738         }
739
740         return ret;
741 }
742
743 void
744 txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
745 {
746         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
747         uint32_t vlnctrl;
748
749         PMD_INIT_FUNC_TRACE();
750
751         /* Filter Table Disable */
752         vlnctrl = rd32(hw, TXGBE_VLANCTL);
753         vlnctrl &= ~TXGBE_VLANCTL_VFE;
754         wr32(hw, TXGBE_VLANCTL, vlnctrl);
755 }
756
757 void
758 txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
759 {
760         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
761         struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
762         uint32_t vlnctrl;
763         uint16_t i;
764
765         PMD_INIT_FUNC_TRACE();
766
767         /* Filter Table Enable */
768         vlnctrl = rd32(hw, TXGBE_VLANCTL);
769         vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
770         vlnctrl |= TXGBE_VLANCTL_VFE;
771         wr32(hw, TXGBE_VLANCTL, vlnctrl);
772
773         /* write whatever is in local vfta copy */
774         for (i = 0; i < TXGBE_VFTA_SIZE; i++)
775                 wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
776 }
777
778 void
779 txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
780 {
781         struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
782         struct txgbe_rx_queue *rxq;
783
784         if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
785                 return;
786
787         if (on)
788                 TXGBE_SET_HWSTRIP(hwstrip, queue);
789         else
790                 TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
791
792         if (queue >= dev->data->nb_rx_queues)
793                 return;
794
795         rxq = dev->data->rx_queues[queue];
796
797         if (on) {
798                 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
799                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
800         } else {
801                 rxq->vlan_flags = PKT_RX_VLAN;
802                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
803         }
804 }
805
806 static void
807 txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
808 {
809         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
810         uint32_t ctrl;
811
812         PMD_INIT_FUNC_TRACE();
813
814         ctrl = rd32(hw, TXGBE_RXCFG(queue));
815         ctrl &= ~TXGBE_RXCFG_VLAN;
816         wr32(hw, TXGBE_RXCFG(queue), ctrl);
817
818         /* record this setting in the per-queue HW strip bitmap */
819         txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
820 }
821
822 static void
823 txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
824 {
825         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
826         uint32_t ctrl;
827
828         PMD_INIT_FUNC_TRACE();
829
830         ctrl = rd32(hw, TXGBE_RXCFG(queue));
831         ctrl |= TXGBE_RXCFG_VLAN;
832         wr32(hw, TXGBE_RXCFG(queue), ctrl);
833
834         /* record this setting in the per-queue HW strip bitmap */
835         txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
836 }
837
838 static void
839 txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
840 {
841         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
842         uint32_t ctrl;
843
844         PMD_INIT_FUNC_TRACE();
845
846         ctrl = rd32(hw, TXGBE_PORTCTL);
847         ctrl &= ~TXGBE_PORTCTL_VLANEXT;
848         ctrl &= ~TXGBE_PORTCTL_QINQ;
849         wr32(hw, TXGBE_PORTCTL, ctrl);
850 }
851
852 static void
853 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
854 {
855         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
856         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
857         struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
858         uint32_t ctrl;
859
860         PMD_INIT_FUNC_TRACE();
861
862         ctrl  = rd32(hw, TXGBE_PORTCTL);
863         ctrl |= TXGBE_PORTCTL_VLANEXT;
864         if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
865             txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
866                 ctrl |= TXGBE_PORTCTL_QINQ;
867         wr32(hw, TXGBE_PORTCTL, ctrl);
868 }
869
870 void
871 txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
872 {
873         struct txgbe_rx_queue *rxq;
874         uint16_t i;
875
876         PMD_INIT_FUNC_TRACE();
877
878         for (i = 0; i < dev->data->nb_rx_queues; i++) {
879                 rxq = dev->data->rx_queues[i];
880
881                 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
882                         txgbe_vlan_strip_queue_set(dev, i, 1);
883                 else
884                         txgbe_vlan_strip_queue_set(dev, i, 0);
885         }
886 }
887
888 void
889 txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
890 {
891         uint16_t i;
892         struct rte_eth_rxmode *rxmode;
893         struct txgbe_rx_queue *rxq;
894
895         if (mask & ETH_VLAN_STRIP_MASK) {
896                 rxmode = &dev->data->dev_conf.rxmode;
897                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
898                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
899                                 rxq = dev->data->rx_queues[i];
900                                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
901                         }
902                 else
903                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
904                                 rxq = dev->data->rx_queues[i];
905                                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
906                         }
907         }
908 }
909
910 static int
911 txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
912 {
913         struct rte_eth_rxmode *rxmode;
914         rxmode = &dev->data->dev_conf.rxmode;
915
916         if (mask & ETH_VLAN_STRIP_MASK)
917                 txgbe_vlan_hw_strip_config(dev);
918
919         if (mask & ETH_VLAN_FILTER_MASK) {
920                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
921                         txgbe_vlan_hw_filter_enable(dev);
922                 else
923                         txgbe_vlan_hw_filter_disable(dev);
924         }
925
926         if (mask & ETH_VLAN_EXTEND_MASK) {
927                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
928                         txgbe_vlan_hw_extend_enable(dev);
929                 else
930                         txgbe_vlan_hw_extend_disable(dev);
931         }
932
933         return 0;
934 }
935
936 static int
937 txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
938 {
939         txgbe_config_vlan_strip_on_all_queues(dev, mask);
940
941         txgbe_vlan_offload_config(dev, mask);
942
943         return 0;
944 }
945
946 static int
947 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
948 {
949         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
950
951         switch (nb_rx_q) {
952         case 1:
953         case 2:
954                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
955                 break;
956         case 4:
957                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
958                 break;
959         default:
960                 return -EINVAL;
961         }
962
963         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
964                 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
965         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
966                 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
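        /*
         * Illustration (assuming TXGBE_MAX_RX_QUEUE_NUM is 128): with 4 Rx
         * queues per VF the layout is ETH_32_POOLS, so nb_q_per_pool =
         * 128 / 32 = 4 and, e.g. with max_vfs = 31, the PF default pool
         * starts at queue index 31 * 4 = 124.
         */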
967         return 0;
968 }
969
970 static int
971 txgbe_check_mq_mode(struct rte_eth_dev *dev)
972 {
973         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
974         uint16_t nb_rx_q = dev->data->nb_rx_queues;
975         uint16_t nb_tx_q = dev->data->nb_tx_queues;
976
977         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
978                 /* check multi-queue mode */
979                 switch (dev_conf->rxmode.mq_mode) {
980                 case ETH_MQ_RX_VMDQ_DCB:
981                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
982                         break;
983                 case ETH_MQ_RX_VMDQ_DCB_RSS:
984                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
985                         PMD_INIT_LOG(ERR, "SRIOV active,"
986                                         " unsupported mq_mode rx %d.",
987                                         dev_conf->rxmode.mq_mode);
988                         return -EINVAL;
989                 case ETH_MQ_RX_RSS:
990                 case ETH_MQ_RX_VMDQ_RSS:
991                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
992                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
993                                 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
994                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
995                                                 " invalid queue number"
996                                                 " for VMDQ RSS, allowed"
997                                                 " values are 1, 2 or 4.");
998                                         return -EINVAL;
999                                 }
1000                         break;
1001                 case ETH_MQ_RX_VMDQ_ONLY:
1002                 case ETH_MQ_RX_NONE:
1003                         /* if no mq mode is configured, use the default scheme */
1004                         dev->data->dev_conf.rxmode.mq_mode =
1005                                 ETH_MQ_RX_VMDQ_ONLY;
1006                         break;
1007                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1008                         /* SRIOV only works in VMDq enable mode */
1009                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1010                                         " wrong mq_mode rx %d.",
1011                                         dev_conf->rxmode.mq_mode);
1012                         return -EINVAL;
1013                 }
1014
1015                 switch (dev_conf->txmode.mq_mode) {
1016                 case ETH_MQ_TX_VMDQ_DCB:
1017                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1018                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1019                         break;
1020                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1021                         dev->data->dev_conf.txmode.mq_mode =
1022                                 ETH_MQ_TX_VMDQ_ONLY;
1023                         break;
1024                 }
1025
1026                 /* check valid queue number */
1027                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1028                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1029                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1030                                         " nb_rx_q=%d nb_tx_q=%d queue number"
1031                                         " must be less than or equal to %d.",
1032                                         nb_rx_q, nb_tx_q,
1033                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1034                         return -EINVAL;
1035                 }
1036         } else {
1037                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1038                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1039                                           " not supported.");
1040                         return -EINVAL;
1041                 }
1042                 /* check configuration for vmdq+dcb mode */
1043                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1044                         const struct rte_eth_vmdq_dcb_conf *conf;
1045
1046                         if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1047                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1048                                                 TXGBE_VMDQ_DCB_NB_QUEUES);
1049                                 return -EINVAL;
1050                         }
1051                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1052                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1053                                conf->nb_queue_pools == ETH_32_POOLS)) {
1054                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1055                                                 " nb_queue_pools must be %d or %d.",
1056                                                 ETH_16_POOLS, ETH_32_POOLS);
1057                                 return -EINVAL;
1058                         }
1059                 }
1060                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1061                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
1062
1063                         if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1064                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1065                                                  TXGBE_VMDQ_DCB_NB_QUEUES);
1066                                 return -EINVAL;
1067                         }
1068                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1069                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1070                                conf->nb_queue_pools == ETH_32_POOLS)) {
1071                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1072                                                 " nb_queue_pools != %d and"
1073                                                 " nb_queue_pools != %d.",
1074                                                 ETH_16_POOLS, ETH_32_POOLS);
1075                                 return -EINVAL;
1076                         }
1077                 }
1078
1079                 /* For DCB mode check our configuration before we go further */
1080                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1081                         const struct rte_eth_dcb_rx_conf *conf;
1082
1083                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1084                         if (!(conf->nb_tcs == ETH_4_TCS ||
1085                                conf->nb_tcs == ETH_8_TCS)) {
1086                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1087                                                 " and nb_tcs != %d.",
1088                                                 ETH_4_TCS, ETH_8_TCS);
1089                                 return -EINVAL;
1090                         }
1091                 }
1092
1093                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1094                         const struct rte_eth_dcb_tx_conf *conf;
1095
1096                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1097                         if (!(conf->nb_tcs == ETH_4_TCS ||
1098                                conf->nb_tcs == ETH_8_TCS)) {
1099                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1100                                                 " and nb_tcs != %d.",
1101                                                 ETH_4_TCS, ETH_8_TCS);
1102                                 return -EINVAL;
1103                         }
1104                 }
1105         }
1106         return 0;
1107 }
1108
1109 static int
1110 txgbe_dev_configure(struct rte_eth_dev *dev)
1111 {
1112         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1113         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1114         int ret;
1115
1116         PMD_INIT_FUNC_TRACE();
1117
1118         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1119                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1120
1121         /* multiple queue mode checking */
1122         ret  = txgbe_check_mq_mode(dev);
1123         if (ret != 0) {
1124                 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1125                             ret);
1126                 return ret;
1127         }
1128
1129         /* set flag to update link status after init */
1130         intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1131
1132         /*
1133          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
1134          * allocation Rx preconditions, it will be reset.
1135          */
1136         adapter->rx_bulk_alloc_allowed = true;
1137
1138         return 0;
1139 }
1140
1141 static void
1142 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1143 {
1144         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1145         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1146         uint32_t gpie;
1147
1148         gpie = rd32(hw, TXGBE_GPIOINTEN);
1149         gpie |= TXGBE_GPIOBIT_6;
1150         wr32(hw, TXGBE_GPIOINTEN, gpie);
1151         intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1152 }
1153
1154 /*
1155  * Configure device link speed and setup link.
1156  * It returns 0 on success.
1157  */
1158 static int
1159 txgbe_dev_start(struct rte_eth_dev *dev)
1160 {
1161         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1162         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1163         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1164         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1165         uint32_t intr_vector = 0;
1166         int err;
1167         bool link_up = false, negotiate = 0;
1168         uint32_t speed = 0;
1169         uint32_t allowed_speeds = 0;
1170         int mask = 0;
1171         int status;
1172         uint32_t *link_speeds;
1173
1174         PMD_INIT_FUNC_TRACE();
1175
1176         /* TXGBE devices don't support:
1177          *    - half duplex (checked afterwards for valid speeds)
1178          *    - fixed speed: TODO implement
1179          */
1180         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1181                 PMD_INIT_LOG(ERR,
1182                 "Invalid link_speeds for port %u, fixed speed not supported",
1183                                 dev->data->port_id);
1184                 return -EINVAL;
1185         }
1186
1187         /* Stop the link setup handler before resetting the HW. */
1188         rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1189
1190         /* disable uio/vfio intr/eventfd mapping */
1191         rte_intr_disable(intr_handle);
1192
1193         /* stop adapter */
1194         hw->adapter_stopped = 0;
1195         txgbe_stop_hw(hw);
1196
1197         /* reinitialize adapter
1198          * this calls reset and start
1199          */
1200         hw->nb_rx_queues = dev->data->nb_rx_queues;
1201         hw->nb_tx_queues = dev->data->nb_tx_queues;
1202         status = txgbe_pf_reset_hw(hw);
1203         if (status != 0)
1204                 return -1;
1205         hw->mac.start_hw(hw);
1206         hw->mac.get_link_status = true;
1207
1208         txgbe_dev_phy_intr_setup(dev);
1209
1210         /* check and configure queue intr-vector mapping */
1211         if ((rte_intr_cap_multiple(intr_handle) ||
1212              !RTE_ETH_DEV_SRIOV(dev).active) &&
1213             dev->data->dev_conf.intr_conf.rxq != 0) {
1214                 intr_vector = dev->data->nb_rx_queues;
1215                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1216                         return -1;
1217         }
1218
1219         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1220                 intr_handle->intr_vec =
1221                         rte_zmalloc("intr_vec",
1222                                     dev->data->nb_rx_queues * sizeof(int), 0);
1223                 if (intr_handle->intr_vec == NULL) {
1224                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1225                                      " intr_vec", dev->data->nb_rx_queues);
1226                         return -ENOMEM;
1227                 }
1228         }
1229
1230         /* configure MSI-X for sleep until Rx interrupt */
1231         txgbe_configure_msix(dev);
1232
1233         /* initialize transmission unit */
1234         txgbe_dev_tx_init(dev);
1235
1236         /* This can fail when allocating mbufs for descriptor rings */
1237         err = txgbe_dev_rx_init(dev);
1238         if (err) {
1239                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1240                 goto error;
1241         }
1242
1243         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1244                 ETH_VLAN_EXTEND_MASK;
1245         err = txgbe_vlan_offload_config(dev, mask);
1246         if (err) {
1247                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1248                 goto error;
1249         }
1250
1251         err = txgbe_dev_rxtx_start(dev);
1252         if (err < 0) {
1253                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1254                 goto error;
1255         }
1256
1257         /* Skip link setup if loopback mode is enabled. */
1258         if (hw->mac.type == txgbe_mac_raptor &&
1259             dev->data->dev_conf.lpbk_mode)
1260                 goto skip_link_setup;
1261
1262         if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1263                 err = hw->mac.setup_sfp(hw);
1264                 if (err)
1265                         goto error;
1266         }
1267
1268         if (hw->phy.media_type == txgbe_media_type_copper) {
1269                 /* Turn on the copper */
1270                 hw->phy.set_phy_power(hw, true);
1271         } else {
1272                 /* Turn on the laser */
1273                 hw->mac.enable_tx_laser(hw);
1274         }
1275
1276         err = hw->mac.check_link(hw, &speed, &link_up, 0);
1277         if (err)
1278                 goto error;
1279         dev->data->dev_link.link_status = link_up;
1280
1281         err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1282         if (err)
1283                 goto error;
1284
1285         allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
1286                         ETH_LINK_SPEED_10G;
1287
1288         link_speeds = &dev->data->dev_conf.link_speeds;
1289         if (*link_speeds & ~allowed_speeds) {
1290                 PMD_INIT_LOG(ERR, "Invalid link setting");
1291                 goto error;
1292         }
1293
1294         speed = 0x0;
1295         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
1296                 speed = (TXGBE_LINK_SPEED_100M_FULL |
1297                          TXGBE_LINK_SPEED_1GB_FULL |
1298                          TXGBE_LINK_SPEED_10GB_FULL);
1299         } else {
1300                 if (*link_speeds & ETH_LINK_SPEED_10G)
1301                         speed |= TXGBE_LINK_SPEED_10GB_FULL;
1302                 if (*link_speeds & ETH_LINK_SPEED_5G)
1303                         speed |= TXGBE_LINK_SPEED_5GB_FULL;
1304                 if (*link_speeds & ETH_LINK_SPEED_2_5G)
1305                         speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1306                 if (*link_speeds & ETH_LINK_SPEED_1G)
1307                         speed |= TXGBE_LINK_SPEED_1GB_FULL;
1308                 if (*link_speeds & ETH_LINK_SPEED_100M)
1309                         speed |= TXGBE_LINK_SPEED_100M_FULL;
1310         }
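        /*
         * For example, link_speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G
         * in the port configuration yields speed = TXGBE_LINK_SPEED_1GB_FULL |
         * TXGBE_LINK_SPEED_10GB_FULL, which is then passed to setup_link() below.
         */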
1311
1312         err = hw->mac.setup_link(hw, speed, link_up);
1313         if (err)
1314                 goto error;
1315
1316 skip_link_setup:
1317
1318         if (rte_intr_allow_others(intr_handle)) {
1319                 /* check if lsc interrupt is enabled */
1320                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1321                         txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1322                 else
1323                         txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1324                 txgbe_dev_macsec_interrupt_setup(dev);
1325                 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1326         } else {
1327                 rte_intr_callback_unregister(intr_handle,
1328                                              txgbe_dev_interrupt_handler, dev);
1329                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1330                         PMD_INIT_LOG(INFO, "lsc won't be enabled because"
1331                                      " there is no intr multiplex");
1332         }
1333
1334         /* check if rxq interrupt is enabled */
1335         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1336             rte_intr_dp_is_en(intr_handle))
1337                 txgbe_dev_rxq_interrupt_setup(dev);
1338
1339         /* enable uio/vfio intr/eventfd mapping */
1340         rte_intr_enable(intr_handle);
1341
1342         /* resume enabled intr since hw reset */
1343         txgbe_enable_intr(dev);
1344
1345         /*
1346          * Update link status right before return, because it may
1347          * start link configuration process in a separate thread.
1348          */
1349         txgbe_dev_link_update(dev, 0);
1350
1351         wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1352
1353         txgbe_read_stats_registers(hw, hw_stats);
1354         hw->offset_loaded = 1;
1355
1356         return 0;
1357
1358 error:
1359         PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1360         txgbe_dev_clear_queues(dev);
1361         return -EIO;
1362 }
1363
1364 /*
1365  * Stop device: disable rx and tx functions to allow for reconfiguring.
1366  */
1367 static int
1368 txgbe_dev_stop(struct rte_eth_dev *dev)
1369 {
1370         struct rte_eth_link link;
1371         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1372         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1373         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1374
1375         if (hw->adapter_stopped)
1376                 return 0;
1377
1378         PMD_INIT_FUNC_TRACE();
1379
1380         rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1381
1382         /* disable interrupts */
1383         txgbe_disable_intr(hw);
1384
1385         /* reset the NIC */
1386         txgbe_pf_reset_hw(hw);
1387         hw->adapter_stopped = 0;
1388
1389         /* stop adapter */
1390         txgbe_stop_hw(hw);
1391
1392         if (hw->phy.media_type == txgbe_media_type_copper) {
1393                 /* Turn off the copper */
1394                 hw->phy.set_phy_power(hw, false);
1395         } else {
1396                 /* Turn off the laser */
1397                 hw->mac.disable_tx_laser(hw);
1398         }
1399
1400         txgbe_dev_clear_queues(dev);
1401
1402         /* Clear stored conf */
1403         dev->data->scattered_rx = 0;
1404         dev->data->lro = 0;
1405
1406         /* Clear recorded link status */
1407         memset(&link, 0, sizeof(link));
1408         rte_eth_linkstatus_set(dev, &link);
1409
1410         if (!rte_intr_allow_others(intr_handle))
1411                 /* resume the default interrupt handler */
1412                 rte_intr_callback_register(intr_handle,
1413                                            txgbe_dev_interrupt_handler,
1414                                            (void *)dev);
1415
1416         /* Clean datapath event and queue/vec mapping */
1417         rte_intr_efd_disable(intr_handle);
1418         if (intr_handle->intr_vec != NULL) {
1419                 rte_free(intr_handle->intr_vec);
1420                 intr_handle->intr_vec = NULL;
1421         }
1422
1423         wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1424
1425         hw->adapter_stopped = true;
1426         dev->data->dev_started = 0;
1427
1428         return 0;
1429 }
1430
1431 /*
1432  * Set device link up: enable tx.
1433  */
1434 static int
1435 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1436 {
1437         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1438
1439         if (hw->phy.media_type == txgbe_media_type_copper) {
1440                 /* Turn on the copper */
1441                 hw->phy.set_phy_power(hw, true);
1442         } else {
1443                 /* Turn on the laser */
1444                 hw->mac.enable_tx_laser(hw);
1445                 txgbe_dev_link_update(dev, 0);
1446         }
1447
1448         return 0;
1449 }
1450
1451 /*
1452  * Set device link down: disable tx.
1453  */
1454 static int
1455 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1456 {
1457         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1458
1459         if (hw->phy.media_type == txgbe_media_type_copper) {
1460                 /* Turn off the copper */
1461                 hw->phy.set_phy_power(hw, false);
1462         } else {
1463                 /* Turn off the laser */
1464                 hw->mac.disable_tx_laser(hw);
1465                 txgbe_dev_link_update(dev, 0);
1466         }
1467
1468         return 0;
1469 }
1470
1471 /*
1472  * Reset and stop device.
1473  */
1474 static int
1475 txgbe_dev_close(struct rte_eth_dev *dev)
1476 {
1477         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1478         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1479         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1480         int retries = 0;
1481         int ret;
1482
1483         PMD_INIT_FUNC_TRACE();
1484
1485         txgbe_pf_reset_hw(hw);
1486
1487         ret = txgbe_dev_stop(dev);
1488
1489         txgbe_dev_free_queues(dev);
1490
1491         /* reprogram the RAR[0] in case user changed it. */
1492         txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1493
1494         /* Unlock any pending hardware semaphore */
1495         txgbe_swfw_lock_reset(hw);
1496
1497         /* disable uio intr before callback unregister */
1498         rte_intr_disable(intr_handle);
1499
1500         do {
1501                 ret = rte_intr_callback_unregister(intr_handle,
1502                                 txgbe_dev_interrupt_handler, dev);
1503                 if (ret >= 0 || ret == -ENOENT) {
1504                         break;
1505                 } else if (ret != -EAGAIN) {
1506                         PMD_INIT_LOG(ERR,
1507                                 "intr callback unregister failed: %d",
1508                                 ret);
1509                 }
1510                 rte_delay_ms(100);
1511         } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1512
1513         /* cancel the delayed handler before removing the dev */
1514         rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1515
1516         /* uninitialize PF if max_vfs not zero */
1517         txgbe_pf_host_uninit(dev);
1518
1519         rte_free(dev->data->mac_addrs);
1520         dev->data->mac_addrs = NULL;
1521
1522         rte_free(dev->data->hash_mac_addrs);
1523         dev->data->hash_mac_addrs = NULL;
1524
1525         return ret;
1526 }
1527
1528 /*
1529  * Reset PF device.
1530  */
1531 static int
1532 txgbe_dev_reset(struct rte_eth_dev *dev)
1533 {
1534         int ret;
1535
1536         /* When a DPDK PMD PF begins to reset the PF port, it should notify
1537          * all its VFs so that they stay in sync. The detailed notification
1538          * mechanism is PMD specific; for the txgbe PF it is rather complex.
1539          * To avoid unexpected behavior in the VFs, resetting the PF while
1540          * SR-IOV is active is currently not supported. It might be supported later.
1541          */
1542         if (dev->data->sriov.active)
1543                 return -ENOTSUP;
1544
1545         ret = eth_txgbe_dev_uninit(dev);
1546         if (ret)
1547                 return ret;
1548
1549         ret = eth_txgbe_dev_init(dev, NULL);
1550
1551         return ret;
1552 }
1553
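/*
 * Helpers to read the per-queue-pair hardware counters: the registers are
 * free running and wrap at 32 (resp. 36) bits, so 2^32 (resp. 2^36) is added
 * when the current reading is below the last one, and the result is the
 * delta against the baseline kept in hw->qp_last[], which is re-captured
 * while hw->offset_loaded is zero (e.g. at device start or after a stats reset).
 */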
1554 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1555         {                                                       \
1556                 uint32_t current_counter = rd32(hw, reg);       \
1557                 if (current_counter < last_counter)             \
1558                         current_counter += 0x100000000LL;       \
1559                 if (!hw->offset_loaded)                         \
1560                         last_counter = current_counter;         \
1561                 counter = current_counter - last_counter;       \
1562                 counter &= 0xFFFFFFFFLL;                        \
1563         }
1564
1565 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1566         {                                                                \
1567                 uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1568                 uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1569                 uint64_t current_counter = (current_counter_msb << 32) | \
1570                         current_counter_lsb;                             \
1571                 if (current_counter < last_counter)                      \
1572                         current_counter += 0x1000000000LL;               \
1573                 if (!hw->offset_loaded)                                  \
1574                         last_counter = current_counter;                  \
1575                 counter = current_counter - last_counter;                \
1576                 counter &= 0xFFFFFFFFFLL;                                \
1577         }
1578
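/*
 * Read all statistics registers and accumulate them into the software copy in
 * hw_stats. Most of these counters are clear-on-read, so hw_stats is the only
 * place the full totals survive; txgbe_dev_stats_reset() and
 * txgbe_dev_xstats_reset() rely on this by reading once and zeroing hw_stats.
 */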
1579 void
1580 txgbe_read_stats_registers(struct txgbe_hw *hw,
1581                            struct txgbe_hw_stats *hw_stats)
1582 {
1583         unsigned int i;
1584
1585         /* QP Stats */
1586         for (i = 0; i < hw->nb_rx_queues; i++) {
1587                 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
1588                         hw->qp_last[i].rx_qp_packets,
1589                         hw_stats->qp[i].rx_qp_packets);
1590                 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
1591                         hw->qp_last[i].rx_qp_bytes,
1592                         hw_stats->qp[i].rx_qp_bytes);
1593                 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
1594                         hw->qp_last[i].rx_qp_mc_packets,
1595                         hw_stats->qp[i].rx_qp_mc_packets);
1596         }
1597
1598         for (i = 0; i < hw->nb_tx_queues; i++) {
1599                 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
1600                         hw->qp_last[i].tx_qp_packets,
1601                         hw_stats->qp[i].tx_qp_packets);
1602                 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
1603                         hw->qp_last[i].tx_qp_bytes,
1604                         hw_stats->qp[i].tx_qp_bytes);
1605         }
1606         /* PB Stats */
1607         for (i = 0; i < TXGBE_MAX_UP; i++) {
1608                 hw_stats->up[i].rx_up_xon_packets +=
1609                                 rd32(hw, TXGBE_PBRXUPXON(i));
1610                 hw_stats->up[i].rx_up_xoff_packets +=
1611                                 rd32(hw, TXGBE_PBRXUPXOFF(i));
1612                 hw_stats->up[i].tx_up_xon_packets +=
1613                                 rd32(hw, TXGBE_PBTXUPXON(i));
1614                 hw_stats->up[i].tx_up_xoff_packets +=
1615                                 rd32(hw, TXGBE_PBTXUPXOFF(i));
1616                 hw_stats->up[i].tx_up_xon2off_packets +=
1617                                 rd32(hw, TXGBE_PBTXUPOFF(i));
1618                 hw_stats->up[i].rx_up_dropped +=
1619                                 rd32(hw, TXGBE_PBRXMISS(i));
1620         }
1621         hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
1622         hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
1623         hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
1624         hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
1625
1626         /* DMA Stats */
1627         hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
1628         hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
1629
1630         hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
1631         hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
1632         hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
1633
1634         /* MAC Stats */
1635         hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
1636         hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
1637         hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
1638
1639         hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
1640         hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
1641         hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
1642
1643         hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
1644         hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
1645
1646         hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
1647         hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
1648         hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
1649         hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
1650         hw_stats->rx_size_512_to_1023_packets +=
1651                         rd64(hw, TXGBE_MACRX512TO1023L);
1652         hw_stats->rx_size_1024_to_max_packets +=
1653                         rd64(hw, TXGBE_MACRX1024TOMAXL);
1654         hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
1655         hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
1656         hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
1657         hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
1658         hw_stats->tx_size_512_to_1023_packets +=
1659                         rd64(hw, TXGBE_MACTX512TO1023L);
1660         hw_stats->tx_size_1024_to_max_packets +=
1661                         rd64(hw, TXGBE_MACTX1024TOMAXL);
1662
1663         hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
1664         hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
1665         hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
1666
1667         /* MNG Stats */
1668         hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
1669         hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
1670         hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
1671         hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
1672
1673         /* FCoE Stats */
1674         hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
1675         hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
1676         hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
1677         hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
1678         hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
1679         hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
1680         hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
1681
1682         /* Flow Director Stats */
1683         hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
1684         hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
1685         hw_stats->flow_director_added_filters +=
1686                 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
1687         hw_stats->flow_director_removed_filters +=
1688                 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
1689         hw_stats->flow_director_filter_add_errors +=
1690                 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
1691         hw_stats->flow_director_filter_remove_errors +=
1692                 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
1693
1694         /* MACsec Stats */
1695         hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
1696         hw_stats->tx_macsec_pkts_encrypted +=
1697                         rd32(hw, TXGBE_LSECTX_ENCPKT);
1698         hw_stats->tx_macsec_pkts_protected +=
1699                         rd32(hw, TXGBE_LSECTX_PROTPKT);
1700         hw_stats->tx_macsec_octets_encrypted +=
1701                         rd32(hw, TXGBE_LSECTX_ENCOCT);
1702         hw_stats->tx_macsec_octets_protected +=
1703                         rd32(hw, TXGBE_LSECTX_PROTOCT);
1704         hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
1705         hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
1706         hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
1707         hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
1708         hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
1709         hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
1710         hw_stats->rx_macsec_sc_pkts_unchecked +=
1711                         rd32(hw, TXGBE_LSECRX_UNCHKPKT);
1712         hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
1713         hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
1714         for (i = 0; i < 2; i++) {
1715                 hw_stats->rx_macsec_sa_pkts_ok +=
1716                         rd32(hw, TXGBE_LSECRX_OKPKT(i));
1717                 hw_stats->rx_macsec_sa_pkts_invalid +=
1718                         rd32(hw, TXGBE_LSECRX_INVPKT(i));
1719                 hw_stats->rx_macsec_sa_pkts_notvalid +=
1720                         rd32(hw, TXGBE_LSECRX_BADPKT(i));
1721         }
1722         hw_stats->rx_macsec_sa_pkts_unusedsa +=
1723                         rd32(hw, TXGBE_LSECRX_INVSAPKT);
1724         hw_stats->rx_macsec_sa_pkts_notusingsa +=
1725                         rd32(hw, TXGBE_LSECRX_BADSAPKT);
1726
1727         hw_stats->rx_total_missed_packets = 0;
1728         for (i = 0; i < TXGBE_MAX_UP; i++) {
1729                 hw_stats->rx_total_missed_packets +=
1730                         hw_stats->up[i].rx_up_dropped;
1731         }
1732 }
1733
1734 static int
1735 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1736 {
1737         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1738         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1739         struct txgbe_stat_mappings *stat_mappings =
1740                         TXGBE_DEV_STAT_MAPPINGS(dev);
1741         uint32_t i, j;
1742
1743         txgbe_read_stats_registers(hw, hw_stats);
1744
1745         if (stats == NULL)
1746                 return -EINVAL;
1747
1748         /* Fill out the rte_eth_stats statistics structure */
1749         stats->ipackets = hw_stats->rx_packets;
1750         stats->ibytes = hw_stats->rx_bytes;
1751         stats->opackets = hw_stats->tx_packets;
1752         stats->obytes = hw_stats->tx_bytes;
1753
1754         memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1755         memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1756         memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1757         memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1758         memset(&stats->q_errors, 0, sizeof(stats->q_errors));
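        /* Each QSM register packs NB_QMAP_FIELDS_PER_QSM_REG 8-bit fields;
         * the field of queue i selects which of the RTE_ETHDEV_QUEUE_STAT_CNTRS
         * per-queue counters that queue's statistics are accumulated into.
         */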
1759         for (i = 0; i < TXGBE_MAX_QP; i++) {
1760                 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1761                 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1762                 uint32_t q_map;
1763
1764                 q_map = (stat_mappings->rqsm[n] >> offset)
1765                                 & QMAP_FIELD_RESERVED_BITS_MASK;
1766                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1767                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1768                 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1769                 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1770
1771                 q_map = (stat_mappings->tqsm[n] >> offset)
1772                                 & QMAP_FIELD_RESERVED_BITS_MASK;
1773                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1774                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1775                 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1776                 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1777         }
1778
1779         /* Rx Errors */
1780         stats->imissed  = hw_stats->rx_total_missed_packets;
1781         stats->ierrors  = hw_stats->rx_crc_errors +
1782                           hw_stats->rx_mac_short_packet_dropped +
1783                           hw_stats->rx_length_errors +
1784                           hw_stats->rx_undersize_errors +
1785                           hw_stats->rx_oversize_errors +
1786                           hw_stats->rx_drop_packets +
1787                           hw_stats->rx_illegal_byte_errors +
1788                           hw_stats->rx_error_bytes +
1789                           hw_stats->rx_fragment_errors +
1790                           hw_stats->rx_fcoe_crc_errors +
1791                           hw_stats->rx_fcoe_mbuf_allocation_errors;
1792
1793         /* Tx Errors */
1794         stats->oerrors  = 0;
1795         return 0;
1796 }
1797
1798 static int
1799 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
1800 {
1801         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1802         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1803
1804         /* HW registers are cleared on read */
1805         hw->offset_loaded = 0;
1806         txgbe_dev_stats_get(dev, NULL);
1807         hw->offset_loaded = 1;
1808
1809         /* Reset software totals */
1810         memset(hw_stats, 0, sizeof(*hw_stats));
1811
1812         return 0;
1813 }
1814
1815 /* This function calculates the number of xstats based on the current config */
1816 static unsigned
1817 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
1818 {
1819         int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1820         return TXGBE_NB_HW_STATS +
1821                TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
1822                TXGBE_NB_QP_STATS * nb_queues;
1823 }
1824
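/*
 * The xstats id space is laid out as: the TXGBE_NB_HW_STATS global hardware
 * stats first, then TXGBE_NB_UP_STATS entries per user priority (TXGBE_MAX_UP
 * priorities), then TXGBE_NB_QP_STATS entries per queue pair. The two helpers
 * below map an id to its display name and to its byte offset within
 * struct txgbe_hw_stats.
 */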
1825 static inline int
1826 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1827 {
1828         int nb, st;
1829
1830         /* Extended stats from txgbe_hw_stats */
1831         if (id < TXGBE_NB_HW_STATS) {
1832                 snprintf(name, size, "[hw]%s",
1833                         rte_txgbe_stats_strings[id].name);
1834                 return 0;
1835         }
1836         id -= TXGBE_NB_HW_STATS;
1837
1838         /* Priority Stats */
1839         if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1840                 nb = id / TXGBE_NB_UP_STATS;
1841                 st = id % TXGBE_NB_UP_STATS;
1842                 snprintf(name, size, "[p%u]%s", nb,
1843                         rte_txgbe_up_strings[st].name);
1844                 return 0;
1845         }
1846         id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1847
1848         /* Queue Stats */
1849         if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
1850                 nb = id / TXGBE_NB_QP_STATS;
1851                 st = id % TXGBE_NB_QP_STATS;
1852                 snprintf(name, size, "[q%u]%s", nb,
1853                         rte_txgbe_qp_strings[st].name);
1854                 return 0;
1855         }
1856         id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
1857
1858         return -(int)(id + 1);
1859 }
1860
1861 static inline int
1862 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1863 {
1864         int nb, st;
1865
1866         /* Extended stats from txgbe_hw_stats */
1867         if (id < TXGBE_NB_HW_STATS) {
1868                 *offset = rte_txgbe_stats_strings[id].offset;
1869                 return 0;
1870         }
1871         id -= TXGBE_NB_HW_STATS;
1872
1873         /* Priority Stats */
1874         if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1875                 nb = id / TXGBE_NB_UP_STATS;
1876                 st = id % TXGBE_NB_UP_STATS;
1877                 *offset = rte_txgbe_up_strings[st].offset +
1878                         nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
1879                 return 0;
1880         }
1881         id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1882
1883         /* Queue Stats */
1884         if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
1885                 nb = id / TXGBE_NB_QP_STATS;
1886                 st = id % TXGBE_NB_QP_STATS;
1887                 *offset = rte_txgbe_qp_strings[st].offset +
1888                         nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
1889                 return 0;
1890         }
1891         id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
1892
1893         return -(int)(id + 1);
1894 }
1895
1896 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1897         struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1898 {
1899         unsigned int i, count;
1900
1901         count = txgbe_xstats_calc_num(dev);
1902         if (xstats_names == NULL)
1903                 return count;
1904
1905         /* Note: limit >= cnt_stats checked upstream
1906          * in rte_eth_xstats_names()
1907          */
1908         limit = min(limit, count);
1909
1910         /* Extended stats from txgbe_hw_stats */
1911         for (i = 0; i < limit; i++) {
1912                 if (txgbe_get_name_by_id(i, xstats_names[i].name,
1913                         sizeof(xstats_names[i].name))) {
1914                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1915                         break;
1916                 }
1917         }
1918
1919         return i;
1920 }
1921
1922 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1923         struct rte_eth_xstat_name *xstats_names,
1924         const uint64_t *ids,
1925         unsigned int limit)
1926 {
1927         unsigned int i;
1928
1929         if (ids == NULL)
1930                 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
1931
1932         for (i = 0; i < limit; i++) {
1933                 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
1934                                 sizeof(xstats_names[i].name))) {
1935                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1936                         return -1;
1937                 }
1938         }
1939
1940         return i;
1941 }
1942
1943 static int
1944 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1945                                          unsigned int limit)
1946 {
1947         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1948         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1949         unsigned int i, count;
1950
1951         txgbe_read_stats_registers(hw, hw_stats);
1952
1953         /* If this is a reset, xstats is NULL and we have already cleared
1954          * the registers by reading them.
1955          */
1956         count = txgbe_xstats_calc_num(dev);
1957         if (xstats == NULL)
1958                 return count;
1959
1960         limit = min(limit, txgbe_xstats_calc_num(dev));
1961
1962         /* Extended stats from txgbe_hw_stats */
1963         for (i = 0; i < limit; i++) {
1964                 uint32_t offset = 0;
1965
1966                 if (txgbe_get_offset_by_id(i, &offset)) {
1967                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1968                         break;
1969                 }
1970                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1971                 xstats[i].id = i;
1972         }
1973
1974         return i;
1975 }
1976
1977 static int
1978 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1979                                          unsigned int limit)
1980 {
1981         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1982         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1983         unsigned int i, count;
1984
1985         txgbe_read_stats_registers(hw, hw_stats);
1986
1987         /* If this is a reset, values is NULL and we have already cleared
1988          * the registers by reading them.
1989          */
1990         count = txgbe_xstats_calc_num(dev);
1991         if (values == NULL)
1992                 return count;
1993
1994         limit = min(limit, txgbe_xstats_calc_num(dev));
1995
1996         /* Extended stats from txgbe_hw_stats */
1997         for (i = 0; i < limit; i++) {
1998                 uint32_t offset;
1999
2000                 if (txgbe_get_offset_by_id(i, &offset)) {
2001                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2002                         break;
2003                 }
2004                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2005         }
2006
2007         return i;
2008 }
2009
2010 static int
2011 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2012                 uint64_t *values, unsigned int limit)
2013 {
2014         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2015         unsigned int i;
2016
2017         if (ids == NULL)
2018                 return txgbe_dev_xstats_get_(dev, values, limit);
2019
2020         for (i = 0; i < limit; i++) {
2021                 uint32_t offset;
2022
2023                 if (txgbe_get_offset_by_id(ids[i], &offset)) {
2024                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2025                         break;
2026                 }
2027                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2028         }
2029
2030         return i;
2031 }
2032
2033 static int
2034 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2035 {
2036         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2037         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2038
2039         /* HW registers are cleared on read */
2040         hw->offset_loaded = 0;
2041         txgbe_read_stats_registers(hw, hw_stats);
2042         hw->offset_loaded = 1;
2043
2044         /* Reset software totals */
2045         memset(hw_stats, 0, sizeof(*hw_stats));
2046
2047         return 0;
2048 }
2049
2050 static int
2051 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2052 {
2053         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2054         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2055
2056         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2057         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2058         dev_info->min_rx_bufsize = 1024;
2059         dev_info->max_rx_pktlen = 15872;
2060         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2061         dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2062         dev_info->max_vfs = pci_dev->max_vfs;
2063         dev_info->max_vmdq_pools = ETH_64_POOLS;
2064         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2065         dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2066         dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2067                                      dev_info->rx_queue_offload_capa);
2068         dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2069         dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2070
2071         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2072                 .rx_thresh = {
2073                         .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2074                         .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2075                         .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2076                 },
2077                 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2078                 .rx_drop_en = 0,
2079                 .offloads = 0,
2080         };
2081
2082         dev_info->default_txconf = (struct rte_eth_txconf) {
2083                 .tx_thresh = {
2084                         .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2085                         .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2086                         .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2087                 },
2088                 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2089                 .offloads = 0,
2090         };
2091
2092         dev_info->rx_desc_lim = rx_desc_lim;
2093         dev_info->tx_desc_lim = tx_desc_lim;
2094
2095         dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2096         dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2097         dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2098
2099         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2100         dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2101
2102         /* Driver-preferred Rx/Tx parameters */
2103         dev_info->default_rxportconf.burst_size = 32;
2104         dev_info->default_txportconf.burst_size = 32;
2105         dev_info->default_rxportconf.nb_queues = 1;
2106         dev_info->default_txportconf.nb_queues = 1;
2107         dev_info->default_rxportconf.ring_size = 256;
2108         dev_info->default_txportconf.ring_size = 256;
2109
2110         return 0;
2111 }
2112
2113 const uint32_t *
2114 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2115 {
2116         if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2117             dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2118             dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2119             dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2120                 return txgbe_get_supported_ptypes();
2121
2122         return NULL;
2123 }
2124
2125 void
2126 txgbe_dev_setup_link_alarm_handler(void *param)
2127 {
2128         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2129         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2130         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2131         u32 speed;
2132         bool autoneg = false;
2133
2134         speed = hw->phy.autoneg_advertised;
2135         if (!speed)
2136                 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2137
2138         hw->mac.setup_link(hw, speed, true);
2139
2140         intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2141 }
2142
2143 /* return 0 means link status changed, -1 means not changed */
2144 int
2145 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2146                             int wait_to_complete)
2147 {
2148         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2149         struct rte_eth_link link;
2150         u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2151         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2152         bool link_up;
2153         int err;
2154         int wait = 1;
2155
2156         memset(&link, 0, sizeof(link));
2157         link.link_status = ETH_LINK_DOWN;
2158         link.link_speed = ETH_SPEED_NUM_NONE;
2159         link.link_duplex = ETH_LINK_HALF_DUPLEX;
2160         link.link_autoneg = ETH_LINK_AUTONEG;
2161
2162         hw->mac.get_link_status = true;
2163
2164         if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2165                 return rte_eth_linkstatus_set(dev, &link);
2166
2167         /* don't wait for link check completion if not requested or if lsc intr is enabled */
2168         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2169                 wait = 0;
2170
2171         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2172
2173         if (err != 0) {
2174                 link.link_speed = ETH_SPEED_NUM_100M;
2175                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2176                 return rte_eth_linkstatus_set(dev, &link);
2177         }
2178
2179         if (link_up == 0) {
2180                 if (hw->phy.media_type == txgbe_media_type_fiber) {
2181                         intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2182                         rte_eal_alarm_set(10,
2183                                 txgbe_dev_setup_link_alarm_handler, dev);
2184                 }
2185                 return rte_eth_linkstatus_set(dev, &link);
2186         }
2187
2188         intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2189         link.link_status = ETH_LINK_UP;
2190         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2191
2192         switch (link_speed) {
2193         default:
2194         case TXGBE_LINK_SPEED_UNKNOWN:
2195                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2196                 link.link_speed = ETH_SPEED_NUM_100M;
2197                 break;
2198
2199         case TXGBE_LINK_SPEED_100M_FULL:
2200                 link.link_speed = ETH_SPEED_NUM_100M;
2201                 break;
2202
2203         case TXGBE_LINK_SPEED_1GB_FULL:
2204                 link.link_speed = ETH_SPEED_NUM_1G;
2205                 break;
2206
2207         case TXGBE_LINK_SPEED_2_5GB_FULL:
2208                 link.link_speed = ETH_SPEED_NUM_2_5G;
2209                 break;
2210
2211         case TXGBE_LINK_SPEED_5GB_FULL:
2212                 link.link_speed = ETH_SPEED_NUM_5G;
2213                 break;
2214
2215         case TXGBE_LINK_SPEED_10GB_FULL:
2216                 link.link_speed = ETH_SPEED_NUM_10G;
2217                 break;
2218         }
2219
2220         return rte_eth_linkstatus_set(dev, &link);
2221 }
2222
2223 static int
2224 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2225 {
2226         return txgbe_dev_link_update_share(dev, wait_to_complete);
2227 }
2228
2229 /**
2230  * It enables or disables the link state change (LSC) interrupt.
2231  * It is called only once during NIC initialization.
2232  *
2233  * @param dev
2234  *  Pointer to struct rte_eth_dev.
2235  * @param on
2236  *  Enable or Disable.
2237  *
2238  * @return
2239  *  - On success, zero.
2240  *  - On failure, a negative value.
2241  */
2242 static int
2243 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2244 {
2245         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2246
2247         txgbe_dev_link_status_print(dev);
2248         if (on)
2249                 intr->mask_misc |= TXGBE_ICRMISC_LSC;
2250         else
2251                 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2252
2253         return 0;
2254 }
2255
2256 /**
2257  * It enables the Rx queue interrupts by setting the interrupt mask.
2258  * It is called only once during NIC initialization.
2259  *
2260  * @param dev
2261  *  Pointer to struct rte_eth_dev.
2262  *
2263  * @return
2264  *  - On success, zero.
2265  *  - On failure, a negative value.
2266  */
2267 static int
2268 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2269 {
2270         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2271
2272         intr->mask[0] |= TXGBE_ICR_MASK;
2273         intr->mask[1] |= TXGBE_ICR_MASK;
2274
2275         return 0;
2276 }
2277
2278 /**
2279  * It enables the MACsec (link security) interrupt.
2280  * It is called only once during NIC initialization.
2281  *
2282  * @param dev
2283  *  Pointer to struct rte_eth_dev.
2284  *
2285  * @return
2286  *  - On success, zero.
2287  *  - On failure, a negative value.
2288  */
2289 static int
2290 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2291 {
2292         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2293
2294         intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2295
2296         return 0;
2297 }
2298
2299 /*
2300  * It reads the ICR and sets the interrupt flags (e.g. for link_update).
2301  *
2302  * @param dev
2303  *  Pointer to struct rte_eth_dev.
2304  *
2305  * @return
2306  *  - On success, zero.
2307  *  - On failure, a negative value.
2308  */
2309 static int
2310 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2311 {
2312         uint32_t eicr;
2313         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2314         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2315
2316         /* clear all cause mask */
2317         txgbe_disable_intr(hw);
2318
2319         /* read-on-clear nic registers here */
2320         eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2321         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2322
2323         intr->flags = 0;
2324
2325         /* set flag for async link update */
2326         if (eicr & TXGBE_ICRMISC_LSC)
2327                 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2328
2329         if (eicr & TXGBE_ICRMISC_VFMBX)
2330                 intr->flags |= TXGBE_FLAG_MAILBOX;
2331
2332         if (eicr & TXGBE_ICRMISC_LNKSEC)
2333                 intr->flags |= TXGBE_FLAG_MACSEC;
2334
2335         if (eicr & TXGBE_ICRMISC_GPIO)
2336                 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2337
2338         return 0;
2339 }
2340
2341 /**
2342  * It gets and then prints the link status.
2343  *
2344  * @param dev
2345  *  Pointer to struct rte_eth_dev.
2346  *
2347  * @return
2348  *  void
2350  */
2351 static void
2352 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2353 {
2354         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2355         struct rte_eth_link link;
2356
2357         rte_eth_linkstatus_get(dev, &link);
2358
2359         if (link.link_status) {
2360                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2361                                         (int)(dev->data->port_id),
2362                                         (unsigned int)link.link_speed,
2363                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2364                                         "full-duplex" : "half-duplex");
2365         } else {
2366                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2367                                 (int)(dev->data->port_id));
2368         }
2369         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2370                                 pci_dev->addr.domain,
2371                                 pci_dev->addr.bus,
2372                                 pci_dev->addr.devid,
2373                                 pci_dev->addr.function);
2374 }
2375
2376 /*
2377  * It executes link_update after an interrupt has occurred.
2378  *
2379  * @param dev
2380  *  Pointer to struct rte_eth_dev.
2381  *
2382  * @return
2383  *  - On success, zero.
2384  *  - On failure, a negative value.
2385  */
2386 static int
2387 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
2388                            struct rte_intr_handle *intr_handle)
2389 {
2390         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2391         int64_t timeout;
2392         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2393
2394         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2395
2396         if (intr->flags & TXGBE_FLAG_MAILBOX) {
2397                 txgbe_pf_mbx_process(dev);
2398                 intr->flags &= ~TXGBE_FLAG_MAILBOX;
2399         }
2400
2401         if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2402                 hw->phy.handle_lasi(hw);
2403                 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2404         }
2405
2406         if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2407                 struct rte_eth_link link;
2408
2409                 /* get the link status before link update, for predicting later */
2410                 rte_eth_linkstatus_get(dev, &link);
2411
2412                 txgbe_dev_link_update(dev, 0);
2413
2414                 /* link is likely to come up */
2415                 if (!link.link_status)
2416                         /* handle it 1 sec later, wait for it to be stable */
2417                         timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
2418                 /* link is likely to go down */
2419                 else
2420                         /* handle it 4 sec later, wait for it to be stable */
2421                         timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
2422
2423                 txgbe_dev_link_status_print(dev);
2424                 if (rte_eal_alarm_set(timeout * 1000,
2425                                       txgbe_dev_interrupt_delayed_handler,
2426                                       (void *)dev) < 0) {
2427                         PMD_DRV_LOG(ERR, "Error setting alarm");
2428                 } else {
2429                         /* remember original mask */
2430                         intr->mask_misc_orig = intr->mask_misc;
2431                         /* only disable lsc interrupt */
2432                         intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2433                 }
2434         }
2435
2436         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2437         txgbe_enable_intr(dev);
2438         rte_intr_enable(intr_handle);
2439
2440         return 0;
2441 }
2442
2443 /**
2444  * Interrupt handler registered as an alarm callback for delayed handling of
2445  * a specific interrupt, to wait for a stable NIC state. As the txgbe
2446  * interrupt state is not stable right after the link has just gone down,
2447  * it needs to wait 4 seconds to read a stable status.
2448  *
2449  * @param handle
2450  *  Pointer to interrupt handle.
2451  * @param param
2452  *  The address of parameter (struct rte_eth_dev *) registered before.
2453  *
2454  * @return
2455  *  void
2456  */
2457 static void
2458 txgbe_dev_interrupt_delayed_handler(void *param)
2459 {
2460         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2461         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2462         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2463         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2464         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2465         uint32_t eicr;
2466
2467         txgbe_disable_intr(hw);
2468
2469         eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2470         if (eicr & TXGBE_ICRMISC_VFMBX)
2471                 txgbe_pf_mbx_process(dev);
2472
2473         if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2474                 hw->phy.handle_lasi(hw);
2475                 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2476         }
2477
2478         if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2479                 txgbe_dev_link_update(dev, 0);
2480                 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
2481                 txgbe_dev_link_status_print(dev);
2482                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2483                                               NULL);
2484         }
2485
2486         if (intr->flags & TXGBE_FLAG_MACSEC) {
2487                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2488                                               NULL);
2489                 intr->flags &= ~TXGBE_FLAG_MACSEC;
2490         }
2491
2492         /* restore original mask */
2493         intr->mask_misc = intr->mask_misc_orig;
2494         intr->mask_misc_orig = 0;
2495
2496         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2497         txgbe_enable_intr(dev);
2498         rte_intr_enable(intr_handle);
2499 }
2500
2501 /**
2502  * Interrupt handler triggered by the NIC for handling a
2503  * specific interrupt.
2504  *
2505  * @param handle
2506  *  Pointer to interrupt handle.
2507  * @param param
2508  *  The address of parameter (struct rte_eth_dev *) registered before.
2509  *
2510  * @return
2511  *  void
2512  */
2513 static void
2514 txgbe_dev_interrupt_handler(void *param)
2515 {
2516         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2517
2518         txgbe_dev_interrupt_get_status(dev);
2519         txgbe_dev_interrupt_action(dev, dev->intr_handle);
2520 }
2521
2522 static int
2523 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2524                                 uint32_t index, uint32_t pool)
2525 {
2526         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2527         uint32_t enable_addr = 1;
2528
2529         return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
2530                              pool, enable_addr);
2531 }
2532
2533 static void
2534 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2535 {
2536         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2537
2538         txgbe_clear_rar(hw, index);
2539 }
2540
2541 static int
2542 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2543 {
2544         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2545
2546         txgbe_remove_rar(dev, 0);
2547         txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2548
2549         return 0;
2550 }
2551
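/*
 * Compute the 12-bit hash used to index the unicast hash table. A window of
 * bits from the top of the MAC address is selected by mc_filter_type, e.g.
 * for filter type 0: vector = (addr[5] << 4) | (addr[4] >> 4), i.e. bits [47:36].
 */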
2552 static uint32_t
2553 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
2554 {
2555         uint32_t vector = 0;
2556
2557         switch (hw->mac.mc_filter_type) {
2558         case 0:   /* use bits [47:36] of the address */
2559                 vector = ((uc_addr->addr_bytes[4] >> 4) |
2560                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2561                 break;
2562         case 1:   /* use bits [46:35] of the address */
2563                 vector = ((uc_addr->addr_bytes[4] >> 3) |
2564                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2565                 break;
2566         case 2:   /* use bits [45:34] of the address */
2567                 vector = ((uc_addr->addr_bytes[4] >> 2) |
2568                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2569                 break;
2570         case 3:   /* use bits [43:32] of the address */
2571                 vector = ((uc_addr->addr_bytes[4]) |
2572                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2573                 break;
2574         default:  /* Invalid mc_filter_type */
2575                 break;
2576         }
2577
2578         /* vector can only be 12-bits or boundary will be exceeded */
2579         vector &= 0xFFF;
2580         return vector;
2581 }
2582
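/*
 * Set or clear one entry of the unicast hash table: the 12-bit hash selects
 * one of 128 32-bit UCADDRTBL registers (upper 7 bits) and a bit within it
 * (lower 5 bits). uta_shadow mirrors the registers so unchanged entries are
 * skipped, and uta_in_use tracks whether the unicast hash filter must remain
 * enabled in PSRCTL.
 */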
2583 static int
2584 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
2585                         struct rte_ether_addr *mac_addr, uint8_t on)
2586 {
2587         uint32_t vector;
2588         uint32_t uta_idx;
2589         uint32_t reg_val;
2590         uint32_t uta_mask;
2591         uint32_t psrctl;
2592
2593         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2594         struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
2595
2596         /* The UTA table only exists on pf hardware */
2597         if (hw->mac.type < txgbe_mac_raptor)
2598                 return -ENOTSUP;
2599
2600         vector = txgbe_uta_vector(hw, mac_addr);
2601         uta_idx = (vector >> 5) & 0x7F;
2602         uta_mask = 0x1UL << (vector & 0x1F);
2603
2604         if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2605                 return 0;
2606
2607         reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
2608         if (on) {
2609                 uta_info->uta_in_use++;
2610                 reg_val |= uta_mask;
2611                 uta_info->uta_shadow[uta_idx] |= uta_mask;
2612         } else {
2613                 uta_info->uta_in_use--;
2614                 reg_val &= ~uta_mask;
2615                 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2616         }
2617
2618         wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
2619
2620         psrctl = rd32(hw, TXGBE_PSRCTL);
2621         if (uta_info->uta_in_use > 0)
2622                 psrctl |= TXGBE_PSRCTL_UCHFENA;
2623         else
2624                 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
2625
2626         psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
2627         psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2628         wr32(hw, TXGBE_PSRCTL, psrctl);
2629
2630         return 0;
2631 }
2632
2633 static int
2634 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2635 {
2636         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2637         struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
2638         uint32_t psrctl;
2639         int i;
2640
2641         /* The UTA table only exists on pf hardware */
2642         if (hw->mac.type < txgbe_mac_raptor)
2643                 return -ENOTSUP;
2644
2645         if (on) {
2646                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2647                         uta_info->uta_shadow[i] = ~0;
2648                         wr32(hw, TXGBE_UCADDRTBL(i), ~0);
2649                 }
2650         } else {
2651                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2652                         uta_info->uta_shadow[i] = 0;
2653                         wr32(hw, TXGBE_UCADDRTBL(i), 0);
2654                 }
2655         }
2656
2657         psrctl = rd32(hw, TXGBE_PSRCTL);
2658         if (on)
2659                 psrctl |= TXGBE_PSRCTL_UCHFENA;
2660         else
2661                 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
2662
2663         psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
2664         psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2665         wr32(hw, TXGBE_PSRCTL, psrctl);
2666
2667         return 0;
2668 }
2669
2670 static int
2671 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2672 {
2673         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2674         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2675         uint32_t mask;
2676         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2677
2678         if (queue_id < 32) {
2679                 mask = rd32(hw, TXGBE_IMS(0));
2680                 mask &= (1 << queue_id);
2681                 wr32(hw, TXGBE_IMS(0), mask);
2682         } else if (queue_id < 64) {
2683                 mask = rd32(hw, TXGBE_IMS(1));
2684                 mask &= (1 << (queue_id - 32));
2685                 wr32(hw, TXGBE_IMS(1), mask);
2686         }
2687         rte_intr_enable(intr_handle);
2688
2689         return 0;
2690 }
2691
2692 static int
2693 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2694 {
2695         uint32_t mask;
2696         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2697
2698         if (queue_id < 32) {
2699                 mask = rd32(hw, TXGBE_IMS(0));
2700                 mask &= ~(1 << queue_id);
2701                 wr32(hw, TXGBE_IMS(0), mask);
2702         } else if (queue_id < 64) {
2703                 mask = rd32(hw, TXGBE_IMS(1));
2704                 mask &= ~(1 << (queue_id - 32));
2705                 wr32(hw, TXGBE_IMS(1), mask);
2706         }
2707
2708         return 0;
2709 }
2710
2711 /**
2712  * set the IVAR registers, mapping interrupt causes to vectors
2713  * @param hw
2714  *  pointer to txgbe_hw struct
2715  * @param direction
2716  *  0 for Rx, 1 for Tx, -1 for other causes
2717  * @param queue
2718  *  queue to map the corresponding interrupt to
2719  * @param msix_vector
2720  *  the vector to map to the corresponding queue
2721  */
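/*
 * Each IVAR register covers a pair of queues, split into four 8-bit fields:
 * Rx of the even queue at bits [7:0], Tx of the even queue at [15:8],
 * Rx of the odd queue at [23:16] and Tx of the odd queue at [31:24],
 * hence idx = 16 * (queue & 1) + 8 * direction below.
 */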
2722 void
2723 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
2724                    uint8_t queue, uint8_t msix_vector)
2725 {
2726         uint32_t tmp, idx;
2727
2728         if (direction == -1) {
2729                 /* other causes */
2730                 msix_vector |= TXGBE_IVARMISC_VLD;
2731                 idx = 0;
2732                 tmp = rd32(hw, TXGBE_IVARMISC);
2733                 tmp &= ~(0xFF << idx);
2734                 tmp |= (msix_vector << idx);
2735                 wr32(hw, TXGBE_IVARMISC, tmp);
2736         } else {
2737                 /* rx or tx causes */
2738                 /* Workaround for ICR lost */
2739                 idx = ((16 * (queue & 1)) + (8 * direction));
2740                 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
2741                 tmp &= ~(0xFF << idx);
2742                 tmp |= (msix_vector << idx);
2743                 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
2744         }
2745 }
2746
2747 /**
2748  * Sets up the hardware to properly generate MSI-X interrupts
2749  * @param dev
2750  *  Pointer to struct rte_eth_dev (board private structure)
2751  */
2752 static void
2753 txgbe_configure_msix(struct rte_eth_dev *dev)
2754 {
2755         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2756         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2757         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2758         uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
2759         uint32_t vec = TXGBE_MISC_VEC_ID;
2760         uint32_t gpie;
2761
2762         /* won't configure the MSI-X register if no mapping is done
2763          * between intr vector and event fd,
2764          * but if MSI-X has already been enabled, we still need to
2765          * configure auto clean, auto mask and throttling.
2766          */
2767         gpie = rd32(hw, TXGBE_GPIE);
2768         if (!rte_intr_dp_is_en(intr_handle) &&
2769             !(gpie & TXGBE_GPIE_MSIX))
2770                 return;
2771
2772         if (rte_intr_allow_others(intr_handle)) {
2773                 base = TXGBE_RX_VEC_START;
2774                 vec = base;
2775         }
2776
2777         /* setup GPIE for MSI-x mode */
2778         gpie = rd32(hw, TXGBE_GPIE);
2779         gpie |= TXGBE_GPIE_MSIX;
2780         wr32(hw, TXGBE_GPIE, gpie);
2781
2782         /* Populate the IVAR table and set the ITR values to the
2783          * corresponding register.
2784          */
2785         if (rte_intr_dp_is_en(intr_handle)) {
2786                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2787                         queue_id++) {
2788                         /* by default, 1:1 mapping */
2789                         txgbe_set_ivar_map(hw, 0, queue_id, vec);
2790                         intr_handle->intr_vec[queue_id] = vec;
2791                         if (vec < base + intr_handle->nb_efd - 1)
2792                                 vec++;
2793                 }
2794
2795                 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
2796         }
2797         wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
2798                         TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2799                         | TXGBE_ITR_WRDSA);
2800 }
2801
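/*
 * Iterator callback passed to txgbe_update_mc_addr_list(): it walks the flat
 * array of struct rte_ether_addr supplied by the application, returning one
 * address per call and always reporting VMDq pool 0.
 */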
2802 static u8 *
2803 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
2804                         u8 **mc_addr_ptr, u32 *vmdq)
2805 {
2806         u8 *mc_addr;
2807
2808         *vmdq = 0;
2809         mc_addr = *mc_addr_ptr;
2810         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2811         return mc_addr;
2812 }
2813
2814 int
2815 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2816                           struct rte_ether_addr *mc_addr_set,
2817                           uint32_t nb_mc_addr)
2818 {
2819         struct txgbe_hw *hw;
2820         u8 *mc_addr_list;
2821
2822         hw = TXGBE_DEV_HW(dev);
2823         mc_addr_list = (u8 *)mc_addr_set;
2824         return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2825                                          txgbe_dev_addr_list_itr, TRUE);
2826 }
2827
2828 static const struct eth_dev_ops txgbe_eth_dev_ops = {
2829         .dev_configure              = txgbe_dev_configure,
2830         .dev_infos_get              = txgbe_dev_info_get,
2831         .dev_start                  = txgbe_dev_start,
2832         .dev_stop                   = txgbe_dev_stop,
2833         .dev_set_link_up            = txgbe_dev_set_link_up,
2834         .dev_set_link_down          = txgbe_dev_set_link_down,
2835         .dev_close                  = txgbe_dev_close,
2836         .dev_reset                  = txgbe_dev_reset,
2837         .link_update                = txgbe_dev_link_update,
2838         .stats_get                  = txgbe_dev_stats_get,
2839         .xstats_get                 = txgbe_dev_xstats_get,
2840         .xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
2841         .stats_reset                = txgbe_dev_stats_reset,
2842         .xstats_reset               = txgbe_dev_xstats_reset,
2843         .xstats_get_names           = txgbe_dev_xstats_get_names,
2844         .xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
2845         .queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
2846         .dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
2847         .vlan_filter_set            = txgbe_vlan_filter_set,
2848         .vlan_tpid_set              = txgbe_vlan_tpid_set,
2849         .vlan_offload_set           = txgbe_vlan_offload_set,
2850         .vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
2851         .rx_queue_start             = txgbe_dev_rx_queue_start,
2852         .rx_queue_stop              = txgbe_dev_rx_queue_stop,
2853         .tx_queue_start             = txgbe_dev_tx_queue_start,
2854         .tx_queue_stop              = txgbe_dev_tx_queue_stop,
2855         .rx_queue_setup             = txgbe_dev_rx_queue_setup,
2856         .rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
2857         .rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
2858         .rx_queue_release           = txgbe_dev_rx_queue_release,
2859         .tx_queue_setup             = txgbe_dev_tx_queue_setup,
2860         .tx_queue_release           = txgbe_dev_tx_queue_release,
2861         .mac_addr_add               = txgbe_add_rar,
2862         .mac_addr_remove            = txgbe_remove_rar,
2863         .mac_addr_set               = txgbe_set_default_mac_addr,
2864         .uc_hash_table_set          = txgbe_uc_hash_table_set,
2865         .uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
2866         .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
2867         .rxq_info_get               = txgbe_rxq_info_get,
2868         .txq_info_get               = txgbe_txq_info_get,
2869 };
2870
2871 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
2872 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
2873 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
2874
2875 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
2876 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
2877
2878 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
2879         RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
2880 #endif
2881 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
2882         RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
2883 #endif
2884
2885 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
2886         RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
2887 #endif