dpdk.git: drivers/net/txgbe/txgbe_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020
3  */
4
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
11
12 #include <rte_interrupts.h>
13 #include <rte_log.h>
14 #include <rte_debug.h>
15 #include <rte_pci.h>
16 #include <rte_memory.h>
17 #include <rte_eal.h>
18 #include <rte_alarm.h>
19
20 #include "txgbe_logs.h"
21 #include "base/txgbe.h"
22 #include "txgbe_ethdev.h"
23 #include "txgbe_rxtx.h"
24 #include "txgbe_regs_group.h"
25
26 static const struct reg_info txgbe_regs_general[] = {
27         {TXGBE_RST, 1, 1, "TXGBE_RST"},
28         {TXGBE_STAT, 1, 1, "TXGBE_STAT"},
29         {TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
30         {TXGBE_SDP, 1, 1, "TXGBE_SDP"},
31         {TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
32         {TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
33         {0, 0, 0, ""}
34 };
35
36 static const struct reg_info txgbe_regs_nvm[] = {
37         {0, 0, 0, ""}
38 };
39
40 static const struct reg_info txgbe_regs_interrupt[] = {
41         {0, 0, 0, ""}
42 };
43
44 static const struct reg_info txgbe_regs_fctl_others[] = {
45         {0, 0, 0, ""}
46 };
47
48 static const struct reg_info txgbe_regs_rxdma[] = {
49         {0, 0, 0, ""}
50 };
51
52 static const struct reg_info txgbe_regs_rx[] = {
53         {0, 0, 0, ""}
54 };
55
56 static const struct reg_info txgbe_regs_tx[] = {
57         {0, 0, 0, ""}
58 };
59
60 static const struct reg_info txgbe_regs_wakeup[] = {
61         {0, 0, 0, ""}
62 };
63
64 static const struct reg_info txgbe_regs_dcb[] = {
65         {0, 0, 0, ""}
66 };
67
68 static const struct reg_info txgbe_regs_mac[] = {
69         {0, 0, 0, ""}
70 };
71
72 static const struct reg_info txgbe_regs_diagnostic[] = {
73         {0, 0, 0, ""},
74 };
75
76 /* PF registers */
77 static const struct reg_info *txgbe_regs_others[] = {
78                                 txgbe_regs_general,
79                                 txgbe_regs_nvm,
80                                 txgbe_regs_interrupt,
81                                 txgbe_regs_fctl_others,
82                                 txgbe_regs_rxdma,
83                                 txgbe_regs_rx,
84                                 txgbe_regs_tx,
85                                 txgbe_regs_wakeup,
86                                 txgbe_regs_dcb,
87                                 txgbe_regs_mac,
88                                 txgbe_regs_diagnostic,
89                                 NULL};
90
91 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
92 static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
93 static int txgbe_dev_close(struct rte_eth_dev *dev);
94 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
95                                 int wait_to_complete);
96 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
97 static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
98 static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
99                                         uint16_t queue);
100
101 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
102 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
103 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
104 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
105 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
106 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
107                                       struct rte_intr_handle *handle);
108 static void txgbe_dev_interrupt_handler(void *param);
109 static void txgbe_dev_interrupt_delayed_handler(void *param);
110 static void txgbe_configure_msix(struct rte_eth_dev *dev);
111
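/* Helpers to set/clear/test a queue's bit in the per-port HW VLAN-strip
 * bitmap; NBBY is the number of bits per byte, so each bitmap word covers
 * sizeof(bitmap[0]) * NBBY queues.
 */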
112 #define TXGBE_SET_HWSTRIP(h, q) do {\
113                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
114                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
115                 (h)->bitmap[idx] |= 1 << bit;\
116         } while (0)
117
118 #define TXGBE_CLEAR_HWSTRIP(h, q) do {\
119                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
120                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
121                 (h)->bitmap[idx] &= ~(1 << bit);\
122         } while (0)
123
124 #define TXGBE_GET_HWSTRIP(h, q, r) do {\
125                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
126                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
127                 (r) = (h)->bitmap[idx] >> bit & 1;\
128         } while (0)
129
130 /*
131  * The set of PCI devices this driver supports
132  */
133 static const struct rte_pci_id pci_id_txgbe_map[] = {
134         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
135         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
136         { .vendor_id = 0, /* sentinel */ },
137 };
138
139 static const struct rte_eth_desc_lim rx_desc_lim = {
140         .nb_max = TXGBE_RING_DESC_MAX,
141         .nb_min = TXGBE_RING_DESC_MIN,
142         .nb_align = TXGBE_RXD_ALIGN,
143 };
144
145 static const struct rte_eth_desc_lim tx_desc_lim = {
146         .nb_max = TXGBE_RING_DESC_MAX,
147         .nb_min = TXGBE_RING_DESC_MIN,
148         .nb_align = TXGBE_TXD_ALIGN,
149         .nb_seg_max = TXGBE_TX_MAX_SEG,
150         .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
151 };
152
153 static const struct eth_dev_ops txgbe_eth_dev_ops;
154
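/* Map extended statistics names to their byte offsets in struct txgbe_hw_stats. */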
155 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
156 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
157 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
158         /* MNG RxTx */
159         HW_XSTAT(mng_bmc2host_packets),
160         HW_XSTAT(mng_host2bmc_packets),
161         /* Basic RxTx */
162         HW_XSTAT(rx_packets),
163         HW_XSTAT(tx_packets),
164         HW_XSTAT(rx_bytes),
165         HW_XSTAT(tx_bytes),
166         HW_XSTAT(rx_total_bytes),
167         HW_XSTAT(rx_total_packets),
168         HW_XSTAT(tx_total_packets),
169         HW_XSTAT(rx_total_missed_packets),
170         HW_XSTAT(rx_broadcast_packets),
171         HW_XSTAT(rx_multicast_packets),
172         HW_XSTAT(rx_management_packets),
173         HW_XSTAT(tx_management_packets),
174         HW_XSTAT(rx_management_dropped),
175
176         /* Basic Error */
177         HW_XSTAT(rx_crc_errors),
178         HW_XSTAT(rx_illegal_byte_errors),
179         HW_XSTAT(rx_error_bytes),
180         HW_XSTAT(rx_mac_short_packet_dropped),
181         HW_XSTAT(rx_length_errors),
182         HW_XSTAT(rx_undersize_errors),
183         HW_XSTAT(rx_fragment_errors),
184         HW_XSTAT(rx_oversize_errors),
185         HW_XSTAT(rx_jabber_errors),
186         HW_XSTAT(rx_l3_l4_xsum_error),
187         HW_XSTAT(mac_local_errors),
188         HW_XSTAT(mac_remote_errors),
189
190         /* Flow Director */
191         HW_XSTAT(flow_director_added_filters),
192         HW_XSTAT(flow_director_removed_filters),
193         HW_XSTAT(flow_director_filter_add_errors),
194         HW_XSTAT(flow_director_filter_remove_errors),
195         HW_XSTAT(flow_director_matched_filters),
196         HW_XSTAT(flow_director_missed_filters),
197
198         /* FCoE */
199         HW_XSTAT(rx_fcoe_crc_errors),
200         HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
201         HW_XSTAT(rx_fcoe_dropped),
202         HW_XSTAT(rx_fcoe_packets),
203         HW_XSTAT(tx_fcoe_packets),
204         HW_XSTAT(rx_fcoe_bytes),
205         HW_XSTAT(tx_fcoe_bytes),
206         HW_XSTAT(rx_fcoe_no_ddp),
207         HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
208
209         /* MACSEC */
210         HW_XSTAT(tx_macsec_pkts_untagged),
211         HW_XSTAT(tx_macsec_pkts_encrypted),
212         HW_XSTAT(tx_macsec_pkts_protected),
213         HW_XSTAT(tx_macsec_octets_encrypted),
214         HW_XSTAT(tx_macsec_octets_protected),
215         HW_XSTAT(rx_macsec_pkts_untagged),
216         HW_XSTAT(rx_macsec_pkts_badtag),
217         HW_XSTAT(rx_macsec_pkts_nosci),
218         HW_XSTAT(rx_macsec_pkts_unknownsci),
219         HW_XSTAT(rx_macsec_octets_decrypted),
220         HW_XSTAT(rx_macsec_octets_validated),
221         HW_XSTAT(rx_macsec_sc_pkts_unchecked),
222         HW_XSTAT(rx_macsec_sc_pkts_delayed),
223         HW_XSTAT(rx_macsec_sc_pkts_late),
224         HW_XSTAT(rx_macsec_sa_pkts_ok),
225         HW_XSTAT(rx_macsec_sa_pkts_invalid),
226         HW_XSTAT(rx_macsec_sa_pkts_notvalid),
227         HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
228         HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
229
230         /* MAC RxTx */
231         HW_XSTAT(rx_size_64_packets),
232         HW_XSTAT(rx_size_65_to_127_packets),
233         HW_XSTAT(rx_size_128_to_255_packets),
234         HW_XSTAT(rx_size_256_to_511_packets),
235         HW_XSTAT(rx_size_512_to_1023_packets),
236         HW_XSTAT(rx_size_1024_to_max_packets),
237         HW_XSTAT(tx_size_64_packets),
238         HW_XSTAT(tx_size_65_to_127_packets),
239         HW_XSTAT(tx_size_128_to_255_packets),
240         HW_XSTAT(tx_size_256_to_511_packets),
241         HW_XSTAT(tx_size_512_to_1023_packets),
242         HW_XSTAT(tx_size_1024_to_max_packets),
243
244         /* Flow Control */
245         HW_XSTAT(tx_xon_packets),
246         HW_XSTAT(rx_xon_packets),
247         HW_XSTAT(tx_xoff_packets),
248         HW_XSTAT(rx_xoff_packets),
249
250         HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
251         HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
252         HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
253         HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
254 };
255
256 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
257                            sizeof(rte_txgbe_stats_strings[0]))
258
259 /* Per-priority statistics */
260 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
261 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
262         UP_XSTAT(rx_up_packets),
263         UP_XSTAT(tx_up_packets),
264         UP_XSTAT(rx_up_bytes),
265         UP_XSTAT(tx_up_bytes),
266         UP_XSTAT(rx_up_drop_packets),
267
268         UP_XSTAT(tx_up_xon_packets),
269         UP_XSTAT(rx_up_xon_packets),
270         UP_XSTAT(tx_up_xoff_packets),
271         UP_XSTAT(rx_up_xoff_packets),
272         UP_XSTAT(rx_up_dropped),
273         UP_XSTAT(rx_up_mbuf_alloc_errors),
274         UP_XSTAT(tx_up_xon2off_packets),
275 };
276
277 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
278                            sizeof(rte_txgbe_up_strings[0]))
279
280 /* Per-queue statistics */
281 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
282 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
283         QP_XSTAT(rx_qp_packets),
284         QP_XSTAT(tx_qp_packets),
285         QP_XSTAT(rx_qp_bytes),
286         QP_XSTAT(tx_qp_bytes),
287         QP_XSTAT(rx_qp_mc_packets),
288 };
289
290 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
291                            sizeof(rte_txgbe_qp_strings[0]))
292
293 static inline int
294 txgbe_is_sfp(struct txgbe_hw *hw)
295 {
296         switch (hw->phy.type) {
297         case txgbe_phy_sfp_avago:
298         case txgbe_phy_sfp_ftl:
299         case txgbe_phy_sfp_intel:
300         case txgbe_phy_sfp_unknown:
301         case txgbe_phy_sfp_tyco_passive:
302         case txgbe_phy_sfp_unknown_passive:
303                 return 1;
304         default:
305                 return 0;
306         }
307 }
308
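/* Reset the PF and set the Reset Done bit so PF/VF mailbox operations can
 * work; a missing SFP module is not treated as an error here.
 */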
309 static inline int32_t
310 txgbe_pf_reset_hw(struct txgbe_hw *hw)
311 {
312         uint32_t ctrl_ext;
313         int32_t status;
314
315         status = hw->mac.reset_hw(hw);
316
317         ctrl_ext = rd32(hw, TXGBE_PORTCTL);
318         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
319         ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
320         wr32(hw, TXGBE_PORTCTL, ctrl_ext);
321         txgbe_flush(hw);
322
323         if (status == TXGBE_ERR_SFP_NOT_PRESENT)
324                 status = 0;
325         return status;
326 }
327
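/* Enable device interrupts: program the misc cause enables and clear the
 * mask bits of both interrupt vector groups.
 */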
328 static inline void
329 txgbe_enable_intr(struct rte_eth_dev *dev)
330 {
331         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
332         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
333
334         wr32(hw, TXGBE_IENMISC, intr->mask_misc);
335         wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
336         wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
337         txgbe_flush(hw);
338 }
339
340 static void
341 txgbe_disable_intr(struct txgbe_hw *hw)
342 {
343         PMD_INIT_FUNC_TRACE();
344
345         wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
346         wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
347         wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
348         txgbe_flush(hw);
349 }
350
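/* Map a RX/TX queue to a statistics counter slot: each QSM register packs
 * NB_QMAP_FIELDS_PER_QSM_REG fields of QSM_REG_NB_BITS_PER_QMAP_FIELD bits,
 * and the resulting value is kept in the per-device stat_mappings shadow.
 */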
351 static int
352 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
353                                   uint16_t queue_id,
354                                   uint8_t stat_idx,
355                                   uint8_t is_rx)
356 {
357         struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
358         struct txgbe_stat_mappings *stat_mappings =
359                 TXGBE_DEV_STAT_MAPPINGS(eth_dev);
360         uint32_t qsmr_mask = 0;
361         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
362         uint32_t q_map;
363         uint8_t n, offset;
364
365         if (hw->mac.type != txgbe_mac_raptor)
366                 return -ENOSYS;
367
368         if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
369                 return -EIO;
370
371         PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
372                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
373                      queue_id, stat_idx);
374
375         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
376         if (n >= TXGBE_NB_STAT_MAPPING) {
377                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
378                 return -EIO;
379         }
380         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
381
382         /* Now clear any previous stat_idx set */
383         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
384         if (!is_rx)
385                 stat_mappings->tqsm[n] &= ~clearing_mask;
386         else
387                 stat_mappings->rqsm[n] &= ~clearing_mask;
388
389         q_map = (uint32_t)stat_idx;
390         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
391         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
392         if (!is_rx)
393                 stat_mappings->tqsm[n] |= qsmr_mask;
394         else
395                 stat_mappings->rqsm[n] |= qsmr_mask;
396
397         PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
398                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
399                      queue_id, stat_idx);
400         PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
401                      is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
402         return 0;
403 }
404
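/* Fill dcb_config with defaults: bandwidth shared evenly across all TCs,
 * every user priority mapped to TC0, and PFC disabled.
 */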
405 static void
406 txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
407 {
408         int i;
409         u8 bwgp;
410         struct txgbe_dcb_tc_config *tc;
411
412         UNREFERENCED_PARAMETER(hw);
413
414         dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
415         dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
416         bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
417         for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
418                 tc = &dcb_config->tc_config[i];
419                 tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
420                 tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
421                 tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
422                 tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
423                 tc->pfc = txgbe_dcb_pfc_disabled;
424         }
425
426         /* Initialize default user-priority-to-TC mapping: UPx -> TC0 */
427         tc = &dcb_config->tc_config[0];
428         tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
429         tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
430         for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
431                 dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
432                 dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
433         }
434         dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
435         dcb_config->pfc_mode_enable = false;
436         dcb_config->vt_mode = true;
437         dcb_config->round_robin_enable = false;
438         /* support all DCB capabilities */
439         dcb_config->support.capabilities = 0xFF;
440 }
441
442 /*
443  * Ensure that all locks are released before first NVM or PHY access
444  */
445 static void
446 txgbe_swfw_lock_reset(struct txgbe_hw *hw)
447 {
448         uint16_t mask;
449
450         /*
451          * These locks are trickier since they are common to all ports; but
452          * swfw_sync retries long enough (1 s) to be almost sure that, if the
453          * lock cannot be taken, it is due to an improper hold of the
454          * semaphore.
455          */
456         mask = TXGBE_MNGSEM_SWPHY |
457                TXGBE_MNGSEM_SWMBX |
458                TXGBE_MNGSEM_SWFLASH;
459         if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
460                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
461
462         hw->mac.release_swfw_sync(hw, mask);
463 }
464
465 static int
466 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
467 {
468         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
469         struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
470         struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
471         struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
472         struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
473         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
474         struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
475         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
476         const struct rte_memzone *mz;
477         uint32_t ctrl_ext;
478         uint16_t csum;
479         int err, i, ret;
480
481         PMD_INIT_FUNC_TRACE();
482
483         eth_dev->dev_ops = &txgbe_eth_dev_ops;
484         eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
485         eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
486         eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
487         eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
488         eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
489         eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
490
491         /*
492          * For secondary processes, we don't initialise any further as primary
493          * has already done this work. Only check we don't need a different
494          * RX and TX function.
495          */
496         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
497                 struct txgbe_tx_queue *txq;
498                 /* The TX function in the primary process was set by the last
499                  * initialized queue; a Tx queue may not have been initialized yet.
500                  */
501                 if (eth_dev->data->tx_queues) {
502                         uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
503                         txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
504                         txgbe_set_tx_function(eth_dev, txq);
505                 } else {
506                         /* Use default TX function if we get here */
507                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
508                                      "Using default TX function.");
509                 }
510
511                 txgbe_set_rx_function(eth_dev);
512
513                 return 0;
514         }
515
516         rte_eth_copy_pci_info(eth_dev, pci_dev);
517
518         /* Vendor and Device ID need to be set before init of shared code */
519         hw->device_id = pci_dev->id.device_id;
520         hw->vendor_id = pci_dev->id.vendor_id;
521         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
522         hw->allow_unsupported_sfp = 1;
523
524         /* Reserve memory for interrupt status block */
525         mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
526                 16, TXGBE_ALIGN, SOCKET_ID_ANY);
527         if (mz == NULL)
528                 return -ENOMEM;
529
530         hw->isb_dma = TMZ_PADDR(mz);
531         hw->isb_mem = TMZ_VADDR(mz);
532
533         /* Initialize the shared code (base driver) */
534         err = txgbe_init_shared_code(hw);
535         if (err != 0) {
536                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
537                 return -EIO;
538         }
539
540         /* Unlock any pending hardware semaphore */
541         txgbe_swfw_lock_reset(hw);
542
543         /* Initialize DCB configuration */
544         memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
545         txgbe_dcb_init(hw, dcb_config);
546
547         /* Get Hardware Flow Control setting */
548         hw->fc.requested_mode = txgbe_fc_full;
549         hw->fc.current_mode = txgbe_fc_full;
550         hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
551         for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
552                 hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
553                 hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
554         }
555         hw->fc.send_xon = 1;
556
557         err = hw->rom.init_params(hw);
558         if (err != 0) {
559                 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
560                 return -EIO;
561         }
562
563         /* Make sure we have a good EEPROM before we read from it */
564         err = hw->rom.validate_checksum(hw, &csum);
565         if (err != 0) {
566                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
567                 return -EIO;
568         }
569
570         err = hw->mac.init_hw(hw);
571
572         /*
573          * Devices with copper phys will fail to initialise if txgbe_init_hw()
574          * is called too soon after the kernel driver unbinding/binding occurs.
575          * The failure occurs in txgbe_identify_phy() for all devices,
576          * but for non-copper devices, txgbe_identify_sfp_module() is
577          * also called. See txgbe_identify_phy(). The reason for the
578          * failure is not known, and only occurs when virtualisation features
579          * are disabled in the BIOS. A delay of 200ms was found to be enough by
580          * trial-and-error, and is doubled to be safe.
581          */
582         if (err && hw->phy.media_type == txgbe_media_type_copper) {
583                 rte_delay_ms(200);
584                 err = hw->mac.init_hw(hw);
585         }
586
587         if (err == TXGBE_ERR_SFP_NOT_PRESENT)
588                 err = 0;
589
590         if (err == TXGBE_ERR_EEPROM_VERSION) {
591                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
592                              "LOM.  Please be aware there may be issues associated "
593                              "with your hardware.");
594                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
595                              "please contact your hardware representative "
596                              "who provided you with this hardware.");
597         } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
598                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
599         }
600         if (err) {
601                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
602                 return -EIO;
603         }
604
605         /* Reset the hw statistics */
606         txgbe_dev_stats_reset(eth_dev);
607
608         /* disable interrupt */
609         txgbe_disable_intr(hw);
610
611         /* Allocate memory for storing MAC addresses */
612         eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
613                                                hw->mac.num_rar_entries, 0);
614         if (eth_dev->data->mac_addrs == NULL) {
615                 PMD_INIT_LOG(ERR,
616                              "Failed to allocate %u bytes needed to store "
617                              "MAC addresses",
618                              RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
619                 return -ENOMEM;
620         }
621
622         /* Copy the permanent MAC address */
623         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
624                         &eth_dev->data->mac_addrs[0]);
625
626         /* Allocate memory for storing hash filter MAC addresses */
627         eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
628                         RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
629         if (eth_dev->data->hash_mac_addrs == NULL) {
630                 PMD_INIT_LOG(ERR,
631                              "Failed to allocate %d bytes needed to store MAC addresses",
632                              RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
633                 return -ENOMEM;
634         }
635
636         /* initialize the vfta */
637         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
638
639         /* initialize the hw strip bitmap */
640         memset(hwstrip, 0, sizeof(*hwstrip));
641
642         /* initialize PF if max_vfs not zero */
643         ret = txgbe_pf_host_init(eth_dev);
644         if (ret) {
645                 rte_free(eth_dev->data->mac_addrs);
646                 eth_dev->data->mac_addrs = NULL;
647                 rte_free(eth_dev->data->hash_mac_addrs);
648                 eth_dev->data->hash_mac_addrs = NULL;
649                 return ret;
650         }
651
652         ctrl_ext = rd32(hw, TXGBE_PORTCTL);
653         /* let hardware know driver is loaded */
654         ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
655         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
656         ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
657         wr32(hw, TXGBE_PORTCTL, ctrl_ext);
658         txgbe_flush(hw);
659
660         if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
661                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
662                              (int)hw->mac.type, (int)hw->phy.type,
663                              (int)hw->phy.sfp_type);
664         else
665                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
666                              (int)hw->mac.type, (int)hw->phy.type);
667
668         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
669                      eth_dev->data->port_id, pci_dev->id.vendor_id,
670                      pci_dev->id.device_id);
671
672         rte_intr_callback_register(intr_handle,
673                                    txgbe_dev_interrupt_handler, eth_dev);
674
675         /* enable uio/vfio intr/eventfd mapping */
676         rte_intr_enable(intr_handle);
677
678         /* enable support intr */
679         txgbe_enable_intr(eth_dev);
680
681         /* initialize filter info */
682         memset(filter_info, 0,
683                sizeof(struct txgbe_filter_info));
684
685         /* initialize 5tuple filter list */
686         TAILQ_INIT(&filter_info->fivetuple_list);
687
688         /* initialize bandwidth configuration info */
689         memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
690
691         return 0;
692 }
693
694 static int
695 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
696 {
697         PMD_INIT_FUNC_TRACE();
698
699         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
700                 return 0;
701
702         txgbe_dev_close(eth_dev);
703
704         return 0;
705 }
706
707 static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
708 {
709         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
710         struct txgbe_5tuple_filter *p_5tuple;
711
712         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
713                 TAILQ_REMOVE(&filter_info->fivetuple_list,
714                              p_5tuple,
715                              entries);
716                 rte_free(p_5tuple);
717         }
718         memset(filter_info->fivetuple_mask, 0,
719                sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
720
721         return 0;
722 }
723
724 static int
725 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
726                 struct rte_pci_device *pci_dev)
727 {
728         struct rte_eth_dev *pf_ethdev;
729         struct rte_eth_devargs eth_da;
730         int retval;
731
732         if (pci_dev->device.devargs) {
733                 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
734                                 &eth_da);
735                 if (retval)
736                         return retval;
737         } else {
738                 memset(&eth_da, 0, sizeof(eth_da));
739         }
740
741         retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
742                         sizeof(struct txgbe_adapter),
743                         eth_dev_pci_specific_init, pci_dev,
744                         eth_txgbe_dev_init, NULL);
745
746         if (retval || eth_da.nb_representor_ports < 1)
747                 return retval;
748
749         pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
750         if (pf_ethdev == NULL)
751                 return -ENODEV;
752
753         return 0;
754 }
755
756 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
757 {
758         struct rte_eth_dev *ethdev;
759
760         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
761         if (!ethdev)
762                 return -ENODEV;
763
764         return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
765 }
766
767 static struct rte_pci_driver rte_txgbe_pmd = {
768         .id_table = pci_id_txgbe_map,
769         .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
770                      RTE_PCI_DRV_INTR_LSC,
771         .probe = eth_txgbe_pci_probe,
772         .remove = eth_txgbe_pci_remove,
773 };
774
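/* vlan_id indexes the 4096-bit VLAN filter table: bits 5..11 select the
 * 32-bit VLANTBL word, bits 0..4 the bit within it; the shadow VFTA copy
 * is kept in sync for later restore.
 */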
775 static int
776 txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
777 {
778         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
779         struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
780         uint32_t vfta;
781         uint32_t vid_idx;
782         uint32_t vid_bit;
783
784         vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
785         vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
786         vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
787         if (on)
788                 vfta |= vid_bit;
789         else
790                 vfta &= ~vid_bit;
791         wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
792
793         /* update local VFTA copy */
794         shadow_vfta->vfta[vid_idx] = vfta;
795
796         return 0;
797 }
798
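/* Changing the per-queue VLAN-strip bit requires reprogramming RXCFG; if the
 * ring is enabled and the bit actually changes, the queue is stopped,
 * rewritten and restarted.
 */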
799 static void
800 txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
801 {
802         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
803         struct txgbe_rx_queue *rxq;
804         bool restart;
805         uint32_t rxcfg, rxbal, rxbah;
806
807         if (on)
808                 txgbe_vlan_hw_strip_enable(dev, queue);
809         else
810                 txgbe_vlan_hw_strip_disable(dev, queue);
811
812         rxq = dev->data->rx_queues[queue];
813         rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
814         rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
815         rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
816         if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
817                 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
818                         !(rxcfg & TXGBE_RXCFG_VLAN);
819                 rxcfg |= TXGBE_RXCFG_VLAN;
820         } else {
821                 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
822                         (rxcfg & TXGBE_RXCFG_VLAN);
823                 rxcfg &= ~TXGBE_RXCFG_VLAN;
824         }
825         rxcfg &= ~TXGBE_RXCFG_ENA;
826
827         if (restart) {
828                 /* set vlan strip for ring */
829                 txgbe_dev_rx_queue_stop(dev, queue);
830                 wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
831                 wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
832                 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
833                 txgbe_dev_rx_queue_start(dev, queue);
834         }
835 }
836
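/* Program the TPID used for VLAN matching/insertion; setting the inner TPID
 * is only possible when extended (stacked) VLAN mode is enabled.
 */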
837 static int
838 txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
839                     enum rte_vlan_type vlan_type,
840                     uint16_t tpid)
841 {
842         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
843         int ret = 0;
844         uint32_t portctrl, vlan_ext, qinq;
845
846         portctrl = rd32(hw, TXGBE_PORTCTL);
847
848         vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
849         qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
850         switch (vlan_type) {
851         case ETH_VLAN_TYPE_INNER:
852                 if (vlan_ext) {
853                         wr32m(hw, TXGBE_VLANCTL,
854                                 TXGBE_VLANCTL_TPID_MASK,
855                                 TXGBE_VLANCTL_TPID(tpid));
856                         wr32m(hw, TXGBE_DMATXCTRL,
857                                 TXGBE_DMATXCTRL_TPID_MASK,
858                                 TXGBE_DMATXCTRL_TPID(tpid));
859                 } else {
860                         ret = -ENOTSUP;
861                         PMD_DRV_LOG(ERR, "Inner type is not supported"
862                                     " by single VLAN");
863                 }
864
865                 if (qinq) {
866                         wr32m(hw, TXGBE_TAGTPID(0),
867                                 TXGBE_TAGTPID_LSB_MASK,
868                                 TXGBE_TAGTPID_LSB(tpid));
869                 }
870                 break;
871         case ETH_VLAN_TYPE_OUTER:
872                 if (vlan_ext) {
873                         /* Only the high 16 bits are valid */
874                         wr32m(hw, TXGBE_EXTAG,
875                                 TXGBE_EXTAG_VLAN_MASK,
876                                 TXGBE_EXTAG_VLAN(tpid));
877                 } else {
878                         wr32m(hw, TXGBE_VLANCTL,
879                                 TXGBE_VLANCTL_TPID_MASK,
880                                 TXGBE_VLANCTL_TPID(tpid));
881                         wr32m(hw, TXGBE_DMATXCTRL,
882                                 TXGBE_DMATXCTRL_TPID_MASK,
883                                 TXGBE_DMATXCTRL_TPID(tpid));
884                 }
885
886                 if (qinq) {
887                         wr32m(hw, TXGBE_TAGTPID(0),
888                                 TXGBE_TAGTPID_MSB_MASK,
889                                 TXGBE_TAGTPID_MSB(tpid));
890                 }
891                 break;
892         default:
893                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
894                 return -EINVAL;
895         }
896
897         return ret;
898 }
899
900 void
901 txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
902 {
903         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
904         uint32_t vlnctrl;
905
906         PMD_INIT_FUNC_TRACE();
907
908         /* Filter Table Disable */
909         vlnctrl = rd32(hw, TXGBE_VLANCTL);
910         vlnctrl &= ~TXGBE_VLANCTL_VFE;
911         wr32(hw, TXGBE_VLANCTL, vlnctrl);
912 }
913
914 void
915 txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
916 {
917         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
918         struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
919         uint32_t vlnctrl;
920         uint16_t i;
921
922         PMD_INIT_FUNC_TRACE();
923
924         /* Filter Table Enable */
925         vlnctrl = rd32(hw, TXGBE_VLANCTL);
926         vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
927         vlnctrl |= TXGBE_VLANCTL_VFE;
928         wr32(hw, TXGBE_VLANCTL, vlnctrl);
929
930         /* write whatever is in local vfta copy */
931         for (i = 0; i < TXGBE_VFTA_SIZE; i++)
932                 wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
933 }
934
935 void
936 txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
937 {
938         struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
939         struct txgbe_rx_queue *rxq;
940
941         if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
942                 return;
943
944         if (on)
945                 TXGBE_SET_HWSTRIP(hwstrip, queue);
946         else
947                 TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
948
949         if (queue >= dev->data->nb_rx_queues)
950                 return;
951
952         rxq = dev->data->rx_queues[queue];
953
954         if (on) {
955                 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
956                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
957         } else {
958                 rxq->vlan_flags = PKT_RX_VLAN;
959                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
960         }
961 }
962
963 static void
964 txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
965 {
966         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
967         uint32_t ctrl;
968
969         PMD_INIT_FUNC_TRACE();
970
971         ctrl = rd32(hw, TXGBE_RXCFG(queue));
972         ctrl &= ~TXGBE_RXCFG_VLAN;
973         wr32(hw, TXGBE_RXCFG(queue), ctrl);
974
975         /* record this setting in the per-queue HW strip bitmap */
976         txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
977 }
978
979 static void
980 txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
981 {
982         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
983         uint32_t ctrl;
984
985         PMD_INIT_FUNC_TRACE();
986
987         ctrl = rd32(hw, TXGBE_RXCFG(queue));
988         ctrl |= TXGBE_RXCFG_VLAN;
989         wr32(hw, TXGBE_RXCFG(queue), ctrl);
990
991         /* record this setting in the per-queue HW strip bitmap */
992         txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
993 }
994
995 static void
996 txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
997 {
998         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
999         uint32_t ctrl;
1000
1001         PMD_INIT_FUNC_TRACE();
1002
1003         ctrl = rd32(hw, TXGBE_PORTCTL);
1004         ctrl &= ~TXGBE_PORTCTL_VLANEXT;
1005         ctrl &= ~TXGBE_PORTCTL_QINQ;
1006         wr32(hw, TXGBE_PORTCTL, ctrl);
1007 }
1008
1009 static void
1010 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1011 {
1012         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1013         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1014         struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
1015         uint32_t ctrl;
1016
1017         PMD_INIT_FUNC_TRACE();
1018
1019         ctrl  = rd32(hw, TXGBE_PORTCTL);
1020         ctrl |= TXGBE_PORTCTL_VLANEXT;
1021         if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
1022             txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
1023                 ctrl |= TXGBE_PORTCTL_QINQ;
1024         wr32(hw, TXGBE_PORTCTL, ctrl);
1025 }
1026
1027 void
1028 txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
1029 {
1030         struct txgbe_rx_queue *rxq;
1031         uint16_t i;
1032
1033         PMD_INIT_FUNC_TRACE();
1034
1035         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1036                 rxq = dev->data->rx_queues[i];
1037
1038                 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1039                         txgbe_vlan_strip_queue_set(dev, i, 1);
1040                 else
1041                         txgbe_vlan_strip_queue_set(dev, i, 0);
1042         }
1043 }
1044
1045 void
1046 txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
1047 {
1048         uint16_t i;
1049         struct rte_eth_rxmode *rxmode;
1050         struct txgbe_rx_queue *rxq;
1051
1052         if (mask & ETH_VLAN_STRIP_MASK) {
1053                 rxmode = &dev->data->dev_conf.rxmode;
1054                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1055                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1056                                 rxq = dev->data->rx_queues[i];
1057                                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1058                         }
1059                 else
1060                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1061                                 rxq = dev->data->rx_queues[i];
1062                                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1063                         }
1064         }
1065 }
1066
1067 static int
1068 txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
1069 {
1070         struct rte_eth_rxmode *rxmode;
1071         rxmode = &dev->data->dev_conf.rxmode;
1072
1073         if (mask & ETH_VLAN_STRIP_MASK)
1074                 txgbe_vlan_hw_strip_config(dev);
1075
1076         if (mask & ETH_VLAN_FILTER_MASK) {
1077                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1078                         txgbe_vlan_hw_filter_enable(dev);
1079                 else
1080                         txgbe_vlan_hw_filter_disable(dev);
1081         }
1082
1083         if (mask & ETH_VLAN_EXTEND_MASK) {
1084                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
1085                         txgbe_vlan_hw_extend_enable(dev);
1086                 else
1087                         txgbe_vlan_hw_extend_disable(dev);
1088         }
1089
1090         return 0;
1091 }
1092
1093 static int
1094 txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1095 {
1096         txgbe_config_vlan_strip_on_all_queues(dev, mask);
1097
1098         txgbe_vlan_offload_config(dev, mask);
1099
1100         return 0;
1101 }
1102
1103 static void
1104 txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1105 {
1106         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1107         /* VLANCTL: enable vlan filtering and allow all vlan tags through */
1108         uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
1109
1110         vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
1111         wr32(hw, TXGBE_VLANCTL, vlanctrl);
1112 }
1113
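/* With SR-IOV and RSS, only 1, 2 or 4 RX queues per pool are allowed; derive
 * the active pool count and default pool queue index from that.
 */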
1114 static int
1115 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1116 {
1117         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1118
1119         switch (nb_rx_q) {
1120         case 1:
1121         case 2:
1122                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
1123                 break;
1124         case 4:
1125                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
1126                 break;
1127         default:
1128                 return -EINVAL;
1129         }
1130
1131         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
1132                 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1133         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
1134                 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1135         return 0;
1136 }
1137
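/* Validate the requested multi-queue (RSS/VMDq/DCB) RX/TX modes and queue
 * counts, forcing SR-IOV-compatible modes when SR-IOV is active.
 */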
1138 static int
1139 txgbe_check_mq_mode(struct rte_eth_dev *dev)
1140 {
1141         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1142         uint16_t nb_rx_q = dev->data->nb_rx_queues;
1143         uint16_t nb_tx_q = dev->data->nb_tx_queues;
1144
1145         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1146                 /* check multi-queue mode */
1147                 switch (dev_conf->rxmode.mq_mode) {
1148                 case ETH_MQ_RX_VMDQ_DCB:
1149                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
1150                         break;
1151                 case ETH_MQ_RX_VMDQ_DCB_RSS:
1152                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
1153                         PMD_INIT_LOG(ERR, "SRIOV active,"
1154                                         " unsupported mq_mode rx %d.",
1155                                         dev_conf->rxmode.mq_mode);
1156                         return -EINVAL;
1157                 case ETH_MQ_RX_RSS:
1158                 case ETH_MQ_RX_VMDQ_RSS:
1159                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1160                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1161                                 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1162                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1163                                                 " invalid queue number"
1164                                                 " for VMDQ RSS, allowed"
1165                                                 " value are 1, 2 or 4.");
1166                                         return -EINVAL;
1167                                 }
1168                         break;
1169                 case ETH_MQ_RX_VMDQ_ONLY:
1170                 case ETH_MQ_RX_NONE:
1171                         /* if no mq mode is configured, use the default scheme */
1172                         dev->data->dev_conf.rxmode.mq_mode =
1173                                 ETH_MQ_RX_VMDQ_ONLY;
1174                         break;
1175                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1176                         /* SRIOV only works in VMDq enable mode */
1177                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1178                                         " wrong mq_mode rx %d.",
1179                                         dev_conf->rxmode.mq_mode);
1180                         return -EINVAL;
1181                 }
1182
1183                 switch (dev_conf->txmode.mq_mode) {
1184                 case ETH_MQ_TX_VMDQ_DCB:
1185                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1186                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1187                         break;
1188                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1189                         dev->data->dev_conf.txmode.mq_mode =
1190                                 ETH_MQ_TX_VMDQ_ONLY;
1191                         break;
1192                 }
1193
1194                 /* check valid queue number */
1195                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1196                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1197                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1198                                         " nb_rx_q=%d nb_tx_q=%d queue number"
1199                                         " must be less than or equal to %d.",
1200                                         nb_rx_q, nb_tx_q,
1201                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1202                         return -EINVAL;
1203                 }
1204         } else {
1205                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1206                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1207                                           " not supported.");
1208                         return -EINVAL;
1209                 }
1210                 /* check configuration for vmdq+dcb mode */
1211                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1212                         const struct rte_eth_vmdq_dcb_conf *conf;
1213
1214                         if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1215                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1216                                                 TXGBE_VMDQ_DCB_NB_QUEUES);
1217                                 return -EINVAL;
1218                         }
1219                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1220                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1221                                conf->nb_queue_pools == ETH_32_POOLS)) {
1222                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1223                                                 " nb_queue_pools must be %d or %d.",
1224                                                 ETH_16_POOLS, ETH_32_POOLS);
1225                                 return -EINVAL;
1226                         }
1227                 }
1228                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1229                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
1230
1231                         if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1232                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1233                                                  TXGBE_VMDQ_DCB_NB_QUEUES);
1234                                 return -EINVAL;
1235                         }
1236                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1237                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1238                                conf->nb_queue_pools == ETH_32_POOLS)) {
1239                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1240                                                 " nb_queue_pools != %d and"
1241                                                 " nb_queue_pools != %d.",
1242                                                 ETH_16_POOLS, ETH_32_POOLS);
1243                                 return -EINVAL;
1244                         }
1245                 }
1246
1247                 /* For DCB mode check our configuration before we go further */
1248                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1249                         const struct rte_eth_dcb_rx_conf *conf;
1250
1251                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1252                         if (!(conf->nb_tcs == ETH_4_TCS ||
1253                                conf->nb_tcs == ETH_8_TCS)) {
1254                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1255                                                 " and nb_tcs != %d.",
1256                                                 ETH_4_TCS, ETH_8_TCS);
1257                                 return -EINVAL;
1258                         }
1259                 }
1260
1261                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1262                         const struct rte_eth_dcb_tx_conf *conf;
1263
1264                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1265                         if (!(conf->nb_tcs == ETH_4_TCS ||
1266                                conf->nb_tcs == ETH_8_TCS)) {
1267                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1268                                                 " and nb_tcs != %d.",
1269                                                 ETH_4_TCS, ETH_8_TCS);
1270                                 return -EINVAL;
1271                         }
1272                 }
1273         }
1274         return 0;
1275 }
1276
1277 static int
1278 txgbe_dev_configure(struct rte_eth_dev *dev)
1279 {
1280         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1281         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1282         int ret;
1283
1284         PMD_INIT_FUNC_TRACE();
1285
1286         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1287                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1288
1289         /* multiple queue mode checking */
1290         ret  = txgbe_check_mq_mode(dev);
1291         if (ret != 0) {
1292                 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1293                             ret);
1294                 return ret;
1295         }
1296
1297         /* set flag to update link status after init */
1298         intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1299
1300         /*
1301          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
1302          * allocation preconditions, it will be reset.
1303          */
1304         adapter->rx_bulk_alloc_allowed = true;
1305
1306         return 0;
1307 }
1308
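/* Enable the GPIO interrupt (bit 6, presumably the SFP module present/fault
 * signal) and route it through the misc interrupt cause.
 */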
1309 static void
1310 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1311 {
1312         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1313         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1314         uint32_t gpie;
1315
1316         gpie = rd32(hw, TXGBE_GPIOINTEN);
1317         gpie |= TXGBE_GPIOBIT_6;
1318         wr32(hw, TXGBE_GPIOINTEN, gpie);
1319         intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1320 }
1321
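/* Apply a TX rate limit to the queues of VF 'vf' selected by q_msk, rejecting
 * settings that would push the summed per-VF rates above the link speed.
 */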
1322 int
1323 txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
1324                         uint16_t tx_rate, uint64_t q_msk)
1325 {
1326         struct txgbe_hw *hw;
1327         struct txgbe_vf_info *vfinfo;
1328         struct rte_eth_link link;
1329         uint8_t  nb_q_per_pool;
1330         uint32_t queue_stride;
1331         uint32_t queue_idx, idx = 0, vf_idx;
1332         uint32_t queue_end;
1333         uint16_t total_rate = 0;
1334         struct rte_pci_device *pci_dev;
1335         int ret;
1336
1337         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1338         ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
1339         if (ret < 0)
1340                 return ret;
1341
1342         if (vf >= pci_dev->max_vfs)
1343                 return -EINVAL;
1344
1345         if (tx_rate > link.link_speed)
1346                 return -EINVAL;
1347
1348         if (q_msk == 0)
1349                 return 0;
1350
1351         hw = TXGBE_DEV_HW(dev);
1352         vfinfo = *(TXGBE_DEV_VFDATA(dev));
1353         nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1354         queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1355         queue_idx = vf * queue_stride;
1356         queue_end = queue_idx + nb_q_per_pool - 1;
1357         if (queue_end >= hw->mac.max_tx_queues)
1358                 return -EINVAL;
1359
1360         if (vfinfo) {
1361                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
1362                         if (vf_idx == vf)
1363                                 continue;
1364                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
1365                                 idx++)
1366                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
1367                 }
1368         } else {
1369                 return -EINVAL;
1370         }
1371
1372         /* Store tx_rate for this vf. */
1373         for (idx = 0; idx < nb_q_per_pool; idx++) {
1374                 if (((uint64_t)0x1 << idx) & q_msk) {
1375                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
1376                                 vfinfo[vf].tx_rate[idx] = tx_rate;
1377                         total_rate += tx_rate;
1378                 }
1379         }
1380
1381         if (total_rate > dev->data->dev_link.link_speed) {
1382                 /* Reset the stored TX rate of the VF if it would cause the
1383                  * total rate to exceed the link speed.
1384                  */
1385                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
1386                 return -EINVAL;
1387         }
1388
1389         /* Set ARBTXRATE of each queue/pool for vf X  */
1390         for (; queue_idx <= queue_end; queue_idx++) {
1391                 if (0x1 & q_msk)
1392                         txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
1393                 q_msk = q_msk >> 1;
1394         }
1395
1396         return 0;
1397 }
1398
1399 /*
1400  * Configure device link speed and setup link.
1401  * It returns 0 on success.
1402  */
1403 static int
1404 txgbe_dev_start(struct rte_eth_dev *dev)
1405 {
1406         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1407         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1408         struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1409         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1410         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1411         uint32_t intr_vector = 0;
1412         int err;
1413         bool link_up = false, negotiate = 0;
1414         uint32_t speed = 0;
1415         uint32_t allowed_speeds = 0;
1416         int mask = 0;
1417         int status;
1418         uint16_t vf, idx;
1419         uint32_t *link_speeds;
1420
1421         PMD_INIT_FUNC_TRACE();
1422
1423         /* TXGBE devices don't support:
1424          *    - half duplex (checked afterwards for valid speeds)
1425          *    - fixed speed: TODO implement
1426          */
1427         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1428                 PMD_INIT_LOG(ERR,
1429                 "Invalid link_speeds for port %u, fixed speed not supported",
1430                                 dev->data->port_id);
1431                 return -EINVAL;
1432         }
1433
1434         /* Stop the link setup handler before resetting the HW. */
1435         rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1436
1437         /* disable uio/vfio intr/eventfd mapping */
1438         rte_intr_disable(intr_handle);
1439
1440         /* stop adapter */
1441         hw->adapter_stopped = 0;
1442         txgbe_stop_hw(hw);
1443
1444         /* reinitialize adapter
1445          * this calls reset and start
1446          */
1447         hw->nb_rx_queues = dev->data->nb_rx_queues;
1448         hw->nb_tx_queues = dev->data->nb_tx_queues;
1449         status = txgbe_pf_reset_hw(hw);
1450         if (status != 0)
1451                 return -1;
1452         hw->mac.start_hw(hw);
1453         hw->mac.get_link_status = true;
1454
1455         /* configure PF module if SRIOV enabled */
1456         txgbe_pf_host_configure(dev);
1457
1458         txgbe_dev_phy_intr_setup(dev);
1459
1460         /* check and configure queue intr-vector mapping */
1461         if ((rte_intr_cap_multiple(intr_handle) ||
1462              !RTE_ETH_DEV_SRIOV(dev).active) &&
1463             dev->data->dev_conf.intr_conf.rxq != 0) {
1464                 intr_vector = dev->data->nb_rx_queues;
1465                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1466                         return -1;
1467         }
1468
1469         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1470                 intr_handle->intr_vec =
1471                         rte_zmalloc("intr_vec",
1472                                     dev->data->nb_rx_queues * sizeof(int), 0);
1473                 if (intr_handle->intr_vec == NULL) {
1474                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1475                                      " intr_vec", dev->data->nb_rx_queues);
1476                         return -ENOMEM;
1477                 }
1478         }
1479
1480         /* configure MSI-X for sleep until Rx interrupt */
1481         txgbe_configure_msix(dev);
1482
1483         /* initialize transmission unit */
1484         txgbe_dev_tx_init(dev);
1485
1486         /* This can fail when allocating mbufs for descriptor rings */
1487         err = txgbe_dev_rx_init(dev);
1488         if (err) {
1489                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1490                 goto error;
1491         }
1492
1493         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1494                 ETH_VLAN_EXTEND_MASK;
1495         err = txgbe_vlan_offload_config(dev, mask);
1496         if (err) {
1497                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1498                 goto error;
1499         }
1500
1501         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1502                 /* Enable vlan filtering for VMDq */
1503                 txgbe_vmdq_vlan_hw_filter_enable(dev);
1504         }
1505
1506         /* Configure DCB hw */
1507         txgbe_configure_pb(dev);
1508         txgbe_configure_port(dev);
1509         txgbe_configure_dcb(dev);
1510
1511         /* Restore vf rate limit */
1512         if (vfinfo != NULL) {
1513                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
1514                         for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
1515                                 if (vfinfo[vf].tx_rate[idx] != 0)
1516                                         txgbe_set_vf_rate_limit(dev, vf,
1517                                                 vfinfo[vf].tx_rate[idx],
1518                                                 1 << idx);
1519         }
1520
1521         err = txgbe_dev_rxtx_start(dev);
1522         if (err < 0) {
1523                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1524                 goto error;
1525         }
1526
1527         /* Skip link setup if loopback mode is enabled. */
1528         if (hw->mac.type == txgbe_mac_raptor &&
1529             dev->data->dev_conf.lpbk_mode)
1530                 goto skip_link_setup;
1531
1532         if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1533                 err = hw->mac.setup_sfp(hw);
1534                 if (err)
1535                         goto error;
1536         }
1537
1538         if (hw->phy.media_type == txgbe_media_type_copper) {
1539                 /* Turn on the copper */
1540                 hw->phy.set_phy_power(hw, true);
1541         } else {
1542                 /* Turn on the laser */
1543                 hw->mac.enable_tx_laser(hw);
1544         }
1545
1546         err = hw->mac.check_link(hw, &speed, &link_up, 0);
1547         if (err)
1548                 goto error;
1549         dev->data->dev_link.link_status = link_up;
1550
1551         err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1552         if (err)
1553                 goto error;
1554
1555         allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
1556                         ETH_LINK_SPEED_10G;
1557
1558         link_speeds = &dev->data->dev_conf.link_speeds;
1559         if (*link_speeds & ~allowed_speeds) {
1560                 PMD_INIT_LOG(ERR, "Invalid link setting");
1561                 goto error;
1562         }
1563
1564         speed = 0x0;
1565         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
1566                 speed = (TXGBE_LINK_SPEED_100M_FULL |
1567                          TXGBE_LINK_SPEED_1GB_FULL |
1568                          TXGBE_LINK_SPEED_10GB_FULL);
1569         } else {
1570                 if (*link_speeds & ETH_LINK_SPEED_10G)
1571                         speed |= TXGBE_LINK_SPEED_10GB_FULL;
1572                 if (*link_speeds & ETH_LINK_SPEED_5G)
1573                         speed |= TXGBE_LINK_SPEED_5GB_FULL;
1574                 if (*link_speeds & ETH_LINK_SPEED_2_5G)
1575                         speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1576                 if (*link_speeds & ETH_LINK_SPEED_1G)
1577                         speed |= TXGBE_LINK_SPEED_1GB_FULL;
1578                 if (*link_speeds & ETH_LINK_SPEED_100M)
1579                         speed |= TXGBE_LINK_SPEED_100M_FULL;
1580         }
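        /*
         * For example, link_speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G
         * resolves to speed = TXGBE_LINK_SPEED_10GB_FULL |
         * TXGBE_LINK_SPEED_1GB_FULL, so only those two rates are passed to
         * setup_link(); ETH_LINK_SPEED_AUTONEG (0) advertises 100M, 1G and
         * 10G full duplex.
         */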
1581
1582         err = hw->mac.setup_link(hw, speed, link_up);
1583         if (err)
1584                 goto error;
1585
1586 skip_link_setup:
1587
1588         if (rte_intr_allow_others(intr_handle)) {
1589                 /* check if lsc interrupt is enabled */
1590                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1591                         txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1592                 else
1593                         txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1594                 txgbe_dev_macsec_interrupt_setup(dev);
1595                 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1596         } else {
1597                 rte_intr_callback_unregister(intr_handle,
1598                                              txgbe_dev_interrupt_handler, dev);
1599                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1600                         PMD_INIT_LOG(INFO, "lsc won't be enabled because of"
1601                                      " no intr multiplex");
1602         }
1603
1604         /* check if rxq interrupt is enabled */
1605         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1606             rte_intr_dp_is_en(intr_handle))
1607                 txgbe_dev_rxq_interrupt_setup(dev);
1608
1609         /* enable uio/vfio intr/eventfd mapping */
1610         rte_intr_enable(intr_handle);
1611
1612         /* resume enabled intr since hw reset */
1613         txgbe_enable_intr(dev);
1614
1615         /*
1616          * Update link status right before returning, because it may
1617          * start the link configuration process in a separate thread.
1618          */
1619         txgbe_dev_link_update(dev, 0);
1620
1621         wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1622
1623         txgbe_read_stats_registers(hw, hw_stats);
1624         hw->offset_loaded = 1;
1625
1626         return 0;
1627
1628 error:
1629         PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1630         txgbe_dev_clear_queues(dev);
1631         return -EIO;
1632 }
1633
1634 /*
1635  * Stop device: disable rx and tx functions to allow for reconfiguring.
1636  */
1637 static int
1638 txgbe_dev_stop(struct rte_eth_dev *dev)
1639 {
1640         struct rte_eth_link link;
1641         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1642         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1643         struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1644         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1645         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1646         int vf;
1647
1648         if (hw->adapter_stopped)
1649                 return 0;
1650
1651         PMD_INIT_FUNC_TRACE();
1652
1653         rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1654
1655         /* disable interrupts */
1656         txgbe_disable_intr(hw);
1657
1658         /* reset the NIC */
1659         txgbe_pf_reset_hw(hw);
1660         hw->adapter_stopped = 0;
1661
1662         /* stop adapter */
1663         txgbe_stop_hw(hw);
1664
1665         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1666                 vfinfo[vf].clear_to_send = false;
1667
1668         if (hw->phy.media_type == txgbe_media_type_copper) {
1669                 /* Turn off the copper */
1670                 hw->phy.set_phy_power(hw, false);
1671         } else {
1672                 /* Turn off the laser */
1673                 hw->mac.disable_tx_laser(hw);
1674         }
1675
1676         txgbe_dev_clear_queues(dev);
1677
1678         /* Clear stored conf */
1679         dev->data->scattered_rx = 0;
1680         dev->data->lro = 0;
1681
1682         /* Clear recorded link status */
1683         memset(&link, 0, sizeof(link));
1684         rte_eth_linkstatus_set(dev, &link);
1685
1686         if (!rte_intr_allow_others(intr_handle))
1687                 /* restore the default interrupt handler */
1688                 rte_intr_callback_register(intr_handle,
1689                                            txgbe_dev_interrupt_handler,
1690                                            (void *)dev);
1691
1692         /* Clean datapath event and queue/vec mapping */
1693         rte_intr_efd_disable(intr_handle);
1694         if (intr_handle->intr_vec != NULL) {
1695                 rte_free(intr_handle->intr_vec);
1696                 intr_handle->intr_vec = NULL;
1697         }
1698
1699         adapter->rss_reta_updated = 0;
1700         wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1701
1702         hw->adapter_stopped = true;
1703         dev->data->dev_started = 0;
1704
1705         return 0;
1706 }
1707
1708 /*
1709  * Set device link up: enable tx.
1710  */
1711 static int
1712 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1713 {
1714         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1715
1716         if (hw->phy.media_type == txgbe_media_type_copper) {
1717                 /* Turn on the copper */
1718                 hw->phy.set_phy_power(hw, true);
1719         } else {
1720                 /* Turn on the laser */
1721                 hw->mac.enable_tx_laser(hw);
1722                 txgbe_dev_link_update(dev, 0);
1723         }
1724
1725         return 0;
1726 }
1727
1728 /*
1729  * Set device link down: disable tx.
1730  */
1731 static int
1732 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1733 {
1734         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1735
1736         if (hw->phy.media_type == txgbe_media_type_copper) {
1737                 /* Turn off the copper */
1738                 hw->phy.set_phy_power(hw, false);
1739         } else {
1740                 /* Turn off the laser */
1741                 hw->mac.disable_tx_laser(hw);
1742                 txgbe_dev_link_update(dev, 0);
1743         }
1744
1745         return 0;
1746 }
1747
1748 /*
1749  * Reset and stop device.
1750  */
1751 static int
1752 txgbe_dev_close(struct rte_eth_dev *dev)
1753 {
1754         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1755         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1756         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1757         int retries = 0;
1758         int ret;
1759
1760         PMD_INIT_FUNC_TRACE();
1761
1762         txgbe_pf_reset_hw(hw);
1763
1764         ret = txgbe_dev_stop(dev);
1765
1766         txgbe_dev_free_queues(dev);
1767
1768         /* reprogram the RAR[0] in case the user changed it. */
1769         txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1770
1771         /* Unlock any pending hardware semaphore */
1772         txgbe_swfw_lock_reset(hw);
1773
1774         /* disable uio intr before callback unregister */
1775         rte_intr_disable(intr_handle);
1776
1777         do {
1778                 ret = rte_intr_callback_unregister(intr_handle,
1779                                 txgbe_dev_interrupt_handler, dev);
1780                 if (ret >= 0 || ret == -ENOENT) {
1781                         break;
1782                 } else if (ret != -EAGAIN) {
1783                         PMD_INIT_LOG(ERR,
1784                                 "intr callback unregister failed: %d",
1785                                 ret);
1786                 }
1787                 rte_delay_ms(100);
1788         } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1789
1790         /* cancel the delayed handler before removing the device */
1791         rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1792
1793         /* uninitialize PF if max_vfs not zero */
1794         txgbe_pf_host_uninit(dev);
1795
1796         rte_free(dev->data->mac_addrs);
1797         dev->data->mac_addrs = NULL;
1798
1799         rte_free(dev->data->hash_mac_addrs);
1800         dev->data->hash_mac_addrs = NULL;
1801
1802         /* Remove all ntuple filters of the device */
1803         txgbe_ntuple_filter_uninit(dev);
1804
1805         return ret;
1806 }
1807
1808 /*
1809  * Reset PF device.
1810  */
1811 static int
1812 txgbe_dev_reset(struct rte_eth_dev *dev)
1813 {
1814         int ret;
1815
1816         /* When a DPDK PMD PF begins to reset the PF port, it should notify
1817          * all its VFs so that they stay aligned with it. The detailed
1818          * notification mechanism is PMD specific; for the txgbe PF it is
1819          * rather complex. To avoid unexpected behavior in the VFs, reset of
1820          * the PF with SR-IOV active is currently not supported; it may come later.
1821          */
1822         if (dev->data->sriov.active)
1823                 return -ENOTSUP;
1824
1825         ret = eth_txgbe_dev_uninit(dev);
1826         if (ret)
1827                 return ret;
1828
1829         ret = eth_txgbe_dev_init(dev, NULL);
1830
1831         return ret;
1832 }
1833
1834 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1835         {                                                       \
1836                 uint32_t current_counter = rd32(hw, reg);       \
1837                 if (current_counter < last_counter)             \
1838                         current_counter += 0x100000000LL;       \
1839                 if (!hw->offset_loaded)                         \
1840                         last_counter = current_counter;         \
1841                 counter = current_counter - last_counter;       \
1842                 counter &= 0xFFFFFFFFLL;                        \
1843         }
1844
1845 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1846         {                                                                \
1847                 uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1848                 uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1849                 uint64_t current_counter = (current_counter_msb << 32) | \
1850                         current_counter_lsb;                             \
1851                 if (current_counter < last_counter)                      \
1852                         current_counter += 0x1000000000LL;               \
1853                 if (!hw->offset_loaded)                                  \
1854                         last_counter = current_counter;                  \
1855                 counter = current_counter - last_counter;                \
1856                 counter &= 0xFFFFFFFFFLL;                                \
1857         }
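/*
 * Both macros report the count accumulated since the last baseline capture:
 * when hw->offset_loaded is 0 the current raw value is stored in the qp_last
 * field, and later reads return current - baseline, compensating for a single
 * wrap of the narrow hardware counter.  Illustrative 36-bit case, assuming
 * the qp_last fields are 64-bit: baseline = 0xFFFFFFFF0 and the registers now
 * read msb = 0x0, lsb = 0x10; since current < baseline, 0x1000000000 is added
 * and the reported value is 0x20.
 */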
1858
1859 void
1860 txgbe_read_stats_registers(struct txgbe_hw *hw,
1861                            struct txgbe_hw_stats *hw_stats)
1862 {
1863         unsigned int i;
1864
1865         /* QP Stats */
1866         for (i = 0; i < hw->nb_rx_queues; i++) {
1867                 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
1868                         hw->qp_last[i].rx_qp_packets,
1869                         hw_stats->qp[i].rx_qp_packets);
1870                 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
1871                         hw->qp_last[i].rx_qp_bytes,
1872                         hw_stats->qp[i].rx_qp_bytes);
1873                 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
1874                         hw->qp_last[i].rx_qp_mc_packets,
1875                         hw_stats->qp[i].rx_qp_mc_packets);
1876         }
1877
1878         for (i = 0; i < hw->nb_tx_queues; i++) {
1879                 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
1880                         hw->qp_last[i].tx_qp_packets,
1881                         hw_stats->qp[i].tx_qp_packets);
1882                 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
1883                         hw->qp_last[i].tx_qp_bytes,
1884                         hw_stats->qp[i].tx_qp_bytes);
1885         }
1886         /* PB Stats */
1887         for (i = 0; i < TXGBE_MAX_UP; i++) {
1888                 hw_stats->up[i].rx_up_xon_packets +=
1889                                 rd32(hw, TXGBE_PBRXUPXON(i));
1890                 hw_stats->up[i].rx_up_xoff_packets +=
1891                                 rd32(hw, TXGBE_PBRXUPXOFF(i));
1892                 hw_stats->up[i].tx_up_xon_packets +=
1893                                 rd32(hw, TXGBE_PBTXUPXON(i));
1894                 hw_stats->up[i].tx_up_xoff_packets +=
1895                                 rd32(hw, TXGBE_PBTXUPXOFF(i));
1896                 hw_stats->up[i].tx_up_xon2off_packets +=
1897                                 rd32(hw, TXGBE_PBTXUPOFF(i));
1898                 hw_stats->up[i].rx_up_dropped +=
1899                                 rd32(hw, TXGBE_PBRXMISS(i));
1900         }
1901         hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
1902         hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
1903         hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
1904         hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
1905
1906         /* DMA Stats */
1907         hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
1908         hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
1909
1910         hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
1911         hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
1912         hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
1913
1914         /* MAC Stats */
1915         hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
1916         hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
1917         hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
1918
1919         hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
1920         hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
1921         hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
1922
1923         hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
1924         hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
1925
1926         hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
1927         hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
1928         hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
1929         hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
1930         hw_stats->rx_size_512_to_1023_packets +=
1931                         rd64(hw, TXGBE_MACRX512TO1023L);
1932         hw_stats->rx_size_1024_to_max_packets +=
1933                         rd64(hw, TXGBE_MACRX1024TOMAXL);
1934         hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
1935         hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
1936         hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
1937         hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
1938         hw_stats->tx_size_512_to_1023_packets +=
1939                         rd64(hw, TXGBE_MACTX512TO1023L);
1940         hw_stats->tx_size_1024_to_max_packets +=
1941                         rd64(hw, TXGBE_MACTX1024TOMAXL);
1942
1943         hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
1944         hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
1945         hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
1946
1947         /* MNG Stats */
1948         hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
1949         hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
1950         hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
1951         hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
1952
1953         /* FCoE Stats */
1954         hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
1955         hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
1956         hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
1957         hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
1958         hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
1959         hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
1960         hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
1961
1962         /* Flow Director Stats */
1963         hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
1964         hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
1965         hw_stats->flow_director_added_filters +=
1966                 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
1967         hw_stats->flow_director_removed_filters +=
1968                 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
1969         hw_stats->flow_director_filter_add_errors +=
1970                 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
1971         hw_stats->flow_director_filter_remove_errors +=
1972                 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
1973
1974         /* MACsec Stats */
1975         hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
1976         hw_stats->tx_macsec_pkts_encrypted +=
1977                         rd32(hw, TXGBE_LSECTX_ENCPKT);
1978         hw_stats->tx_macsec_pkts_protected +=
1979                         rd32(hw, TXGBE_LSECTX_PROTPKT);
1980         hw_stats->tx_macsec_octets_encrypted +=
1981                         rd32(hw, TXGBE_LSECTX_ENCOCT);
1982         hw_stats->tx_macsec_octets_protected +=
1983                         rd32(hw, TXGBE_LSECTX_PROTOCT);
1984         hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
1985         hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
1986         hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
1987         hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
1988         hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
1989         hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
1990         hw_stats->rx_macsec_sc_pkts_unchecked +=
1991                         rd32(hw, TXGBE_LSECRX_UNCHKPKT);
1992         hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
1993         hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
1994         for (i = 0; i < 2; i++) {
1995                 hw_stats->rx_macsec_sa_pkts_ok +=
1996                         rd32(hw, TXGBE_LSECRX_OKPKT(i));
1997                 hw_stats->rx_macsec_sa_pkts_invalid +=
1998                         rd32(hw, TXGBE_LSECRX_INVPKT(i));
1999                 hw_stats->rx_macsec_sa_pkts_notvalid +=
2000                         rd32(hw, TXGBE_LSECRX_BADPKT(i));
2001         }
2002         hw_stats->rx_macsec_sa_pkts_unusedsa +=
2003                         rd32(hw, TXGBE_LSECRX_INVSAPKT);
2004         hw_stats->rx_macsec_sa_pkts_notusingsa +=
2005                         rd32(hw, TXGBE_LSECRX_BADSAPKT);
2006
2007         hw_stats->rx_total_missed_packets = 0;
2008         for (i = 0; i < TXGBE_MAX_UP; i++) {
2009                 hw_stats->rx_total_missed_packets +=
2010                         hw_stats->up[i].rx_up_dropped;
2011         }
2012 }
2013
2014 static int
2015 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2016 {
2017         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2018         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2019         struct txgbe_stat_mappings *stat_mappings =
2020                         TXGBE_DEV_STAT_MAPPINGS(dev);
2021         uint32_t i, j;
2022
2023         txgbe_read_stats_registers(hw, hw_stats);
2024
2025         if (stats == NULL)
2026                 return -EINVAL;
2027
2028         /* Fill out the rte_eth_stats statistics structure */
2029         stats->ipackets = hw_stats->rx_packets;
2030         stats->ibytes = hw_stats->rx_bytes;
2031         stats->opackets = hw_stats->tx_packets;
2032         stats->obytes = hw_stats->tx_bytes;
2033
2034         memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
2035         memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
2036         memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
2037         memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
2038         memset(&stats->q_errors, 0, sizeof(stats->q_errors));
2039         for (i = 0; i < TXGBE_MAX_QP; i++) {
2040                 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
2041                 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
2042                 uint32_t q_map;
2043
2044                 q_map = (stat_mappings->rqsm[n] >> offset)
2045                                 & QMAP_FIELD_RESERVED_BITS_MASK;
2046                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2047                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2048                 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
2049                 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
2050
2051                 q_map = (stat_mappings->tqsm[n] >> offset)
2052                                 & QMAP_FIELD_RESERVED_BITS_MASK;
2053                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2054                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2055                 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
2056                 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
2057         }
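        /*
         * rqsm/tqsm hold the queue-to-counter mapping: each 32-bit entry
         * packs NB_QMAP_FIELDS_PER_QSM_REG 8-bit fields, so the counter
         * index for queue i sits in entry i / NB_QMAP_FIELDS_PER_QSM_REG
         * at bit offset (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8.  The index
         * is then folded into the RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue
         * slots of rte_eth_stats, so several hardware queues may share
         * one q_ipackets/q_opackets slot.
         */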
2058
2059         /* Rx Errors */
2060         stats->imissed  = hw_stats->rx_total_missed_packets;
2061         stats->ierrors  = hw_stats->rx_crc_errors +
2062                           hw_stats->rx_mac_short_packet_dropped +
2063                           hw_stats->rx_length_errors +
2064                           hw_stats->rx_undersize_errors +
2065                           hw_stats->rx_oversize_errors +
2066                           hw_stats->rx_drop_packets +
2067                           hw_stats->rx_illegal_byte_errors +
2068                           hw_stats->rx_error_bytes +
2069                           hw_stats->rx_fragment_errors +
2070                           hw_stats->rx_fcoe_crc_errors +
2071                           hw_stats->rx_fcoe_mbuf_allocation_errors;
2072
2073         /* Tx Errors */
2074         stats->oerrors  = 0;
2075         return 0;
2076 }
2077
2078 static int
2079 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2080 {
2081         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2082         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2083
2084         /* HW registers are cleared on read */
2085         hw->offset_loaded = 0;
2086         txgbe_dev_stats_get(dev, NULL);
2087         hw->offset_loaded = 1;
2088
2089         /* Reset software totals */
2090         memset(hw_stats, 0, sizeof(*hw_stats));
2091
2092         return 0;
2093 }
2094
2095 /* This function calculates the number of xstats based on the current config */
2096 static unsigned
2097 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
2098 {
2099         int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2100         return TXGBE_NB_HW_STATS +
2101                TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
2102                TXGBE_NB_QP_STATS * nb_queues;
2103 }
2104
2105 static inline int
2106 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2107 {
2108         int nb, st;
2109
2110         /* Extended stats from txgbe_hw_stats */
2111         if (id < TXGBE_NB_HW_STATS) {
2112                 snprintf(name, size, "[hw]%s",
2113                         rte_txgbe_stats_strings[id].name);
2114                 return 0;
2115         }
2116         id -= TXGBE_NB_HW_STATS;
2117
2118         /* Priority Stats */
2119         if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2120                 nb = id / TXGBE_NB_UP_STATS;
2121                 st = id % TXGBE_NB_UP_STATS;
2122                 snprintf(name, size, "[p%u]%s", nb,
2123                         rte_txgbe_up_strings[st].name);
2124                 return 0;
2125         }
2126         id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2127
2128         /* Queue Stats */
2129         if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2130                 nb = id / TXGBE_NB_QP_STATS;
2131                 st = id % TXGBE_NB_QP_STATS;
2132                 snprintf(name, size, "[q%u]%s", nb,
2133                         rte_txgbe_qp_strings[st].name);
2134                 return 0;
2135         }
2136         id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2137
2138         return -(int)(id + 1);
2139 }
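/*
 * txgbe_get_name_by_id() and txgbe_get_offset_by_id() share the same flat
 * id layout: ids [0, TXGBE_NB_HW_STATS) are device-wide "[hw]" counters,
 * followed by TXGBE_MAX_UP blocks of TXGBE_NB_UP_STATS per-priority "[p<n>]"
 * counters, followed by TXGBE_MAX_QP blocks of TXGBE_NB_QP_STATS per-queue
 * "[q<n>]" counters.  For example, id = TXGBE_NB_HW_STATS + TXGBE_NB_UP_STATS
 * resolves to the first counter of priority 1.
 */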
2140
2141 static inline int
2142 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2143 {
2144         int nb, st;
2145
2146         /* Extended stats from txgbe_hw_stats */
2147         if (id < TXGBE_NB_HW_STATS) {
2148                 *offset = rte_txgbe_stats_strings[id].offset;
2149                 return 0;
2150         }
2151         id -= TXGBE_NB_HW_STATS;
2152
2153         /* Priority Stats */
2154         if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2155                 nb = id / TXGBE_NB_UP_STATS;
2156                 st = id % TXGBE_NB_UP_STATS;
2157                 *offset = rte_txgbe_up_strings[st].offset +
2158                         nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
2159                 return 0;
2160         }
2161         id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2162
2163         /* Queue Stats */
2164         if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2165                 nb = id / TXGBE_NB_QP_STATS;
2166                 st = id % TXGBE_NB_QP_STATS;
2167                 *offset = rte_txgbe_qp_strings[st].offset +
2168                         nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2169                 return 0;
2170         }
2171
2172         return -1;
2173 }
2174
2175 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2176         struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2177 {
2178         unsigned int i, count;
2179
2180         count = txgbe_xstats_calc_num(dev);
2181         if (xstats_names == NULL)
2182                 return count;
2183
2184         /* Note: limit >= cnt_stats checked upstream
2185          * in rte_eth_xstats_names()
2186          */
2187         limit = min(limit, count);
2188
2189         /* Extended stats from txgbe_hw_stats */
2190         for (i = 0; i < limit; i++) {
2191                 if (txgbe_get_name_by_id(i, xstats_names[i].name,
2192                         sizeof(xstats_names[i].name))) {
2193                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2194                         break;
2195                 }
2196         }
2197
2198         return i;
2199 }
2200
2201 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2202         struct rte_eth_xstat_name *xstats_names,
2203         const uint64_t *ids,
2204         unsigned int limit)
2205 {
2206         unsigned int i;
2207
2208         if (ids == NULL)
2209                 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2210
2211         for (i = 0; i < limit; i++) {
2212                 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2213                                 sizeof(xstats_names[i].name))) {
2214                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2215                         return -1;
2216                 }
2217         }
2218
2219         return i;
2220 }
2221
2222 static int
2223 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2224                                          unsigned int limit)
2225 {
2226         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2227         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2228         unsigned int i, count;
2229
2230         txgbe_read_stats_registers(hw, hw_stats);
2231
2232         /* If this is a reset, xstats is NULL, and we have cleared the
2233          * registers by reading them.
2234          */
2235         count = txgbe_xstats_calc_num(dev);
2236         if (xstats == NULL)
2237                 return count;
2238
2239         limit = min(limit, txgbe_xstats_calc_num(dev));
2240
2241         /* Extended stats from txgbe_hw_stats */
2242         for (i = 0; i < limit; i++) {
2243                 uint32_t offset = 0;
2244
2245                 if (txgbe_get_offset_by_id(i, &offset)) {
2246                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2247                         break;
2248                 }
2249                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2250                 xstats[i].id = i;
2251         }
2252
2253         return i;
2254 }
2255
2256 static int
2257 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2258                                          unsigned int limit)
2259 {
2260         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2261         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2262         unsigned int i, count;
2263
2264         txgbe_read_stats_registers(hw, hw_stats);
2265
2266         /* If this is a reset, values is NULL, and we have cleared the
2267          * registers by reading them.
2268          */
2269         count = txgbe_xstats_calc_num(dev);
2270         if (values == NULL)
2271                 return count;
2272
2273         limit = min(limit, txgbe_xstats_calc_num(dev));
2274
2275         /* Extended stats from txgbe_hw_stats */
2276         for (i = 0; i < limit; i++) {
2277                 uint32_t offset;
2278
2279                 if (txgbe_get_offset_by_id(i, &offset)) {
2280                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2281                         break;
2282                 }
2283                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2284         }
2285
2286         return i;
2287 }
2288
2289 static int
2290 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2291                 uint64_t *values, unsigned int limit)
2292 {
2293         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2294         unsigned int i;
2295
2296         if (ids == NULL)
2297                 return txgbe_dev_xstats_get_(dev, values, limit);
2298
2299         for (i = 0; i < limit; i++) {
2300                 uint32_t offset;
2301
2302                 if (txgbe_get_offset_by_id(ids[i], &offset)) {
2303                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2304                         break;
2305                 }
2306                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2307         }
2308
2309         return i;
2310 }
2311
2312 static int
2313 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2314 {
2315         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2316         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2317
2318         /* HW registers are cleared on read */
2319         hw->offset_loaded = 0;
2320         txgbe_read_stats_registers(hw, hw_stats);
2321         hw->offset_loaded = 1;
2322
2323         /* Reset software totals */
2324         memset(hw_stats, 0, sizeof(*hw_stats));
2325
2326         return 0;
2327 }
2328
2329 static int
2330 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2331 {
2332         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2333         u16 eeprom_verh, eeprom_verl;
2334         u32 etrack_id;
2335         int ret;
2336
2337         hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
2338         hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
2339
2340         etrack_id = (eeprom_verh << 16) | eeprom_verl;
2341         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2342
2343         ret += 1; /* add the size of '\0' */
2344         if (fw_size < (u32)ret)
2345                 return ret;
2346         else
2347                 return 0;
2348 }
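/*
 * The reported firmware version is the 32-bit EEPROM etrack id built from
 * the high and low version words; e.g. eeprom_verh = 0x0001 and
 * eeprom_verl = 0x0203 yield the string "0x00010203".  Following the ethdev
 * fw_version_get convention, 0 is returned when the caller's buffer is large
 * enough, otherwise the required size including the terminating '\0'.
 */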
2349
2350 static int
2351 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2352 {
2353         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2354         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2355
2356         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2357         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2358         dev_info->min_rx_bufsize = 1024;
2359         dev_info->max_rx_pktlen = 15872;
2360         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2361         dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2362         dev_info->max_vfs = pci_dev->max_vfs;
2363         dev_info->max_vmdq_pools = ETH_64_POOLS;
2364         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2365         dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2366         dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2367                                      dev_info->rx_queue_offload_capa);
2368         dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2369         dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2370
2371         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2372                 .rx_thresh = {
2373                         .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2374                         .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2375                         .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2376                 },
2377                 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2378                 .rx_drop_en = 0,
2379                 .offloads = 0,
2380         };
2381
2382         dev_info->default_txconf = (struct rte_eth_txconf) {
2383                 .tx_thresh = {
2384                         .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2385                         .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2386                         .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2387                 },
2388                 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2389                 .offloads = 0,
2390         };
2391
2392         dev_info->rx_desc_lim = rx_desc_lim;
2393         dev_info->tx_desc_lim = tx_desc_lim;
2394
2395         dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2396         dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2397         dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2398
2399         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2400         dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2401
2402         /* Driver-preferred Rx/Tx parameters */
2403         dev_info->default_rxportconf.burst_size = 32;
2404         dev_info->default_txportconf.burst_size = 32;
2405         dev_info->default_rxportconf.nb_queues = 1;
2406         dev_info->default_txportconf.nb_queues = 1;
2407         dev_info->default_rxportconf.ring_size = 256;
2408         dev_info->default_txportconf.ring_size = 256;
2409
2410         return 0;
2411 }
2412
2413 const uint32_t *
2414 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2415 {
2416         if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2417             dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2418             dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2419             dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2420                 return txgbe_get_supported_ptypes();
2421
2422         return NULL;
2423 }
2424
2425 void
2426 txgbe_dev_setup_link_alarm_handler(void *param)
2427 {
2428         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2429         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2430         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2431         u32 speed;
2432         bool autoneg = false;
2433
2434         speed = hw->phy.autoneg_advertised;
2435         if (!speed)
2436                 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2437
2438         hw->mac.setup_link(hw, speed, true);
2439
2440         intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2441 }
2442
2443 /* return 0 means link status changed, -1 means not changed */
2444 int
2445 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2446                             int wait_to_complete)
2447 {
2448         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2449         struct rte_eth_link link;
2450         u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2451         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2452         bool link_up;
2453         int err;
2454         int wait = 1;
2455
2456         memset(&link, 0, sizeof(link));
2457         link.link_status = ETH_LINK_DOWN;
2458         link.link_speed = ETH_SPEED_NUM_NONE;
2459         link.link_duplex = ETH_LINK_HALF_DUPLEX;
2460         link.link_autoneg = ETH_LINK_AUTONEG;
2461
2462         hw->mac.get_link_status = true;
2463
2464         if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2465                 return rte_eth_linkstatus_set(dev, &link);
2466
2467         /* check if it needs to wait to complete, if lsc interrupt is enabled */
2468         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2469                 wait = 0;
2470
2471         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2472
2473         if (err != 0) {
2474                 link.link_speed = ETH_SPEED_NUM_100M;
2475                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2476                 return rte_eth_linkstatus_set(dev, &link);
2477         }
2478
2479         if (link_up == 0) {
2480                 if (hw->phy.media_type == txgbe_media_type_fiber) {
2481                         intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2482                         rte_eal_alarm_set(10,
2483                                 txgbe_dev_setup_link_alarm_handler, dev);
2484                 }
2485                 return rte_eth_linkstatus_set(dev, &link);
2486         }
2487
2488         intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2489         link.link_status = ETH_LINK_UP;
2490         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2491
2492         switch (link_speed) {
2493         default:
2494         case TXGBE_LINK_SPEED_UNKNOWN:
2495                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2496                 link.link_speed = ETH_SPEED_NUM_100M;
2497                 break;
2498
2499         case TXGBE_LINK_SPEED_100M_FULL:
2500                 link.link_speed = ETH_SPEED_NUM_100M;
2501                 break;
2502
2503         case TXGBE_LINK_SPEED_1GB_FULL:
2504                 link.link_speed = ETH_SPEED_NUM_1G;
2505                 break;
2506
2507         case TXGBE_LINK_SPEED_2_5GB_FULL:
2508                 link.link_speed = ETH_SPEED_NUM_2_5G;
2509                 break;
2510
2511         case TXGBE_LINK_SPEED_5GB_FULL:
2512                 link.link_speed = ETH_SPEED_NUM_5G;
2513                 break;
2514
2515         case TXGBE_LINK_SPEED_10GB_FULL:
2516                 link.link_speed = ETH_SPEED_NUM_10G;
2517                 break;
2518         }
2519
2520         return rte_eth_linkstatus_set(dev, &link);
2521 }
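/*
 * Fiber ports that report link down are retried asynchronously:
 * txgbe_dev_link_update_share() sets TXGBE_FLAG_NEED_LINK_CONFIG and
 * schedules txgbe_dev_setup_link_alarm_handler() 10 us later through
 * rte_eal_alarm_set().  While that flag is set, the function keeps
 * reporting link down without querying the hardware again.
 */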
2522
2523 static int
2524 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2525 {
2526         return txgbe_dev_link_update_share(dev, wait_to_complete);
2527 }
2528
2529 static int
2530 txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2531 {
2532         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2533         uint32_t fctrl;
2534
2535         fctrl = rd32(hw, TXGBE_PSRCTL);
2536         fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2537         wr32(hw, TXGBE_PSRCTL, fctrl);
2538
2539         return 0;
2540 }
2541
2542 static int
2543 txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2544 {
2545         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2546         uint32_t fctrl;
2547
2548         fctrl = rd32(hw, TXGBE_PSRCTL);
2549         fctrl &= (~TXGBE_PSRCTL_UCP);
2550         if (dev->data->all_multicast == 1)
2551                 fctrl |= TXGBE_PSRCTL_MCP;
2552         else
2553                 fctrl &= (~TXGBE_PSRCTL_MCP);
2554         wr32(hw, TXGBE_PSRCTL, fctrl);
2555
2556         return 0;
2557 }
2558
2559 static int
2560 txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2561 {
2562         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2563         uint32_t fctrl;
2564
2565         fctrl = rd32(hw, TXGBE_PSRCTL);
2566         fctrl |= TXGBE_PSRCTL_MCP;
2567         wr32(hw, TXGBE_PSRCTL, fctrl);
2568
2569         return 0;
2570 }
2571
2572 static int
2573 txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2574 {
2575         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2576         uint32_t fctrl;
2577
2578         if (dev->data->promiscuous == 1)
2579                 return 0; /* must remain in all_multicast mode */
2580
2581         fctrl = rd32(hw, TXGBE_PSRCTL);
2582         fctrl &= (~TXGBE_PSRCTL_MCP);
2583         wr32(hw, TXGBE_PSRCTL, fctrl);
2584
2585         return 0;
2586 }
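/*
 * The four filter-control handlers above all toggle bits in TXGBE_PSRCTL:
 * promiscuous mode sets both the unicast (UCP) and multicast (MCP)
 * promiscuous bits, while all-multicast sets only MCP.  Hence disabling
 * promiscuous keeps MCP when all_multicast is still on, and disabling
 * all-multicast is skipped while promiscuous mode remains active.
 */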
2587
2588 /**
2589  * It clears the interrupt causes and enables the interrupt.
2590  * It will be called only once during NIC initialization.
2591  *
2592  * @param dev
2593  *  Pointer to struct rte_eth_dev.
2594  * @param on
2595  *  Enable or Disable.
2596  *
2597  * @return
2598  *  - On success, zero.
2599  *  - On failure, a negative value.
2600  */
2601 static int
2602 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2603 {
2604         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2605
2606         txgbe_dev_link_status_print(dev);
2607         if (on)
2608                 intr->mask_misc |= TXGBE_ICRMISC_LSC;
2609         else
2610                 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2611
2612         return 0;
2613 }
2614
2615 /**
2616  * It clears the interrupt causes and enables the interrupt.
2617  * It will be called only once during NIC initialization.
2618  *
2619  * @param dev
2620  *  Pointer to struct rte_eth_dev.
2621  *
2622  * @return
2623  *  - On success, zero.
2624  *  - On failure, a negative value.
2625  */
2626 static int
2627 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2628 {
2629         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2630
2631         intr->mask[0] |= TXGBE_ICR_MASK;
2632         intr->mask[1] |= TXGBE_ICR_MASK;
2633
2634         return 0;
2635 }
2636
2637 /**
2638  * It clears the interrupt causes and enables the interrupt.
2639  * It will be called only once during NIC initialization.
2640  *
2641  * @param dev
2642  *  Pointer to struct rte_eth_dev.
2643  *
2644  * @return
2645  *  - On success, zero.
2646  *  - On failure, a negative value.
2647  */
2648 static int
2649 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2650 {
2651         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2652
2653         intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2654
2655         return 0;
2656 }
2657
2658 /*
2659  * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
2660  *
2661  * @param dev
2662  *  Pointer to struct rte_eth_dev.
2663  *
2664  * @return
2665  *  - On success, zero.
2666  *  - On failure, a negative value.
2667  */
2668 static int
2669 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2670 {
2671         uint32_t eicr;
2672         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2673         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2674
2675         /* clear all cause mask */
2676         txgbe_disable_intr(hw);
2677
2678         /* read-on-clear nic registers here */
2679         eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2680         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2681
2682         intr->flags = 0;
2683
2684         /* set flag for async link update */
2685         if (eicr & TXGBE_ICRMISC_LSC)
2686                 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2687
2688         if (eicr & TXGBE_ICRMISC_VFMBX)
2689                 intr->flags |= TXGBE_FLAG_MAILBOX;
2690
2691         if (eicr & TXGBE_ICRMISC_LNKSEC)
2692                 intr->flags |= TXGBE_FLAG_MACSEC;
2693
2694         if (eicr & TXGBE_ICRMISC_GPIO)
2695                 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2696
2697         return 0;
2698 }
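/*
 * The misc cause word is taken from the interrupt status block in host
 * memory (hw->isb_mem) at index TXGBE_ISB_MISC rather than from an ICR
 * register read.  This function only translates the cause bits into
 * TXGBE_FLAG_* bits; the actual processing happens later in
 * txgbe_dev_interrupt_action().
 */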
2699
2700 /**
2701  * It gets and then prints the link status.
2702  *
2703  * @param dev
2704  *  Pointer to struct rte_eth_dev.
2705  *
2706  * @return
2707  *  - On success, zero.
2708  *  - On failure, a negative value.
2709  */
2710 static void
2711 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2712 {
2713         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2714         struct rte_eth_link link;
2715
2716         rte_eth_linkstatus_get(dev, &link);
2717
2718         if (link.link_status) {
2719                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2720                                         (int)(dev->data->port_id),
2721                                         (unsigned int)link.link_speed,
2722                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2723                                         "full-duplex" : "half-duplex");
2724         } else {
2725                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2726                                 (int)(dev->data->port_id));
2727         }
2728         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2729                                 pci_dev->addr.domain,
2730                                 pci_dev->addr.bus,
2731                                 pci_dev->addr.devid,
2732                                 pci_dev->addr.function);
2733 }
2734
2735 /*
2736  * It executes link_update after an interrupt has occurred.
2737  *
2738  * @param dev
2739  *  Pointer to struct rte_eth_dev.
2740  *
2741  * @return
2742  *  - On success, zero.
2743  *  - On failure, a negative value.
2744  */
2745 static int
2746 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
2747                            struct rte_intr_handle *intr_handle)
2748 {
2749         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2750         int64_t timeout;
2751         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2752
2753         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2754
2755         if (intr->flags & TXGBE_FLAG_MAILBOX) {
2756                 txgbe_pf_mbx_process(dev);
2757                 intr->flags &= ~TXGBE_FLAG_MAILBOX;
2758         }
2759
2760         if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2761                 hw->phy.handle_lasi(hw);
2762                 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2763         }
2764
2765         if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2766                 struct rte_eth_link link;
2767
2768                 /* get the link status before the update, to predict the transition */
2769                 rte_eth_linkstatus_get(dev, &link);
2770
2771                 txgbe_dev_link_update(dev, 0);
2772
2773                 /* link was down, likely coming up */
2774                 if (!link.link_status)
2775                         /* handle it 1 sec later, waiting for it to stabilize */
2776                         timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
2777                 /* link was up, likely going down */
2778                 else
2779                         /* handle it 4 sec later, waiting for it to stabilize */
2780                         timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
2781
2782                 txgbe_dev_link_status_print(dev);
2783                 if (rte_eal_alarm_set(timeout * 1000,
2784                                       txgbe_dev_interrupt_delayed_handler,
2785                                       (void *)dev) < 0) {
2786                         PMD_DRV_LOG(ERR, "Error setting alarm");
2787                 } else {
2788                         /* remember original mask */
2789                         intr->mask_misc_orig = intr->mask_misc;
2790                         /* only disable lsc interrupt */
2791                         intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2792                 }
2793         }
2794
2795         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2796         txgbe_enable_intr(dev);
2797         rte_intr_enable(intr_handle);
2798
2799         return 0;
2800 }
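/*
 * Link state changes are therefore handled in two stages: the action above
 * only masks further LSC interrupts and arms an alarm, and
 * txgbe_dev_interrupt_delayed_handler() below re-checks the link after it
 * has had time to settle (TXGBE_LINK_UP_CHECK_TIMEOUT when the link was down
 * and is likely coming up, TXGBE_LINK_DOWN_CHECK_TIMEOUT when it was up)
 * before notifying applications and restoring the interrupt mask.
 */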
2801
2802 /**
2803  * Interrupt handler which shall be registered as an alarm callback for
2804  * delayed handling of a specific interrupt, waiting for a stable NIC state.
2805  * As the NIC interrupt state is not stable for txgbe right after the link
2806  * goes down, it needs to wait 4 seconds to get a stable status.
2807  *
2808  * @param handle
2809  *  Pointer to interrupt handle.
2810  * @param param
2811  *  The address of parameter (struct rte_eth_dev *) registered before.
2812  *
2813  * @return
2814  *  void
2815  */
2816 static void
2817 txgbe_dev_interrupt_delayed_handler(void *param)
2818 {
2819         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2820         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2821         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2822         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2823         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2824         uint32_t eicr;
2825
2826         txgbe_disable_intr(hw);
2827
2828         eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2829         if (eicr & TXGBE_ICRMISC_VFMBX)
2830                 txgbe_pf_mbx_process(dev);
2831
2832         if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2833                 hw->phy.handle_lasi(hw);
2834                 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2835         }
2836
2837         if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2838                 txgbe_dev_link_update(dev, 0);
2839                 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
2840                 txgbe_dev_link_status_print(dev);
2841                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2842                                               NULL);
2843         }
2844
2845         if (intr->flags & TXGBE_FLAG_MACSEC) {
2846                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2847                                               NULL);
2848                 intr->flags &= ~TXGBE_FLAG_MACSEC;
2849         }
2850
2851         /* restore original mask */
2852         intr->mask_misc = intr->mask_misc_orig;
2853         intr->mask_misc_orig = 0;
2854
2855         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2856         txgbe_enable_intr(dev);
2857         rte_intr_enable(intr_handle);
2858 }
2859
2860 /**
2861  * Interrupt handler triggered by the NIC for handling
2862  * a specific interrupt.
2863  *
2864  * @param handle
2865  *  Pointer to interrupt handle.
2866  * @param param
2867  *  The address of parameter (struct rte_eth_dev *) registered before.
2868  *
2869  * @return
2870  *  void
2871  */
2872 static void
2873 txgbe_dev_interrupt_handler(void *param)
2874 {
2875         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2876
2877         txgbe_dev_interrupt_get_status(dev);
2878         txgbe_dev_interrupt_action(dev, dev->intr_handle);
2879 }
2880
2881 static int
2882 txgbe_dev_led_on(struct rte_eth_dev *dev)
2883 {
2884         struct txgbe_hw *hw;
2885
2886         hw = TXGBE_DEV_HW(dev);
2887         return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
2888 }
2889
2890 static int
2891 txgbe_dev_led_off(struct rte_eth_dev *dev)
2892 {
2893         struct txgbe_hw *hw;
2894
2895         hw = TXGBE_DEV_HW(dev);
2896         return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
2897 }
2898
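/*
 * Report the current flow control configuration: watermarks, pause time and
 * autoneg come from the cached hw->fc settings, while the Rx/Tx pause state
 * is read back from the RXFCCFG/TXFCCFG registers.
 */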
2899 static int
2900 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2901 {
2902         struct txgbe_hw *hw;
2903         uint32_t mflcn_reg;
2904         uint32_t fccfg_reg;
2905         int rx_pause;
2906         int tx_pause;
2907
2908         hw = TXGBE_DEV_HW(dev);
2909
2910         fc_conf->pause_time = hw->fc.pause_time;
2911         fc_conf->high_water = hw->fc.high_water[0];
2912         fc_conf->low_water = hw->fc.low_water[0];
2913         fc_conf->send_xon = hw->fc.send_xon;
2914         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2915
2916         /*
2917          * Return rx_pause status according to actual setting of
2918          * RXFCCFG register.
2919          */
2920         mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
2921         if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
2922                 rx_pause = 1;
2923         else
2924                 rx_pause = 0;
2925
2926         /*
2927          * Return tx_pause status according to actual setting of
2928          * TXFCCFG register.
2929          */
2930         fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
2931         if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
2932                 tx_pause = 1;
2933         else
2934                 tx_pause = 0;
2935
2936         if (rx_pause && tx_pause)
2937                 fc_conf->mode = RTE_FC_FULL;
2938         else if (rx_pause)
2939                 fc_conf->mode = RTE_FC_RX_PAUSE;
2940         else if (tx_pause)
2941                 fc_conf->mode = RTE_FC_TX_PAUSE;
2942         else
2943                 fc_conf->mode = RTE_FC_NONE;
2944
2945         return 0;
2946 }
2947
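/*
 * Apply a new flow control configuration: validate the high/low watermarks
 * against the Rx packet buffer size, cache the requested settings in hw->fc
 * and enable them in hardware.
 */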
2948 static int
2949 txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2950 {
2951         struct txgbe_hw *hw;
2952         int err;
2953         uint32_t rx_buf_size;
2954         uint32_t max_high_water;
2955         enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
2956                 txgbe_fc_none,
2957                 txgbe_fc_rx_pause,
2958                 txgbe_fc_tx_pause,
2959                 txgbe_fc_full
2960         };
2961
2962         PMD_INIT_FUNC_TRACE();
2963
2964         hw = TXGBE_DEV_HW(dev);
2965         rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
2966         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2967
2968         /*
2969          * Reserve at least one Ethernet frame for the watermark;
2970          * high_water/low_water are expressed in kilobytes for txgbe.
2971          */
2972         max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2973         if (fc_conf->high_water > max_high_water ||
2974             fc_conf->high_water < fc_conf->low_water) {
2975                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2976                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2977                 return -EINVAL;
2978         }
2979
2980         hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
2981         hw->fc.pause_time     = fc_conf->pause_time;
2982         hw->fc.high_water[0]  = fc_conf->high_water;
2983         hw->fc.low_water[0]   = fc_conf->low_water;
2984         hw->fc.send_xon       = fc_conf->send_xon;
2985         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2986
2987         err = txgbe_fc_enable(hw);
2988
2989         /* Not negotiated is not an error case */
2990         if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
2991                 wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
2992                       (fc_conf->mac_ctrl_frame_fwd
2993                        ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
2994                 txgbe_flush(hw);
2995
2996                 return 0;
2997         }
2998
2999         PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
3000         return -EIO;
3001 }
3002
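/*
 * Configure priority flow control for the traffic class that the given user
 * priority maps to, then enable PFC for that traffic class in hardware.
 */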
3003 static int
3004 txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
3005                 struct rte_eth_pfc_conf *pfc_conf)
3006 {
3007         int err;
3008         uint32_t rx_buf_size;
3009         uint32_t max_high_water;
3010         uint8_t tc_num;
3011         uint8_t  map[TXGBE_DCB_UP_MAX] = { 0 };
3012         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3013         struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3014
3015         enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3016                 txgbe_fc_none,
3017                 txgbe_fc_rx_pause,
3018                 txgbe_fc_tx_pause,
3019                 txgbe_fc_full
3020         };
3021
3022         PMD_INIT_FUNC_TRACE();
3023
3024         txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3025         tc_num = map[pfc_conf->priority];
3026         rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
3027         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3028         /*
3029          * Reserve at least one Ethernet frame for the watermark;
3030          * high_water/low_water are expressed in kilobytes for txgbe.
3031          */
3032         max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3033         if (pfc_conf->fc.high_water > max_high_water ||
3034             pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
3035                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3036                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3037                 return -EINVAL;
3038         }
3039
3040         hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
3041         hw->fc.pause_time = pfc_conf->fc.pause_time;
3042         hw->fc.send_xon = pfc_conf->fc.send_xon;
3043         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
3044         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3045
3046         err = txgbe_dcb_pfc_enable(hw, tc_num);
3047
3048         /* Not negotiated is not an error case */
3049         if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
3050                 return 0;
3051
3052         PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
3053         return -EIO;
3054 }
3055
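/*
 * Update the 128-entry RSS redirection table. Entries are written four at a
 * time through the RSSTBL registers, honoring the per-entry mask in reta_conf.
 */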
3056 int
3057 txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3058                           struct rte_eth_rss_reta_entry64 *reta_conf,
3059                           uint16_t reta_size)
3060 {
3061         uint8_t i, j, mask;
3062         uint32_t reta;
3063         uint16_t idx, shift;
3064         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3065         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3066
3067         PMD_INIT_FUNC_TRACE();
3068
3069         if (!txgbe_rss_update_sp(hw->mac.type)) {
3070                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3071                         "NIC.");
3072                 return -ENOTSUP;
3073         }
3074
3075         if (reta_size != ETH_RSS_RETA_SIZE_128) {
3076                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3077                         "(%d) doesn't match the number the hardware can support "
3078                         "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3079                 return -EINVAL;
3080         }
3081
3082         for (i = 0; i < reta_size; i += 4) {
3083                 idx = i / RTE_RETA_GROUP_SIZE;
3084                 shift = i % RTE_RETA_GROUP_SIZE;
3085                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3086                 if (!mask)
3087                         continue;
3088
3089                 reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3090                 for (j = 0; j < 4; j++) {
3091                         if (RS8(mask, j, 0x1)) {
3092                                 reta  &= ~(MS32(8 * j, 0xFF));
3093                                 reta |= LS32(reta_conf[idx].reta[shift + j],
3094                                                 8 * j, 0xFF);
3095                         }
3096                 }
3097                 wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3098         }
3099         adapter->rss_reta_updated = 1;
3100
3101         return 0;
3102 }
3103
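/* Read back the RSS redirection table entries selected by the reta_conf mask. */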
3104 int
3105 txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3106                          struct rte_eth_rss_reta_entry64 *reta_conf,
3107                          uint16_t reta_size)
3108 {
3109         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3110         uint8_t i, j, mask;
3111         uint32_t reta;
3112         uint16_t idx, shift;
3113
3114         PMD_INIT_FUNC_TRACE();
3115
3116         if (reta_size != ETH_RSS_RETA_SIZE_128) {
3117                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3118                         "(%d) doesn't match the number the hardware can support "
3119                         "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3120                 return -EINVAL;
3121         }
3122
3123         for (i = 0; i < reta_size; i += 4) {
3124                 idx = i / RTE_RETA_GROUP_SIZE;
3125                 shift = i % RTE_RETA_GROUP_SIZE;
3126                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3127                 if (!mask)
3128                         continue;
3129
3130                 reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3131                 for (j = 0; j < 4; j++) {
3132                         if (RS8(mask, j, 0x1))
3133                                 reta_conf[idx].reta[shift + j] =
3134                                         (uint16_t)RS32(reta, 8 * j, 0xFF);
3135                 }
3136         }
3137
3138         return 0;
3139 }
3140
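/*
 * Receive address register (RAR) helpers backing the mac_addr_add/remove/set
 * dev ops; the default MAC address is kept in RAR slot 0.
 */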
3141 static int
3142 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3143                                 uint32_t index, uint32_t pool)
3144 {
3145         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3146         uint32_t enable_addr = 1;
3147
3148         return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3149                              pool, enable_addr);
3150 }
3151
3152 static void
3153 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3154 {
3155         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3156
3157         txgbe_clear_rar(hw, index);
3158 }
3159
3160 static int
3161 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3162 {
3163         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3164
3165         txgbe_remove_rar(dev, 0);
3166         txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3167
3168         return 0;
3169 }
3170
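/*
 * Change the port MTU: validate it against the device limits, update the
 * maximum Rx frame length in the device configuration and program TXGBE_FRMSZ.
 */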
3171 static int
3172 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3173 {
3174         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3175         struct rte_eth_dev_info dev_info;
3176         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3177         struct rte_eth_dev_data *dev_data = dev->data;
3178         int ret;
3179
3180         ret = txgbe_dev_info_get(dev, &dev_info);
3181         if (ret != 0)
3182                 return ret;
3183
3184         /* check that mtu is within the allowed range */
3185         if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
3186                 return -EINVAL;
3187
3188         /* If the device is started, refuse an MTU that requires scattered-packet
3189          * support when that feature has not been enabled beforehand.
3190          */
3191         if (dev_data->dev_started && !dev_data->scattered_rx &&
3192             (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
3193              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
3194                 PMD_INIT_LOG(ERR, "Stop port first.");
3195                 return -EINVAL;
3196         }
3197
3198         /* update max frame size */
3199         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3200
3201         if (hw->mode)
3202                 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3203                         TXGBE_FRAME_SIZE_MAX);
3204         else
3205                 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3206                         TXGBE_FRMSZ_MAX(frame_size));
3207
3208         return 0;
3209 }
3210
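/*
 * Compute the 12-bit hash vector for a unicast address; mc_filter_type selects
 * which address bits feed the hash, mirroring the multicast filter scheme.
 */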
3211 static uint32_t
3212 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3213 {
3214         uint32_t vector = 0;
3215
3216         switch (hw->mac.mc_filter_type) {
3217         case 0:   /* use bits [47:36] of the address */
3218                 vector = ((uc_addr->addr_bytes[4] >> 4) |
3219                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
3220                 break;
3221         case 1:   /* use bits [46:35] of the address */
3222                 vector = ((uc_addr->addr_bytes[4] >> 3) |
3223                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
3224                 break;
3225         case 2:   /* use bits [45:34] of the address */
3226                 vector = ((uc_addr->addr_bytes[4] >> 2) |
3227                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
3228                 break;
3229         case 3:   /* use bits [43:32] of the address */
3230                 vector = ((uc_addr->addr_bytes[4]) |
3231                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
3232                 break;
3233         default:  /* Invalid mc_filter_type */
3234                 break;
3235         }
3236
3237         /* vector can only be 12 bits or the table boundary will be exceeded */
3238         vector &= 0xFFF;
3239         return vector;
3240 }
3241
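/*
 * Add or remove a single address in the unicast hash table (UTA), keeping the
 * software shadow copy and the PSRCTL hash-filter enable bit in sync.
 */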
3242 static int
3243 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
3244                         struct rte_ether_addr *mac_addr, uint8_t on)
3245 {
3246         uint32_t vector;
3247         uint32_t uta_idx;
3248         uint32_t reg_val;
3249         uint32_t uta_mask;
3250         uint32_t psrctl;
3251
3252         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3253         struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3254
3255         /* The UTA table only exists on pf hardware */
3256         if (hw->mac.type < txgbe_mac_raptor)
3257                 return -ENOTSUP;
3258
3259         vector = txgbe_uta_vector(hw, mac_addr);
3260         uta_idx = (vector >> 5) & 0x7F;
3261         uta_mask = 0x1UL << (vector & 0x1F);
3262
3263         if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
3264                 return 0;
3265
3266         reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
3267         if (on) {
3268                 uta_info->uta_in_use++;
3269                 reg_val |= uta_mask;
3270                 uta_info->uta_shadow[uta_idx] |= uta_mask;
3271         } else {
3272                 uta_info->uta_in_use--;
3273                 reg_val &= ~uta_mask;
3274                 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
3275         }
3276
3277         wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
3278
3279         psrctl = rd32(hw, TXGBE_PSRCTL);
3280         if (uta_info->uta_in_use > 0)
3281                 psrctl |= TXGBE_PSRCTL_UCHFENA;
3282         else
3283                 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3284
3285         psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3286         psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3287         wr32(hw, TXGBE_PSRCTL, psrctl);
3288
3289         return 0;
3290 }
3291
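/* Set or clear every entry of the unicast hash table in one pass. */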
3292 static int
3293 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3294 {
3295         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3296         struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3297         uint32_t psrctl;
3298         int i;
3299
3300         /* The UTA table only exists on pf hardware */
3301         if (hw->mac.type < txgbe_mac_raptor)
3302                 return -ENOTSUP;
3303
3304         if (on) {
3305                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3306                         uta_info->uta_shadow[i] = ~0;
3307                         wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3308                 }
3309         } else {
3310                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3311                         uta_info->uta_shadow[i] = 0;
3312                         wr32(hw, TXGBE_UCADDRTBL(i), 0);
3313                 }
3314         }
3315
3316         psrctl = rd32(hw, TXGBE_PSRCTL);
3317         if (on)
3318                 psrctl |= TXGBE_PSRCTL_UCHFENA;
3319         else
3320                 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3321
3322         psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3323         psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3324         wr32(hw, TXGBE_PSRCTL, psrctl);
3325
3326         return 0;
3327 }
3328
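/* Translate ETH_VMDQ_ACCEPT_* bits into TXGBE_POOLETHCTL register flags. */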
3329 uint32_t
3330 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3331 {
3332         uint32_t new_val = orig_val;
3333
3334         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3335                 new_val |= TXGBE_POOLETHCTL_UTA;
3336         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3337                 new_val |= TXGBE_POOLETHCTL_MCHA;
3338         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3339                 new_val |= TXGBE_POOLETHCTL_UCHA;
3340         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3341                 new_val |= TXGBE_POOLETHCTL_BCA;
3342         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3343                 new_val |= TXGBE_POOLETHCTL_MCP;
3344
3345         return new_val;
3346 }
3347
3348 static int
3349 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
3350 {
3351         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3352         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3353         uint32_t mask;
3354         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3355
3356         if (queue_id < 32) {
3357                 mask = rd32(hw, TXGBE_IMS(0));
3358                 mask &= (1 << queue_id);
3359                 wr32(hw, TXGBE_IMS(0), mask);
3360         } else if (queue_id < 64) {
3361                 mask = rd32(hw, TXGBE_IMS(1));
3362                 mask &= (1 << (queue_id - 32));
3363                 wr32(hw, TXGBE_IMS(1), mask);
3364         }
3365         rte_intr_enable(intr_handle);
3366
3367         return 0;
3368 }
3369
3370 static int
3371 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
3372 {
3373         uint32_t mask;
3374         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3375
3376         if (queue_id < 32) {
3377                 mask = rd32(hw, TXGBE_IMS(0));
3378                 mask &= ~(1 << queue_id);
3379                 wr32(hw, TXGBE_IMS(0), mask);
3380         } else if (queue_id < 64) {
3381                 mask = rd32(hw, TXGBE_IMS(1));
3382                 mask &= ~(1 << (queue_id - 32));
3383                 wr32(hw, TXGBE_IMS(1), mask);
3384         }
3385
3386         return 0;
3387 }
3388
3389 /**
3390  * set the IVAR registers, mapping interrupt causes to vectors
3391  * @param hw
3392  *  pointer to txgbe_hw struct
3393  * @param direction
3394  *  0 for Rx, 1 for Tx, -1 for other causes
3395  * @param queue
3396  *  queue to map the corresponding interrupt to
3397  * @param msix_vector
3398  *  the vector to map to the corresponding queue
3399  */
3400 void
3401 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
3402                    uint8_t queue, uint8_t msix_vector)
3403 {
3404         uint32_t tmp, idx;
3405
3406         if (direction == -1) {
3407                 /* other causes */
3408                 msix_vector |= TXGBE_IVARMISC_VLD;
3409                 idx = 0;
3410                 tmp = rd32(hw, TXGBE_IVARMISC);
3411                 tmp &= ~(0xFF << idx);
3412                 tmp |= (msix_vector << idx);
3413                 wr32(hw, TXGBE_IVARMISC, tmp);
3414         } else {
3415                 /* rx or tx causes */
3416                 /* Workaround for lost ICR */
3417                 idx = ((16 * (queue & 1)) + (8 * direction));
3418                 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
3419                 tmp &= ~(0xFF << idx);
3420                 tmp |= (msix_vector << idx);
3421                 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
3422         }
3423 }
3424
3425 /**
3426  * Sets up the hardware to properly generate MSI-X interrupts
3427  * @param dev
3428  *  pointer to rte_eth_dev structure
3429  */
3430 static void
3431 txgbe_configure_msix(struct rte_eth_dev *dev)
3432 {
3433         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3434         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3435         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3436         uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
3437         uint32_t vec = TXGBE_MISC_VEC_ID;
3438         uint32_t gpie;
3439
3440         /* Do not configure the MSI-X registers if no mapping is done
3441          * between interrupt vectors and event fds; but if MSI-X has
3442          * already been enabled, auto clean, auto mask and throttling
3443          * still need to be configured.
3444          */
3445         gpie = rd32(hw, TXGBE_GPIE);
3446         if (!rte_intr_dp_is_en(intr_handle) &&
3447             !(gpie & TXGBE_GPIE_MSIX))
3448                 return;
3449
3450         if (rte_intr_allow_others(intr_handle)) {
3451                 base = TXGBE_RX_VEC_START;
3452                 vec = base;
3453         }
3454
3455         /* setup GPIE for MSI-X mode */
3456         gpie = rd32(hw, TXGBE_GPIE);
3457         gpie |= TXGBE_GPIE_MSIX;
3458         wr32(hw, TXGBE_GPIE, gpie);
3459
3460         /* Populate the IVAR table and set the ITR values to the
3461          * corresponding register.
3462          */
3463         if (rte_intr_dp_is_en(intr_handle)) {
3464                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
3465                         queue_id++) {
3466                         /* by default, 1:1 mapping */
3467                         txgbe_set_ivar_map(hw, 0, queue_id, vec);
3468                         intr_handle->intr_vec[queue_id] = vec;
3469                         if (vec < base + intr_handle->nb_efd - 1)
3470                                 vec++;
3471                 }
3472
3473                 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
3474         }
3475         wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
3476                         TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
3477                         | TXGBE_ITR_WRDSA);
3478 }
3479
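/*
 * Limit the transmit rate of a single Tx queue by selecting the queue in
 * ARBPOOLIDX and programming its max/min rate in ARBTXRATE.
 */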
3480 int
3481 txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3482                            uint16_t queue_idx, uint16_t tx_rate)
3483 {
3484         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3485         uint32_t bcnrc_val;
3486
3487         if (queue_idx >= hw->mac.max_tx_queues)
3488                 return -EINVAL;
3489
3490         if (tx_rate != 0) {
3491                 bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3492                 bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3493         } else {
3494                 bcnrc_val = 0;
3495         }
3496
3497         /*
3498          * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3499          * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3500          */
3501         wr32(hw, TXGBE_ARBTXMMW, 0x14);
3502
3503         /* Set ARBTXRATE of queue X */
3504         wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3505         wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
3506         txgbe_flush(hw);
3507
3508         return 0;
3509 }
3510
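/* Generic filter control entry point: only the rte_flow ops lookup is supported. */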
3511 static int
3512 txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
3513                      enum rte_filter_type filter_type,
3514                      enum rte_filter_op filter_op,
3515                      void *arg)
3516 {
3517         int ret = 0;
3518
3519         switch (filter_type) {
3520         case RTE_ETH_FILTER_GENERIC:
3521                 if (filter_op != RTE_ETH_FILTER_GET)
3522                         return -EINVAL;
3523                 *(const void **)arg = &txgbe_flow_ops;
3524                 break;
3525         default:
3526                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3527                                                         filter_type);
3528                 ret = -EINVAL;
3529                 break;
3530         }
3531
3532         return ret;
3533 }
3534
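/* Iterator passed to txgbe_update_mc_addr_list() to walk the multicast address array. */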
3535 static u8 *
3536 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
3537                         u8 **mc_addr_ptr, u32 *vmdq)
3538 {
3539         u8 *mc_addr;
3540
3541         *vmdq = 0;
3542         mc_addr = *mc_addr_ptr;
3543         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
3544         return mc_addr;
3545 }
3546
3547 int
3548 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
3549                           struct rte_ether_addr *mc_addr_set,
3550                           uint32_t nb_mc_addr)
3551 {
3552         struct txgbe_hw *hw;
3553         u8 *mc_addr_list;
3554
3555         hw = TXGBE_DEV_HW(dev);
3556         mc_addr_list = (u8 *)mc_addr_set;
3557         return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
3558                                          txgbe_dev_addr_list_itr, TRUE);
3559 }
3560
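/* Raw 64-bit readers for the IEEE 1588 system time and Rx/Tx timestamp registers. */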
3561 static uint64_t
3562 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
3563 {
3564         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3565         uint64_t systime_cycles;
3566
3567         systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
3568         systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
3569
3570         return systime_cycles;
3571 }
3572
3573 static uint64_t
3574 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
3575 {
3576         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3577         uint64_t rx_tstamp_cycles;
3578
3579         /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
3580         rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
3581         rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
3582
3583         return rx_tstamp_cycles;
3584 }
3585
3586 static uint64_t
3587 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
3588 {
3589         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3590         uint64_t tx_tstamp_cycles;
3591
3592         /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
3593         tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
3594         tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
3595
3596         return tx_tstamp_cycles;
3597 }
3598
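/*
 * Initialize the system time and Rx/Tx timestamp timecounters, choosing the
 * increment value and shift that match the current link speed.
 */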
3599 static void
3600 txgbe_start_timecounters(struct rte_eth_dev *dev)
3601 {
3602         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3603         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3604         struct rte_eth_link link;
3605         uint32_t incval = 0;
3606         uint32_t shift = 0;
3607
3608         /* Get current link speed. */
3609         txgbe_dev_link_update(dev, 1);
3610         rte_eth_linkstatus_get(dev, &link);
3611
3612         switch (link.link_speed) {
3613         case ETH_SPEED_NUM_100M:
3614                 incval = TXGBE_INCVAL_100;
3615                 shift = TXGBE_INCVAL_SHIFT_100;
3616                 break;
3617         case ETH_SPEED_NUM_1G:
3618                 incval = TXGBE_INCVAL_1GB;
3619                 shift = TXGBE_INCVAL_SHIFT_1GB;
3620                 break;
3621         case ETH_SPEED_NUM_10G:
3622         default:
3623                 incval = TXGBE_INCVAL_10GB;
3624                 shift = TXGBE_INCVAL_SHIFT_10GB;
3625                 break;
3626         }
3627
3628         wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
3629
3630         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
3631         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3632         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3633
3634         adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
3635         adapter->systime_tc.cc_shift = shift;
3636         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
3637
3638         adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
3639         adapter->rx_tstamp_tc.cc_shift = shift;
3640         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3641
3642         adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
3643         adapter->tx_tstamp_tc.cc_shift = shift;
3644         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3645 }
3646
3647 static int
3648 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
3649 {
3650         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3651
3652         adapter->systime_tc.nsec += delta;
3653         adapter->rx_tstamp_tc.nsec += delta;
3654         adapter->tx_tstamp_tc.nsec += delta;
3655
3656         return 0;
3657 }
3658
3659 static int
3660 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3661 {
3662         uint64_t ns;
3663         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3664
3665         ns = rte_timespec_to_ns(ts);
3666         /* Set the timecounters to a new value. */
3667         adapter->systime_tc.nsec = ns;
3668         adapter->rx_tstamp_tc.nsec = ns;
3669         adapter->tx_tstamp_tc.nsec = ns;
3670
3671         return 0;
3672 }
3673
3674 static int
3675 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3676 {
3677         uint64_t ns, systime_cycles;
3678         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3679
3680         systime_cycles = txgbe_read_systime_cyclecounter(dev);
3681         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
3682         *ts = rte_ns_to_timespec(ns);
3683
3684         return 0;
3685 }
3686
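/*
 * Enable IEEE 1588/802.1AS timestamping: reset the system time, restart the
 * timecounters and enable Rx/Tx PTP timestamp capture.
 */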
3687 static int
3688 txgbe_timesync_enable(struct rte_eth_dev *dev)
3689 {
3690         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3691         uint32_t tsync_ctl;
3692
3693         /* Stop the timesync system time. */
3694         wr32(hw, TXGBE_TSTIMEINC, 0x0);
3695         /* Reset the timesync system time value. */
3696         wr32(hw, TXGBE_TSTIMEL, 0x0);
3697         wr32(hw, TXGBE_TSTIMEH, 0x0);
3698
3699         txgbe_start_timecounters(dev);
3700
3701         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3702         wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
3703                 RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
3704
3705         /* Enable timestamping of received PTP packets. */
3706         tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
3707         tsync_ctl |= TXGBE_TSRXCTL_ENA;
3708         wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
3709
3710         /* Enable timestamping of transmitted PTP packets. */
3711         tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
3712         tsync_ctl |= TXGBE_TSTXCTL_ENA;
3713         wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
3714
3715         txgbe_flush(hw);
3716
3717         return 0;
3718 }
3719
3720 static int
3721 txgbe_timesync_disable(struct rte_eth_dev *dev)
3722 {
3723         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3724         uint32_t tsync_ctl;
3725
3726         /* Disable timestamping of transmitted PTP packets. */
3727         tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
3728         tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
3729         wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
3730
3731         /* Disable timestamping of received PTP packets. */
3732         tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
3733         tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
3734         wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
3735
3736         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3737         wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
3738
3739         /* Stop incrementing the System Time registers. */
3740         wr32(hw, TXGBE_TSTIMEINC, 0);
3741
3742         return 0;
3743 }
3744
3745 static int
3746 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3747                                  struct timespec *timestamp,
3748                                  uint32_t flags __rte_unused)
3749 {
3750         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3751         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3752         uint32_t tsync_rxctl;
3753         uint64_t rx_tstamp_cycles;
3754         uint64_t ns;
3755
3756         tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
3757         if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
3758                 return -EINVAL;
3759
3760         rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
3761         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
3762         *timestamp = rte_ns_to_timespec(ns);
3763
3764         return  0;
3765 }
3766
3767 static int
3768 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3769                                  struct timespec *timestamp)
3770 {
3771         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3772         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3773         uint32_t tsync_txctl;
3774         uint64_t tx_tstamp_cycles;
3775         uint64_t ns;
3776
3777         tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
3778         if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
3779                 return -EINVAL;
3780
3781         tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
3782         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
3783         *timestamp = rte_ns_to_timespec(ns);
3784
3785         return 0;
3786 }
3787
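/* Register dump helpers: count and read every register group listed in txgbe_regs_others. */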
3788 static int
3789 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
3790 {
3791         int count = 0;
3792         int g_ind = 0;
3793         const struct reg_info *reg_group;
3794         const struct reg_info **reg_set = txgbe_regs_others;
3795
3796         while ((reg_group = reg_set[g_ind++]))
3797                 count += txgbe_regs_group_count(reg_group);
3798
3799         return count;
3800 }
3801
3802 static int
3803 txgbe_get_regs(struct rte_eth_dev *dev,
3804               struct rte_dev_reg_info *regs)
3805 {
3806         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3807         uint32_t *data = regs->data;
3808         int g_ind = 0;
3809         int count = 0;
3810         const struct reg_info *reg_group;
3811         const struct reg_info **reg_set = txgbe_regs_others;
3812
3813         if (data == NULL) {
3814                 regs->length = txgbe_get_reg_length(dev);
3815                 regs->width = sizeof(uint32_t);
3816                 return 0;
3817         }
3818
3819         /* Support only full register dump */
3820         if (regs->length == 0 ||
3821             regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
3822                 regs->version = hw->mac.type << 24 |
3823                                 hw->revision_id << 16 |
3824                                 hw->device_id;
3825                 while ((reg_group = reg_set[g_ind++]))
3826                         count += txgbe_read_regs_group(dev, &data[count],
3827                                                       reg_group);
3828                 return 0;
3829         }
3830
3831         return -ENOTSUP;
3832 }
3833
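/*
 * EEPROM (NVM) access helpers; the byte offsets and lengths passed by the
 * ethdev API are converted to 16-bit words for the ROM read/write routines.
 */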
3834 static int
3835 txgbe_get_eeprom_length(struct rte_eth_dev *dev)
3836 {
3837         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3838
3839         /* Return unit is byte count */
3840         return hw->rom.word_size * 2;
3841 }
3842
3843 static int
3844 txgbe_get_eeprom(struct rte_eth_dev *dev,
3845                 struct rte_dev_eeprom_info *in_eeprom)
3846 {
3847         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3848         struct txgbe_rom_info *eeprom = &hw->rom;
3849         uint16_t *data = in_eeprom->data;
3850         int first, length;
3851
3852         first = in_eeprom->offset >> 1;
3853         length = in_eeprom->length >> 1;
3854         if (first > hw->rom.word_size ||
3855             ((first + length) > hw->rom.word_size))
3856                 return -EINVAL;
3857
3858         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3859
3860         return eeprom->readw_buffer(hw, first, length, data);
3861 }
3862
3863 static int
3864 txgbe_set_eeprom(struct rte_eth_dev *dev,
3865                 struct rte_dev_eeprom_info *in_eeprom)
3866 {
3867         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3868         struct txgbe_rom_info *eeprom = &hw->rom;
3869         uint16_t *data = in_eeprom->data;
3870         int first, length;
3871
3872         first = in_eeprom->offset >> 1;
3873         length = in_eeprom->length >> 1;
3874         if (first > hw->rom.word_size ||
3875             ((first + length) > hw->rom.word_size))
3876                 return -EINVAL;
3877
3878         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3879
3880         return eeprom->writew_buffer(hw,  first, length, data);
3881 }
3882
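/*
 * Report the SFP module EEPROM layout (SFF-8079 or SFF-8472) based on the
 * compliance and addressing-mode bytes read over I2C.
 */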
3883 static int
3884 txgbe_get_module_info(struct rte_eth_dev *dev,
3885                       struct rte_eth_dev_module_info *modinfo)
3886 {
3887         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3888         uint32_t status;
3889         uint8_t sff8472_rev, addr_mode;
3890         bool page_swap = false;
3891
3892         /* Check whether we support SFF-8472 or not */
3893         status = hw->phy.read_i2c_eeprom(hw,
3894                                              TXGBE_SFF_SFF_8472_COMP,
3895                                              &sff8472_rev);
3896         if (status != 0)
3897                 return -EIO;
3898
3899         /* check the addressing mode; a page swap is not supported */
3900         status = hw->phy.read_i2c_eeprom(hw,
3901                                              TXGBE_SFF_SFF_8472_SWAP,
3902                                              &addr_mode);
3903         if (status != 0)
3904                 return -EIO;
3905
3906         if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
3907                 PMD_DRV_LOG(ERR,
3908                             "Address change required to access page 0xA2, "
3909                             "but not supported. Please report the module "
3910                             "type to the driver maintainers.");
3911                 page_swap = true;
3912         }
3913
3914         if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
3915                 /* We have an SFP, but it does not support SFF-8472 */
3916                 modinfo->type = RTE_ETH_MODULE_SFF_8079;
3917                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
3918         } else {
3919                 /* We have an SFP which supports a revision of SFF-8472. */
3920                 modinfo->type = RTE_ETH_MODULE_SFF_8472;
3921                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
3922         }
3923
3924         return 0;
3925 }
3926
3927 static int
3928 txgbe_get_module_eeprom(struct rte_eth_dev *dev,
3929                         struct rte_dev_eeprom_info *info)
3930 {
3931         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3932         uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
3933         uint8_t databyte = 0xFF;
3934         uint8_t *data = info->data;
3935         uint32_t i = 0;
3936
3937         if (info->length == 0)
3938                 return -EINVAL;
3939
3940         for (i = info->offset; i < info->offset + info->length; i++) {
3941                 if (i < RTE_ETH_MODULE_SFF_8079_LEN)
3942                         status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
3943                 else
3944                         status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
3945
3946                 if (status != 0)
3947                         return -EIO;
3948
3949                 data[i - info->offset] = databyte;
3950         }
3951
3952         return 0;
3953 }
3954
3955 bool
3956 txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
3957 {
3958         switch (mac_type) {
3959         case txgbe_mac_raptor:
3960                 return 1;
3961         default:
3962                 return 0;
3963         }
3964 }
3965
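/*
 * Fill rte_eth_dcb_info from the current DCB configuration: priority-to-TC
 * mapping, per-TC queue ranges (with or without VMDq) and TC bandwidth shares.
 */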
3966 static int
3967 txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
3968                         struct rte_eth_dcb_info *dcb_info)
3969 {
3970         struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3971         struct txgbe_dcb_tc_config *tc;
3972         struct rte_eth_dcb_tc_queue_mapping *tc_queue;
3973         uint8_t nb_tcs;
3974         uint8_t i, j;
3975
3976         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
3977                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
3978         else
3979                 dcb_info->nb_tcs = 1;
3980
3981         tc_queue = &dcb_info->tc_queue;
3982         nb_tcs = dcb_info->nb_tcs;
3983
3984         if (dcb_config->vt_mode) { /* vt is enabled */
3985                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3986                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3987                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3988                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
3989                 if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
3990                         for (j = 0; j < nb_tcs; j++) {
3991                                 tc_queue->tc_rxq[0][j].base = j;
3992                                 tc_queue->tc_rxq[0][j].nb_queue = 1;
3993                                 tc_queue->tc_txq[0][j].base = j;
3994                                 tc_queue->tc_txq[0][j].nb_queue = 1;
3995                         }
3996                 } else {
3997                         for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
3998                                 for (j = 0; j < nb_tcs; j++) {
3999                                         tc_queue->tc_rxq[i][j].base =
4000                                                 i * nb_tcs + j;
4001                                         tc_queue->tc_rxq[i][j].nb_queue = 1;
4002                                         tc_queue->tc_txq[i][j].base =
4003                                                 i * nb_tcs + j;
4004                                         tc_queue->tc_txq[i][j].nb_queue = 1;
4005                                 }
4006                         }
4007                 }
4008         } else { /* vt is disabled */
4009                 struct rte_eth_dcb_rx_conf *rx_conf =
4010                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
4011                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
4012                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
4013                 if (dcb_info->nb_tcs == ETH_4_TCS) {
4014                         for (i = 0; i < dcb_info->nb_tcs; i++) {
4015                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
4016                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4017                         }
4018                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
4019                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
4020                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
4021                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
4022                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
4023                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4024                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4025                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4026                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
4027                         for (i = 0; i < dcb_info->nb_tcs; i++) {
4028                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
4029                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4030                         }
4031                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
4032                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
4033                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
4034                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
4035                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
4036                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
4037                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
4038                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
4039                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
4040                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4041                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4042                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4043                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
4044                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
4045                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
4046                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
4047                 }
4048         }
4049         for (i = 0; i < dcb_info->nb_tcs; i++) {
4050                 tc = &dcb_config->tc_config[i];
4051                 dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
4052         }
4053         return 0;
4054 }
4055
4056 static const struct eth_dev_ops txgbe_eth_dev_ops = {
4057         .dev_configure              = txgbe_dev_configure,
4058         .dev_infos_get              = txgbe_dev_info_get,
4059         .dev_start                  = txgbe_dev_start,
4060         .dev_stop                   = txgbe_dev_stop,
4061         .dev_set_link_up            = txgbe_dev_set_link_up,
4062         .dev_set_link_down          = txgbe_dev_set_link_down,
4063         .dev_close                  = txgbe_dev_close,
4064         .dev_reset                  = txgbe_dev_reset,
4065         .promiscuous_enable         = txgbe_dev_promiscuous_enable,
4066         .promiscuous_disable        = txgbe_dev_promiscuous_disable,
4067         .allmulticast_enable        = txgbe_dev_allmulticast_enable,
4068         .allmulticast_disable       = txgbe_dev_allmulticast_disable,
4069         .link_update                = txgbe_dev_link_update,
4070         .stats_get                  = txgbe_dev_stats_get,
4071         .xstats_get                 = txgbe_dev_xstats_get,
4072         .xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
4073         .stats_reset                = txgbe_dev_stats_reset,
4074         .xstats_reset               = txgbe_dev_xstats_reset,
4075         .xstats_get_names           = txgbe_dev_xstats_get_names,
4076         .xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
4077         .queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
4078         .fw_version_get             = txgbe_fw_version_get,
4079         .dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
4080         .mtu_set                    = txgbe_dev_mtu_set,
4081         .vlan_filter_set            = txgbe_vlan_filter_set,
4082         .vlan_tpid_set              = txgbe_vlan_tpid_set,
4083         .vlan_offload_set           = txgbe_vlan_offload_set,
4084         .vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
4085         .rx_queue_start             = txgbe_dev_rx_queue_start,
4086         .rx_queue_stop              = txgbe_dev_rx_queue_stop,
4087         .tx_queue_start             = txgbe_dev_tx_queue_start,
4088         .tx_queue_stop              = txgbe_dev_tx_queue_stop,
4089         .rx_queue_setup             = txgbe_dev_rx_queue_setup,
4090         .rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
4091         .rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
4092         .rx_queue_release           = txgbe_dev_rx_queue_release,
4093         .tx_queue_setup             = txgbe_dev_tx_queue_setup,
4094         .tx_queue_release           = txgbe_dev_tx_queue_release,
4095         .dev_led_on                 = txgbe_dev_led_on,
4096         .dev_led_off                = txgbe_dev_led_off,
4097         .flow_ctrl_get              = txgbe_flow_ctrl_get,
4098         .flow_ctrl_set              = txgbe_flow_ctrl_set,
4099         .priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
4100         .mac_addr_add               = txgbe_add_rar,
4101         .mac_addr_remove            = txgbe_remove_rar,
4102         .mac_addr_set               = txgbe_set_default_mac_addr,
4103         .uc_hash_table_set          = txgbe_uc_hash_table_set,
4104         .uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
4105         .set_queue_rate_limit       = txgbe_set_queue_rate_limit,
4106         .reta_update                = txgbe_dev_rss_reta_update,
4107         .reta_query                 = txgbe_dev_rss_reta_query,
4108         .rss_hash_update            = txgbe_dev_rss_hash_update,
4109         .rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
4110         .filter_ctrl                = txgbe_dev_filter_ctrl,
4111         .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
4112         .rxq_info_get               = txgbe_rxq_info_get,
4113         .txq_info_get               = txgbe_txq_info_get,
4114         .timesync_enable            = txgbe_timesync_enable,
4115         .timesync_disable           = txgbe_timesync_disable,
4116         .timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
4117         .timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
4118         .get_reg                    = txgbe_get_regs,
4119         .get_eeprom_length          = txgbe_get_eeprom_length,
4120         .get_eeprom                 = txgbe_get_eeprom,
4121         .set_eeprom                 = txgbe_set_eeprom,
4122         .get_module_info            = txgbe_get_module_info,
4123         .get_module_eeprom          = txgbe_get_module_eeprom,
4124         .get_dcb_info               = txgbe_dev_get_dcb_info,
4125         .timesync_adjust_time       = txgbe_timesync_adjust_time,
4126         .timesync_read_time         = txgbe_timesync_read_time,
4127         .timesync_write_time        = txgbe_timesync_write_time,
4128         .tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
4129 };
4130
4131 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
4132 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
4133 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
4134
4135 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
4136 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
4137
4138 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
4139         RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
4140 #endif
4141 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
4142         RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
4143 #endif
4144
4145 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
4146         RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
4147 #endif