net/txgbe: support creating consistent filter
dpdk.git: drivers/net/txgbe/txgbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <rte_ethdev_pci.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
#include "txgbe_regs_group.h"

static const struct reg_info txgbe_regs_general[] = {
        {TXGBE_RST, 1, 1, "TXGBE_RST"},
        {TXGBE_STAT, 1, 1, "TXGBE_STAT"},
        {TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
        {TXGBE_SDP, 1, 1, "TXGBE_SDP"},
        {TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
        {TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
        {0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_nvm[] = {
        {0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_interrupt[] = {
        {0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_fctl_others[] = {
        {0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rxdma[] = {
        {0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rx[] = {
        {0, 0, 0, ""}
};

static struct reg_info txgbe_regs_tx[] = {
        {0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_wakeup[] = {
        {0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_dcb[] = {
        {0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_mac[] = {
        {0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_diagnostic[] = {
        {0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *txgbe_regs_others[] = {
                                txgbe_regs_general,
                                txgbe_regs_nvm,
                                txgbe_regs_interrupt,
                                txgbe_regs_fctl_others,
                                txgbe_regs_rxdma,
                                txgbe_regs_rx,
                                txgbe_regs_tx,
                                txgbe_regs_wakeup,
                                txgbe_regs_dcb,
                                txgbe_regs_mac,
                                txgbe_regs_diagnostic,
                                NULL};

static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
                                        uint16_t queue);

static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

static int txgbe_filter_restore(struct rte_eth_dev *dev);
static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);

#define TXGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define TXGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
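
/*
 * Worked example (illustration only, not driver code): assuming 32-bit
 * bitmap words and NBBY == 8, queue 37 maps to idx = 37 / 32 = 1 and
 * bit = 37 % 32 = 5, so TXGBE_SET_HWSTRIP() sets bit 5 of bitmap[1].
 */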

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = TXGBE_RING_DESC_MAX,
        .nb_min = TXGBE_RING_DESC_MIN,
        .nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = TXGBE_RING_DESC_MAX,
        .nb_min = TXGBE_RING_DESC_MIN,
        .nb_align = TXGBE_TXD_ALIGN,
        .nb_seg_max = TXGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
        /* MNG RxTx */
        HW_XSTAT(mng_bmc2host_packets),
        HW_XSTAT(mng_host2bmc_packets),
        /* Basic RxTx */
        HW_XSTAT(rx_packets),
        HW_XSTAT(tx_packets),
        HW_XSTAT(rx_bytes),
        HW_XSTAT(tx_bytes),
        HW_XSTAT(rx_total_bytes),
        HW_XSTAT(rx_total_packets),
        HW_XSTAT(tx_total_packets),
        HW_XSTAT(rx_total_missed_packets),
        HW_XSTAT(rx_broadcast_packets),
        HW_XSTAT(rx_multicast_packets),
        HW_XSTAT(rx_management_packets),
        HW_XSTAT(tx_management_packets),
        HW_XSTAT(rx_management_dropped),

        /* Basic Error */
        HW_XSTAT(rx_crc_errors),
        HW_XSTAT(rx_illegal_byte_errors),
        HW_XSTAT(rx_error_bytes),
        HW_XSTAT(rx_mac_short_packet_dropped),
        HW_XSTAT(rx_length_errors),
        HW_XSTAT(rx_undersize_errors),
        HW_XSTAT(rx_fragment_errors),
        HW_XSTAT(rx_oversize_errors),
        HW_XSTAT(rx_jabber_errors),
        HW_XSTAT(rx_l3_l4_xsum_error),
        HW_XSTAT(mac_local_errors),
        HW_XSTAT(mac_remote_errors),

        /* Flow Director */
        HW_XSTAT(flow_director_added_filters),
        HW_XSTAT(flow_director_removed_filters),
        HW_XSTAT(flow_director_filter_add_errors),
        HW_XSTAT(flow_director_filter_remove_errors),
        HW_XSTAT(flow_director_matched_filters),
        HW_XSTAT(flow_director_missed_filters),

        /* FCoE */
        HW_XSTAT(rx_fcoe_crc_errors),
        HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
        HW_XSTAT(rx_fcoe_dropped),
        HW_XSTAT(rx_fcoe_packets),
        HW_XSTAT(tx_fcoe_packets),
        HW_XSTAT(rx_fcoe_bytes),
        HW_XSTAT(tx_fcoe_bytes),
        HW_XSTAT(rx_fcoe_no_ddp),
        HW_XSTAT(rx_fcoe_no_ddp_ext_buff),

        /* MACSEC */
        HW_XSTAT(tx_macsec_pkts_untagged),
        HW_XSTAT(tx_macsec_pkts_encrypted),
        HW_XSTAT(tx_macsec_pkts_protected),
        HW_XSTAT(tx_macsec_octets_encrypted),
        HW_XSTAT(tx_macsec_octets_protected),
        HW_XSTAT(rx_macsec_pkts_untagged),
        HW_XSTAT(rx_macsec_pkts_badtag),
        HW_XSTAT(rx_macsec_pkts_nosci),
        HW_XSTAT(rx_macsec_pkts_unknownsci),
        HW_XSTAT(rx_macsec_octets_decrypted),
        HW_XSTAT(rx_macsec_octets_validated),
        HW_XSTAT(rx_macsec_sc_pkts_unchecked),
        HW_XSTAT(rx_macsec_sc_pkts_delayed),
        HW_XSTAT(rx_macsec_sc_pkts_late),
        HW_XSTAT(rx_macsec_sa_pkts_ok),
        HW_XSTAT(rx_macsec_sa_pkts_invalid),
        HW_XSTAT(rx_macsec_sa_pkts_notvalid),
        HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
        HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

        /* MAC RxTx */
        HW_XSTAT(rx_size_64_packets),
        HW_XSTAT(rx_size_65_to_127_packets),
        HW_XSTAT(rx_size_128_to_255_packets),
        HW_XSTAT(rx_size_256_to_511_packets),
        HW_XSTAT(rx_size_512_to_1023_packets),
        HW_XSTAT(rx_size_1024_to_max_packets),
        HW_XSTAT(tx_size_64_packets),
        HW_XSTAT(tx_size_65_to_127_packets),
        HW_XSTAT(tx_size_128_to_255_packets),
        HW_XSTAT(tx_size_256_to_511_packets),
        HW_XSTAT(tx_size_512_to_1023_packets),
        HW_XSTAT(tx_size_1024_to_max_packets),

        /* Flow Control */
        HW_XSTAT(tx_xon_packets),
        HW_XSTAT(rx_xon_packets),
        HW_XSTAT(tx_xoff_packets),
        HW_XSTAT(rx_xoff_packets),

        HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
        HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
        HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
        HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
                           sizeof(rte_txgbe_stats_strings[0]))

/* Per-priority statistics */
#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
        UP_XSTAT(rx_up_packets),
        UP_XSTAT(tx_up_packets),
        UP_XSTAT(rx_up_bytes),
        UP_XSTAT(tx_up_bytes),
        UP_XSTAT(rx_up_drop_packets),

        UP_XSTAT(tx_up_xon_packets),
        UP_XSTAT(rx_up_xon_packets),
        UP_XSTAT(tx_up_xoff_packets),
        UP_XSTAT(rx_up_xoff_packets),
        UP_XSTAT(rx_up_dropped),
        UP_XSTAT(rx_up_mbuf_alloc_errors),
        UP_XSTAT(tx_up_xon2off_packets),
};

#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
                           sizeof(rte_txgbe_up_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
        QP_XSTAT(rx_qp_packets),
        QP_XSTAT(tx_qp_packets),
        QP_XSTAT(rx_qp_bytes),
        QP_XSTAT(tx_qp_bytes),
        QP_XSTAT(rx_qp_mc_packets),
};

#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
                           sizeof(rte_txgbe_qp_strings[0]))
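
/*
 * Sketch (illustration only, not part of the driver): resolving one
 * name/offset entry from the tables above to a counter value.  It
 * assumes rte_txgbe_xstats_name_off pairs a name with a byte offset
 * into struct txgbe_hw_stats, which is how the HW_XSTAT() family of
 * macros builds the tables; the member names here are assumptions.
 */
static inline uint64_t
txgbe_xstat_read_sketch(const struct txgbe_hw_stats *stats,
                        const struct rte_txgbe_xstats_name_off *entry)
{
        /* each counter is a 64-bit field at the recorded byte offset */
        return *(const uint64_t *)((const char *)stats + entry->offset);
}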

static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
        switch (hw->phy.type) {
        case txgbe_phy_sfp_avago:
        case txgbe_phy_sfp_ftl:
        case txgbe_phy_sfp_intel:
        case txgbe_phy_sfp_unknown:
        case txgbe_phy_sfp_tyco_passive:
        case txgbe_phy_sfp_unknown_passive:
                return 1;
        default:
                return 0;
        }
}

static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = hw->mac.reset_hw(hw);

        ctrl_ext = rd32(hw, TXGBE_PORTCTL);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
        wr32(hw, TXGBE_PORTCTL, ctrl_ext);
        txgbe_flush(hw);

        if (status == TXGBE_ERR_SFP_NOT_PRESENT)
                status = 0;
        return status;
}

static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        wr32(hw, TXGBE_IENMISC, intr->mask_misc);
        wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
        wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
        txgbe_flush(hw);
}

static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
        wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
        wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
        txgbe_flush(hw);
}

static int
txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_id,
                                  uint8_t stat_idx,
                                  uint8_t is_rx)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        struct txgbe_stat_mappings *stat_mappings =
                TXGBE_DEV_STAT_MAPPINGS(eth_dev);
        uint32_t qsmr_mask = 0;
        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
        uint32_t q_map;
        uint8_t n, offset;

        if (hw->mac.type != txgbe_mac_raptor)
                return -ENOSYS;

        /* reject stat indexes that fall outside the valid field bits */
        if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
                return -EIO;

        PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);

        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= TXGBE_NB_STAT_MAPPING) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] &= ~clearing_mask;
        else
                stat_mappings->rqsm[n] &= ~clearing_mask;

        q_map = (uint32_t)stat_idx;
        q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
        qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] |= qsmr_mask;
        else
                stat_mappings->rqsm[n] |= qsmr_mask;

        PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
        PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
        return 0;
}
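
/*
 * Worked example (illustration only): assuming the conventional layout
 * of NB_QMAP_FIELDS_PER_QSM_REG == 4 fields of
 * QSM_REG_NB_BITS_PER_QMAP_FIELD == 8 bits per 32-bit register,
 * queue_id 10 lands in register n = 10 / 4 = 2 at field offset
 * 10 % 4 = 2, i.e. bits 16..23 of RQSMR/TQSM[2].
 */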

static void
txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
{
        int i;
        u8 bwgp;
        struct txgbe_dcb_tc_config *tc;

        UNREFERENCED_PARAMETER(hw);

        dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
        dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
        bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
        for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
                tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
                tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
                tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
                tc->pfc = txgbe_dcb_pfc_disabled;
        }

        /* Initialize default user to priority mapping, UPx->TC0 */
        tc = &dcb_config->tc_config[0];
        tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
        tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
        for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
                dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
                dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
        }
        dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
        dcb_config->pfc_mode_enable = false;
        dcb_config->vt_mode = true;
        dcb_config->round_robin_enable = false;
        /* support all DCB capabilities */
        dcb_config->support.capabilities = 0xFF;
}
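
/*
 * Worked arithmetic (illustration only): assuming TXGBE_DCB_TC_MAX == 8,
 * bwgp = 100 / 8 = 12, and "bwgp + (i & 1)" alternates 12/13 across the
 * eight TCs, so the per-TC percentages sum to 4 * 12 + 4 * 13 = 100.
 */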

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
txgbe_swfw_lock_reset(struct txgbe_hw *hw)
{
        uint16_t mask;

        /*
         * These are trickier since they are common to all ports; but
         * swfw_sync retries last long enough (1s) to be almost sure that
         * if the lock cannot be taken, it is due to an improper hold of
         * the semaphore.
         */
        mask = TXGBE_MNGSEM_SWPHY |
               TXGBE_MNGSEM_SWMBX |
               TXGBE_MNGSEM_SWFLASH;
        if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");

        hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
        struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
        struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
        struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        uint16_t csum;
        int err, i, ret;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &txgbe_eth_dev_ops;
        eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
        eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
        eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
        eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct txgbe_tx_queue *txq;
                /* The TX queue function in primary was set by the last queue
                 * initialized; a Tx queue may not have been initialized by
                 * the primary process.
                 */
                if (eth_dev->data->tx_queues) {
                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
                        txgbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default TX function if we get here */
                        PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
                                     "Using default TX function.");
                }

                txgbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->allow_unsupported_sfp = 1;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
                16, TXGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = txgbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        /* Unlock any pending hardware semaphore */
        txgbe_swfw_lock_reset(hw);

        /* Initialize DCB configuration */
        memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
        txgbe_dcb_init(hw, dcb_config);

        /* Get Hardware Flow Control setting */
        hw->fc.requested_mode = txgbe_fc_full;
        hw->fc.current_mode = txgbe_fc_full;
        hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
        for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
                hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
                hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
        }
        hw->fc.send_xon = 1;

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, &csum);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);

        /*
         * Devices with copper phys will fail to initialise if txgbe_init_hw()
         * is called too soon after the kernel driver unbinding/binding occurs.
         * The failure occurs in txgbe_identify_phy() for all devices,
         * but for non-copper devices, txgbe_identify_sfp_module() is
         * also called. See txgbe_identify_phy(). The reason for the
         * failure is not known, and only occurs when virtualisation features
         * are disabled in the BIOS. A delay of 200ms was found to be enough by
         * trial-and-error, and is doubled to be safe.
         */
        if (err && hw->phy.media_type == txgbe_media_type_copper) {
                rte_delay_ms(200);
                err = hw->mac.init_hw(hw);
        }

        if (err == TXGBE_ERR_SFP_NOT_PRESENT)
                err = 0;

        if (err == TXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                             "LOM.  Please be aware there may be issues associated "
                             "with your hardware.");
                PMD_INIT_LOG(ERR, "If you are experiencing problems "
                             "please contact your hardware representative "
                             "who provided you with this hardware.");
        } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
                PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        }
        if (err) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* Reset the hw statistics */
        txgbe_dev_stats_reset(eth_dev);

        /* disable interrupt */
        txgbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store "
                             "MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
                        RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
                return -ENOMEM;
        }

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* initialize the hw strip bitmap */
        memset(hwstrip, 0, sizeof(*hwstrip));

        /* initialize PF if max_vfs not zero */
        ret = txgbe_pf_host_init(eth_dev);
        if (ret) {
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                rte_free(eth_dev->data->hash_mac_addrs);
                eth_dev->data->hash_mac_addrs = NULL;
                return ret;
        }

        ctrl_ext = rd32(hw, TXGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
        wr32(hw, TXGBE_PORTCTL, ctrl_ext);
        txgbe_flush(hw);

        if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
                             (int)hw->mac.type, (int)hw->phy.type,
                             (int)hw->phy.sfp_type);
        else
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                             (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   txgbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable support intr */
        txgbe_enable_intr(eth_dev);

        /* initialize filter info */
        memset(filter_info, 0,
               sizeof(struct txgbe_filter_info));

        /* initialize 5tuple filter list */
        TAILQ_INIT(&filter_info->fivetuple_list);

        /* initialize flow director filter list & hash */
        txgbe_fdir_filter_init(eth_dev);

        /* initialize l2 tunnel filter list & hash */
        txgbe_l2_tn_filter_init(eth_dev);

        /* initialize flow filter lists */
        txgbe_filterlist_init();

        /* initialize bandwidth configuration info */
        memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));

        return 0;
}

static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        txgbe_dev_close(eth_dev);

        return 0;
}

static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
        struct txgbe_5tuple_filter *p_5tuple;

        while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
                TAILQ_REMOVE(&filter_info->fivetuple_list,
                             p_5tuple,
                             entries);
                rte_free(p_5tuple);
        }
        memset(filter_info->fivetuple_mask, 0,
               sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);

        return 0;
}

static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
{
        struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
        struct txgbe_fdir_filter *fdir_filter;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_handle)
                rte_hash_free(fdir_info->hash_handle);

        while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
                TAILQ_REMOVE(&fdir_info->fdir_list,
                             fdir_filter,
                             entries);
                rte_free(fdir_filter);
        }

        return 0;
}

static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
        struct txgbe_l2_tn_filter *l2_tn_filter;

        if (l2_tn_info->hash_map)
                rte_free(l2_tn_info->hash_map);
        if (l2_tn_info->hash_handle)
                rte_hash_free(l2_tn_info->hash_handle);

        while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
                TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
                             l2_tn_filter,
                             entries);
                rte_free(l2_tn_filter);
        }

        return 0;
}

static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
        struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = TXGBE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct txgbe_atr_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        TAILQ_INIT(&fdir_info->fdir_list);
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", TDEV_NAME(eth_dev));
        fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_handle) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("txgbe",
                                          sizeof(struct txgbe_fdir_filter *) *
                                          TXGBE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                return -ENOMEM;
        }
        fdir_info->mask_added = FALSE;

        return 0;
}
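
/*
 * Sketch (illustration only, hypothetical helper): how a new filter
 * would typically be inserted using the hash table and map created
 * above, assuming the filter carries its txgbe_atr_input key in a
 * member named "input".  rte_hash_add_key() returns a slot index,
 * which doubles as the hash_map[] position, so later lookups can go
 * key -> index -> filter.
 */
static inline int
txgbe_fdir_insert_sketch(struct txgbe_hw_fdir_info *fdir_info,
                         struct txgbe_fdir_filter *filter)
{
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_handle, &filter->input);
        if (ret < 0)
                return ret;     /* table full or invalid parameters */
        fdir_info->hash_map[ret] = filter;
        TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, entries);
        return 0;
}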

static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
{
        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
        char l2_tn_hash_name[RTE_HASH_NAMESIZE];
        struct rte_hash_parameters l2_tn_hash_params = {
                .name = l2_tn_hash_name,
                .entries = TXGBE_MAX_L2_TN_FILTER_NUM,
                .key_len = sizeof(struct txgbe_l2_tn_key),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        TAILQ_INIT(&l2_tn_info->l2_tn_list);
        snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
                 "l2_tn_%s", TDEV_NAME(eth_dev));
        l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
        if (!l2_tn_info->hash_handle) {
                PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
                return -EINVAL;
        }
        l2_tn_info->hash_map = rte_zmalloc("txgbe",
                                   sizeof(struct txgbe_l2_tn_filter *) *
                                   TXGBE_MAX_L2_TN_FILTER_NUM,
                                   0);
        if (!l2_tn_info->hash_map) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate memory for L2 TN hash map!");
                return -ENOMEM;
        }
        l2_tn_info->e_tag_en = FALSE;
        l2_tn_info->e_tag_fwd_en = FALSE;
        l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;

        return 0;
}

static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *pf_ethdev;
        struct rte_eth_devargs eth_da;
        int retval;

        if (pci_dev->device.devargs) {
                retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
                                &eth_da);
                if (retval)
                        return retval;
        } else {
                memset(&eth_da, 0, sizeof(eth_da));
        }

        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct txgbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_txgbe_dev_init, NULL);

        if (retval || eth_da.nb_representor_ports < 1)
                return retval;

        pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (pf_ethdev == NULL)
                return -ENODEV;

        return 0;
}

static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
                return -ENODEV;

        return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

static struct rte_pci_driver rte_txgbe_pmd = {
        .id_table = pci_id_txgbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_txgbe_pci_probe,
        .remove = eth_txgbe_pci_remove,
};

static int
txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

        vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
        vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
        vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;

        return 0;
}
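
/*
 * Worked example (illustration only): for vlan_id 1234,
 * vid_idx = (1234 >> 5) & 0x7F = 38 and vid_bit = 1 << (1234 & 0x1F)
 * = 1 << 18, so bit 18 of VLANTBL[38] controls that VLAN.
 */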

static void
txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_rx_queue *rxq;
        bool restart;
        uint32_t rxcfg, rxbal, rxbah;

        if (on)
                txgbe_vlan_hw_strip_enable(dev, queue);
        else
                txgbe_vlan_hw_strip_disable(dev, queue);

        rxq = dev->data->rx_queues[queue];
        rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
        if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & TXGBE_RXCFG_ENA) &&
                        !(rxcfg & TXGBE_RXCFG_VLAN);
                rxcfg |= TXGBE_RXCFG_VLAN;
        } else {
                restart = (rxcfg & TXGBE_RXCFG_ENA) &&
                        (rxcfg & TXGBE_RXCFG_VLAN);
                rxcfg &= ~TXGBE_RXCFG_VLAN;
        }
        rxcfg &= ~TXGBE_RXCFG_ENA;

        if (restart) {
                /* set vlan strip for ring */
                txgbe_dev_rx_queue_stop(dev, queue);
                wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
                wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
                wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
                txgbe_dev_rx_queue_start(dev, queue);
        }
}

static int
txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
                    enum rte_vlan_type vlan_type,
                    uint16_t tpid)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        int ret = 0;
        uint32_t portctrl, vlan_ext, qinq;

        portctrl = rd32(hw, TXGBE_PORTCTL);

        vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
        switch (vlan_type) {
        case ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, TXGBE_VLANCTL,
                                TXGBE_VLANCTL_TPID_MASK,
                                TXGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, TXGBE_DMATXCTRL,
                                TXGBE_DMATXCTRL_TPID_MASK,
                                TXGBE_DMATXCTRL_TPID(tpid));
                } else {
                        ret = -ENOTSUP;
                        PMD_DRV_LOG(ERR, "Inner type is not supported"
                                    " by single VLAN");
                }

                if (qinq) {
                        wr32m(hw, TXGBE_TAGTPID(0),
                                TXGBE_TAGTPID_LSB_MASK,
                                TXGBE_TAGTPID_LSB(tpid));
                }
                break;
        case ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the high 16 bits are valid */
                        wr32m(hw, TXGBE_EXTAG,
                                TXGBE_EXTAG_VLAN_MASK,
                                TXGBE_EXTAG_VLAN(tpid));
                } else {
                        wr32m(hw, TXGBE_VLANCTL,
                                TXGBE_VLANCTL_TPID_MASK,
                                TXGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, TXGBE_DMATXCTRL,
                                TXGBE_DMATXCTRL_TPID_MASK,
                                TXGBE_DMATXCTRL_TPID(tpid));
                }

                if (qinq) {
                        wr32m(hw, TXGBE_TAGTPID(0),
                                TXGBE_TAGTPID_MSB_MASK,
                                TXGBE_TAGTPID_MSB(tpid));
                }
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
                return -EINVAL;
        }

        return ret;
}

void
txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t vlnctrl;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Disable */
        vlnctrl = rd32(hw, TXGBE_VLANCTL);
        vlnctrl &= ~TXGBE_VLANCTL_VFE;
        wr32(hw, TXGBE_VLANCTL, vlnctrl);
}

void
txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
        uint32_t vlnctrl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Enable */
        vlnctrl = rd32(hw, TXGBE_VLANCTL);
        vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
        vlnctrl |= TXGBE_VLANCTL_VFE;
        wr32(hw, TXGBE_VLANCTL, vlnctrl);

        /* write whatever is in local vfta copy */
        for (i = 0; i < TXGBE_VFTA_SIZE; i++)
                wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
        struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
        struct txgbe_rx_queue *rxq;

        if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
                return;

        if (on)
                TXGBE_SET_HWSTRIP(hwstrip, queue);
        else
                TXGBE_CLEAR_HWSTRIP(hwstrip, queue);

        if (queue >= dev->data->nb_rx_queues)
                return;

        rxq = dev->data->rx_queues[queue];

        if (on) {
                rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
        } else {
                rxq->vlan_flags = PKT_RX_VLAN;
                rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
        }
}

static void
txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, TXGBE_RXCFG(queue));
        ctrl &= ~TXGBE_RXCFG_VLAN;
        wr32(hw, TXGBE_RXCFG(queue), ctrl);

        /* record the setting for HW strip per queue */
        txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, TXGBE_RXCFG(queue));
        ctrl |= TXGBE_RXCFG_VLAN;
        wr32(hw, TXGBE_RXCFG(queue), ctrl);

        /* record the setting for HW strip per queue */
        txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, TXGBE_PORTCTL);
        ctrl &= ~TXGBE_PORTCTL_VLANEXT;
        ctrl &= ~TXGBE_PORTCTL_QINQ;
        wr32(hw, TXGBE_PORTCTL, ctrl);
}

static void
txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
        struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, TXGBE_PORTCTL);
        ctrl |= TXGBE_PORTCTL_VLANEXT;
        if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
            txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
                ctrl |= TXGBE_PORTCTL_QINQ;
        wr32(hw, TXGBE_PORTCTL, ctrl);
}

void
txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
        struct txgbe_rx_queue *rxq;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        txgbe_vlan_strip_queue_set(dev, i, 1);
                else
                        txgbe_vlan_strip_queue_set(dev, i, 0);
        }
}

void
txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
        uint16_t i;
        struct rte_eth_rxmode *rxmode;
        struct txgbe_rx_queue *rxq;

        if (mask & ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
                if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
}

static int
txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;

        if (mask & ETH_VLAN_STRIP_MASK)
                txgbe_vlan_hw_strip_config(dev);

        if (mask & ETH_VLAN_FILTER_MASK) {
                if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                        txgbe_vlan_hw_filter_enable(dev);
                else
                        txgbe_vlan_hw_filter_disable(dev);
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
                        txgbe_vlan_hw_extend_enable(dev);
                else
                        txgbe_vlan_hw_extend_disable(dev);
        }

        return 0;
}

static int
txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        txgbe_config_vlan_strip_on_all_queues(dev, mask);

        txgbe_vlan_offload_config(dev, mask);

        return 0;
}

static void
txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        /* VLANCTL: enable vlan filtering and allow all vlan tags through */
        uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);

        vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
        wr32(hw, TXGBE_VLANCTL, vlanctrl);
}

static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        switch (nb_rx_q) {
        case 1:
        case 2:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
                break;
        case 4:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
        }

        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
                TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
        RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
                pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
        return 0;
}
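
/*
 * Worked example (illustration only): assuming TXGBE_MAX_RX_QUEUE_NUM
 * == 128, ETH_32_POOLS gives nb_q_per_pool = 128 / 32 = 4; with
 * max_vfs == 31, the PF's default pool queues then start at
 * def_pool_q_idx = 31 * 4 = 124.
 */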
1265
1266 static int
1267 txgbe_check_mq_mode(struct rte_eth_dev *dev)
1268 {
1269         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1270         uint16_t nb_rx_q = dev->data->nb_rx_queues;
1271         uint16_t nb_tx_q = dev->data->nb_tx_queues;
1272
1273         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1274                 /* check multi-queue mode */
1275                 switch (dev_conf->rxmode.mq_mode) {
1276                 case ETH_MQ_RX_VMDQ_DCB:
1277                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
1278                         break;
1279                 case ETH_MQ_RX_VMDQ_DCB_RSS:
1280                         /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
1281                         PMD_INIT_LOG(ERR, "SRIOV active,"
1282                                         " unsupported mq_mode rx %d.",
1283                                         dev_conf->rxmode.mq_mode);
1284                         return -EINVAL;
1285                 case ETH_MQ_RX_RSS:
1286                 case ETH_MQ_RX_VMDQ_RSS:
1287                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1288                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1289                                 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1290                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1291                                                 " invalid queue number"
1292                                                 " for VMDQ RSS, allowed"
1293                                                 " value are 1, 2 or 4.");
1294                                         return -EINVAL;
1295                                 }
1296                         break;
1297                 case ETH_MQ_RX_VMDQ_ONLY:
1298                 case ETH_MQ_RX_NONE:
1299                         /* if nothing mq mode configure, use default scheme */
1300                         dev->data->dev_conf.rxmode.mq_mode =
1301                                 ETH_MQ_RX_VMDQ_ONLY;
1302                         break;
1303                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1304                         /* SRIOV only works in VMDq enable mode */
1305                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1306                                         " wrong mq_mode rx %d.",
1307                                         dev_conf->rxmode.mq_mode);
1308                         return -EINVAL;
1309                 }
1310
1311                 switch (dev_conf->txmode.mq_mode) {
1312                 case ETH_MQ_TX_VMDQ_DCB:
1313                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1314                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1315                         break;
1316                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1317                         dev->data->dev_conf.txmode.mq_mode =
1318                                 ETH_MQ_TX_VMDQ_ONLY;
1319                         break;
1320                 }
1321
1322                 /* check valid queue number */
1323                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1324                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1325                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1326                                         " nb_rx_q=%d nb_tx_q=%d queue number"
1327                                         " must be less than or equal to %d.",
1328                                         nb_rx_q, nb_tx_q,
1329                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1330                         return -EINVAL;
1331                 }
1332         } else {
1333                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1334                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1335                                           " not supported.");
1336                         return -EINVAL;
1337                 }
1338                 /* check configuration for vmdb+dcb mode */
1339                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1340                         const struct rte_eth_vmdq_dcb_conf *conf;
1341
1342                         if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1343                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1344                                                 TXGBE_VMDQ_DCB_NB_QUEUES);
1345                                 return -EINVAL;
1346                         }
1347                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1348                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1349                                conf->nb_queue_pools == ETH_32_POOLS)) {
1350                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1351                                                 " nb_queue_pools must be %d or %d.",
1352                                                 ETH_16_POOLS, ETH_32_POOLS);
1353                                 return -EINVAL;
1354                         }
1355                 }
1356                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1357                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
1358
1359                         if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1360                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1361                                                  TXGBE_VMDQ_DCB_NB_QUEUES);
1362                                 return -EINVAL;
1363                         }
1364                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1365                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1366                                conf->nb_queue_pools == ETH_32_POOLS)) {
1367                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1368                                                 " nb_queue_pools != %d and"
1369                                                 " nb_queue_pools != %d.",
1370                                                 ETH_16_POOLS, ETH_32_POOLS);
1371                                 return -EINVAL;
1372                         }
1373                 }
1374
1375                 /* For DCB mode check our configuration before we go further */
1376                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1377                         const struct rte_eth_dcb_rx_conf *conf;
1378
1379                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1380                         if (!(conf->nb_tcs == ETH_4_TCS ||
1381                                conf->nb_tcs == ETH_8_TCS)) {
1382                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1383                                                 " and nb_tcs != %d.",
1384                                                 ETH_4_TCS, ETH_8_TCS);
1385                                 return -EINVAL;
1386                         }
1387                 }
1388
1389                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1390                         const struct rte_eth_dcb_tx_conf *conf;
1391
1392                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1393                         if (!(conf->nb_tcs == ETH_4_TCS ||
1394                                conf->nb_tcs == ETH_8_TCS)) {
1395                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1396                                                 " and nb_tcs != %d.",
1397                                                 ETH_4_TCS, ETH_8_TCS);
1398                                 return -EINVAL;
1399                         }
1400                 }
1401         }
1402         return 0;
1403 }
1404
1405 static int
1406 txgbe_dev_configure(struct rte_eth_dev *dev)
1407 {
1408         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1409         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1410         int ret;
1411
1412         PMD_INIT_FUNC_TRACE();
1413
1414         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1415                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1416
1417         /* multiple queue mode checking */
1418         ret = txgbe_check_mq_mode(dev);
1419         if (ret != 0) {
1420                 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1421                             ret);
1422                 return ret;
1423         }
1424
1425         /* set flag to update link status after init */
1426         intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1427
1428         /*
1429          * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
1430          * allocation preconditions, it will be reset.
1431          */
1432         adapter->rx_bulk_alloc_allowed = true;
1433
1434         return 0;
1435 }
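/*
 * Editor's note: a minimal, hypothetical usage sketch (not part of the
 * driver) of the configure path above. When the application requests RSS,
 * txgbe_dev_configure() forces DEV_RX_OFFLOAD_RSS_HASH on:
 *
 *     struct rte_eth_conf port_conf = { 0 };
 *     port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *     port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
 *     if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf) < 0)
 *             rte_exit(EXIT_FAILURE, "cannot configure port\n");
 *
 * port_id, nb_rxq and nb_txq are placeholders chosen for illustration.
 */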
1436
1437 static void
1438 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1439 {
1440         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1441         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1442         uint32_t gpie;
1443
1444         gpie = rd32(hw, TXGBE_GPIOINTEN);
1445         gpie |= TXGBE_GPIOBIT_6;
1446         wr32(hw, TXGBE_GPIOINTEN, gpie);
1447         intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1448 }
1449
1450 int
1451 txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
1452                         uint16_t tx_rate, uint64_t q_msk)
1453 {
1454         struct txgbe_hw *hw;
1455         struct txgbe_vf_info *vfinfo;
1456         struct rte_eth_link link;
1457         uint8_t  nb_q_per_pool;
1458         uint32_t queue_stride;
1459         uint32_t queue_idx, idx = 0, vf_idx;
1460         uint32_t queue_end;
1461         uint16_t total_rate = 0;
1462         struct rte_pci_device *pci_dev;
1463         int ret;
1464
1465         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1466         ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
1467         if (ret < 0)
1468                 return ret;
1469
1470         if (vf >= pci_dev->max_vfs)
1471                 return -EINVAL;
1472
1473         if (tx_rate > link.link_speed)
1474                 return -EINVAL;
1475
1476         if (q_msk == 0)
1477                 return 0;
1478
1479         hw = TXGBE_DEV_HW(dev);
1480         vfinfo = *(TXGBE_DEV_VFDATA(dev));
1481         nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1482         queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1483         queue_idx = vf * queue_stride;
1484         queue_end = queue_idx + nb_q_per_pool - 1;
1485         if (queue_end >= hw->mac.max_tx_queues)
1486                 return -EINVAL;
1487
1488         if (vfinfo) {
1489                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
1490                         if (vf_idx == vf)
1491                                 continue;
1492                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
1493                                 idx++)
1494                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
1495                 }
1496         } else {
1497                 return -EINVAL;
1498         }
1499
1500         /* Store tx_rate for this vf. */
1501         for (idx = 0; idx < nb_q_per_pool; idx++) {
1502                 if (((uint64_t)0x1 << idx) & q_msk) {
1503                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
1504                                 vfinfo[vf].tx_rate[idx] = tx_rate;
1505                         total_rate += tx_rate;
1506                 }
1507         }
1508
1509         if (total_rate > dev->data->dev_link.link_speed) {
1510                 /* Reset the stored Tx rate of the VF if the total
1511                  * exceeds the link speed.
1512                  */
1513                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
1514                 return -EINVAL;
1515         }
1516
1517         /* Set ARBTXRATE of each queue/pool for this VF */
1518         for (; queue_idx <= queue_end; queue_idx++) {
1519                 if (0x1 & q_msk)
1520                         txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
1521                 q_msk = q_msk >> 1;
1522         }
1523
1524         return 0;
1525 }
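/*
 * Editor's note: hypothetical sketch (not part of the driver) of building
 * the q_msk argument for txgbe_set_vf_rate_limit() above; bit n selects
 * queue n relative to the VF's pool base:
 *
 *     uint8_t nb_q = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
 *     uint64_t q_msk = (nb_q >= 64) ? UINT64_MAX : ((1ULL << nb_q) - 1);
 *     txgbe_set_vf_rate_limit(dev, vf, 1000, q_msk); // 1000 Mbps, all queues
 */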
1526
1527 /*
1528  * Configure device link speed and setup link.
1529  * It returns 0 on success.
1530  */
1531 static int
1532 txgbe_dev_start(struct rte_eth_dev *dev)
1533 {
1534         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1535         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1536         struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1537         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1538         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1539         uint32_t intr_vector = 0;
1540         int err;
1541         bool link_up = false, negotiate = 0;
1542         uint32_t speed = 0;
1543         uint32_t allowed_speeds = 0;
1544         int mask = 0;
1545         int status;
1546         uint16_t vf, idx;
1547         uint32_t *link_speeds;
1548
1549         PMD_INIT_FUNC_TRACE();
1550
1551         /* TXGBE devices don't support:
1552          *    - half duplex (checked afterwards for valid speeds)
1553          *    - fixed speed: TODO implement
1554          */
1555         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1556                 PMD_INIT_LOG(ERR,
1557                 "Invalid link_speeds for port %u, fixed speed not supported",
1558                                 dev->data->port_id);
1559                 return -EINVAL;
1560         }
1561
1562         /* Stop the link setup handler before resetting the HW. */
1563         rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1564
1565         /* disable uio/vfio intr/eventfd mapping */
1566         rte_intr_disable(intr_handle);
1567
1568         /* stop adapter */
1569         hw->adapter_stopped = 0;
1570         txgbe_stop_hw(hw);
1571
1572         /* reinitialize the adapter;
1573          * this calls reset and start
1574          */
1575         hw->nb_rx_queues = dev->data->nb_rx_queues;
1576         hw->nb_tx_queues = dev->data->nb_tx_queues;
1577         status = txgbe_pf_reset_hw(hw);
1578         if (status != 0)
1579                 return -1;
1580         hw->mac.start_hw(hw);
1581         hw->mac.get_link_status = true;
1582
1583         /* configure PF module if SRIOV enabled */
1584         txgbe_pf_host_configure(dev);
1585
1586         txgbe_dev_phy_intr_setup(dev);
1587
1588         /* check and configure queue intr-vector mapping */
1589         if ((rte_intr_cap_multiple(intr_handle) ||
1590              !RTE_ETH_DEV_SRIOV(dev).active) &&
1591             dev->data->dev_conf.intr_conf.rxq != 0) {
1592                 intr_vector = dev->data->nb_rx_queues;
1593                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1594                         return -1;
1595         }
1596
1597         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1598                 intr_handle->intr_vec =
1599                         rte_zmalloc("intr_vec",
1600                                     dev->data->nb_rx_queues * sizeof(int), 0);
1601                 if (intr_handle->intr_vec == NULL) {
1602                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1603                                      " intr_vec", dev->data->nb_rx_queues);
1604                         return -ENOMEM;
1605                 }
1606         }
1607
1608         /* configure MSI-X for sleep until Rx interrupt */
1609         txgbe_configure_msix(dev);
1610
1611         /* initialize transmission unit */
1612         txgbe_dev_tx_init(dev);
1613
1614         /* This can fail when allocating mbufs for descriptor rings */
1615         err = txgbe_dev_rx_init(dev);
1616         if (err) {
1617                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1618                 goto error;
1619         }
1620
1621         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1622                 ETH_VLAN_EXTEND_MASK;
1623         err = txgbe_vlan_offload_config(dev, mask);
1624         if (err) {
1625                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1626                 goto error;
1627         }
1628
1629         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1630                 /* Enable vlan filtering for VMDq */
1631                 txgbe_vmdq_vlan_hw_filter_enable(dev);
1632         }
1633
1634         /* Configure DCB hw */
1635         txgbe_configure_pb(dev);
1636         txgbe_configure_port(dev);
1637         txgbe_configure_dcb(dev);
1638
1639         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1640                 err = txgbe_fdir_configure(dev);
1641                 if (err)
1642                         goto error;
1643         }
1644
1645         /* Restore vf rate limit */
1646         if (vfinfo != NULL) {
1647                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
1648                         for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
1649                                 if (vfinfo[vf].tx_rate[idx] != 0)
1650                                         txgbe_set_vf_rate_limit(dev, vf,
1651                                                 vfinfo[vf].tx_rate[idx],
1652                                                 1 << idx);
1653         }
1654
1655         err = txgbe_dev_rxtx_start(dev);
1656         if (err < 0) {
1657                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1658                 goto error;
1659         }
1660
1661         /* Skip link setup if loopback mode is enabled. */
1662         if (hw->mac.type == txgbe_mac_raptor &&
1663             dev->data->dev_conf.lpbk_mode)
1664                 goto skip_link_setup;
1665
1666         if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1667                 err = hw->mac.setup_sfp(hw);
1668                 if (err)
1669                         goto error;
1670         }
1671
1672         if (hw->phy.media_type == txgbe_media_type_copper) {
1673                 /* Turn on the copper */
1674                 hw->phy.set_phy_power(hw, true);
1675         } else {
1676                 /* Turn on the laser */
1677                 hw->mac.enable_tx_laser(hw);
1678         }
1679
1680         err = hw->mac.check_link(hw, &speed, &link_up, 0);
1681         if (err)
1682                 goto error;
1683         dev->data->dev_link.link_status = link_up;
1684
1685         err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1686         if (err)
1687                 goto error;
1688
1689         allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
1690                         ETH_LINK_SPEED_10G;
1691
1692         link_speeds = &dev->data->dev_conf.link_speeds;
1693         if (*link_speeds & ~allowed_speeds) {
1694                 PMD_INIT_LOG(ERR, "Invalid link setting");
1695                 goto error;
1696         }
1697
1698         speed = 0x0;
1699         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
1700                 speed = (TXGBE_LINK_SPEED_100M_FULL |
1701                          TXGBE_LINK_SPEED_1GB_FULL |
1702                          TXGBE_LINK_SPEED_10GB_FULL);
1703         } else {
1704                 if (*link_speeds & ETH_LINK_SPEED_10G)
1705                         speed |= TXGBE_LINK_SPEED_10GB_FULL;
1706                 if (*link_speeds & ETH_LINK_SPEED_5G)
1707                         speed |= TXGBE_LINK_SPEED_5GB_FULL;
1708                 if (*link_speeds & ETH_LINK_SPEED_2_5G)
1709                         speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1710                 if (*link_speeds & ETH_LINK_SPEED_1G)
1711                         speed |= TXGBE_LINK_SPEED_1GB_FULL;
1712                 if (*link_speeds & ETH_LINK_SPEED_100M)
1713                         speed |= TXGBE_LINK_SPEED_100M_FULL;
1714         }
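        /*
         * Editor's note: for example, link_speeds == (ETH_LINK_SPEED_10G |
         * ETH_LINK_SPEED_1G) yields speed == (TXGBE_LINK_SPEED_10GB_FULL |
         * TXGBE_LINK_SPEED_1GB_FULL), letting autonegotiation settle on the
         * fastest rate the peer also supports.
         */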
1715
1716         err = hw->mac.setup_link(hw, speed, link_up);
1717         if (err)
1718                 goto error;
1719
1720 skip_link_setup:
1721
1722         if (rte_intr_allow_others(intr_handle)) {
1723                 /* check if lsc interrupt is enabled */
1724                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1725                         txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1726                 else
1727                         txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1728                 txgbe_dev_macsec_interrupt_setup(dev);
1729                 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1730         } else {
1731                 rte_intr_callback_unregister(intr_handle,
1732                                              txgbe_dev_interrupt_handler, dev);
1733                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1734                         PMD_INIT_LOG(INFO, "LSC won't be enabled because"
1735                                      " interrupt multiplexing is unavailable");
1736         }
1737
1738         /* check if rxq interrupt is enabled */
1739         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1740             rte_intr_dp_is_en(intr_handle))
1741                 txgbe_dev_rxq_interrupt_setup(dev);
1742
1743         /* enable uio/vfio intr/eventfd mapping */
1744         rte_intr_enable(intr_handle);
1745
1746         /* re-enable interrupts, since they were masked by the HW reset */
1747         txgbe_enable_intr(dev);
1748         txgbe_l2_tunnel_conf(dev);
1749         txgbe_filter_restore(dev);
1750
1751         /*
1752          * Update the link status right before returning, because it may
1753          * start the link configuration process in a separate thread.
1754          */
1755         txgbe_dev_link_update(dev, 0);
1756
1757         wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1758
1759         txgbe_read_stats_registers(hw, hw_stats);
1760         hw->offset_loaded = 1;
1761
1762         return 0;
1763
1764 error:
1765         PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1766         txgbe_dev_clear_queues(dev);
1767         return -EIO;
1768 }
1769
1770 /*
1771  * Stop device: disable rx and tx functions to allow for reconfiguring.
1772  */
1773 static int
1774 txgbe_dev_stop(struct rte_eth_dev *dev)
1775 {
1776         struct rte_eth_link link;
1777         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1778         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1779         struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1780         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1781         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1782         int vf;
1783
1784         if (hw->adapter_stopped)
1785                 return 0;
1786
1787         PMD_INIT_FUNC_TRACE();
1788
1789         rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1790
1791         /* disable interrupts */
1792         txgbe_disable_intr(hw);
1793
1794         /* reset the NIC */
1795         txgbe_pf_reset_hw(hw);
1796         hw->adapter_stopped = 0;
1797
1798         /* stop adapter */
1799         txgbe_stop_hw(hw);
1800
1801         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1802                 vfinfo[vf].clear_to_send = false;
1803
1804         if (hw->phy.media_type == txgbe_media_type_copper) {
1805                 /* Turn off the copper */
1806                 hw->phy.set_phy_power(hw, false);
1807         } else {
1808                 /* Turn off the laser */
1809                 hw->mac.disable_tx_laser(hw);
1810         }
1811
1812         txgbe_dev_clear_queues(dev);
1813
1814         /* Clear stored conf */
1815         dev->data->scattered_rx = 0;
1816         dev->data->lro = 0;
1817
1818         /* Clear recorded link status */
1819         memset(&link, 0, sizeof(link));
1820         rte_eth_linkstatus_set(dev, &link);
1821
1822         if (!rte_intr_allow_others(intr_handle))
1823                 /* restore the default handler */
1824                 rte_intr_callback_register(intr_handle,
1825                                            txgbe_dev_interrupt_handler,
1826                                            (void *)dev);
1827
1828         /* Clean datapath event and queue/vec mapping */
1829         rte_intr_efd_disable(intr_handle);
1830         if (intr_handle->intr_vec != NULL) {
1831                 rte_free(intr_handle->intr_vec);
1832                 intr_handle->intr_vec = NULL;
1833         }
1834
1835         adapter->rss_reta_updated = 0;
1836         wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1837
1838         hw->adapter_stopped = true;
1839         dev->data->dev_started = 0;
1840
1841         return 0;
1842 }
1843
1844 /*
1845  * Set device link up: enable tx.
1846  */
1847 static int
1848 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1849 {
1850         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1851
1852         if (hw->phy.media_type == txgbe_media_type_copper) {
1853                 /* Turn on the copper */
1854                 hw->phy.set_phy_power(hw, true);
1855         } else {
1856                 /* Turn on the laser */
1857                 hw->mac.enable_tx_laser(hw);
1858                 txgbe_dev_link_update(dev, 0);
1859         }
1860
1861         return 0;
1862 }
1863
1864 /*
1865  * Set device link down: disable tx.
1866  */
1867 static int
1868 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1869 {
1870         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1871
1872         if (hw->phy.media_type == txgbe_media_type_copper) {
1873                 /* Turn off the copper */
1874                 hw->phy.set_phy_power(hw, false);
1875         } else {
1876                 /* Turn off the laser */
1877                 hw->mac.disable_tx_laser(hw);
1878                 txgbe_dev_link_update(dev, 0);
1879         }
1880
1881         return 0;
1882 }
1883
1884 /*
1885  * Reset and stop device.
1886  */
1887 static int
1888 txgbe_dev_close(struct rte_eth_dev *dev)
1889 {
1890         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1891         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1892         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1893         int retries = 0;
1894         int ret;
1895
1896         PMD_INIT_FUNC_TRACE();
1897
1898         txgbe_pf_reset_hw(hw);
1899
1900         ret = txgbe_dev_stop(dev);
1901
1902         txgbe_dev_free_queues(dev);
1903
1904         /* reprogram RAR[0] in case the user changed it. */
1905         txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1906
1907         /* Unlock any pending hardware semaphore */
1908         txgbe_swfw_lock_reset(hw);
1909
1910         /* disable uio intr before callback unregister */
1911         rte_intr_disable(intr_handle);
1912
1913         do {
1914                 ret = rte_intr_callback_unregister(intr_handle,
1915                                 txgbe_dev_interrupt_handler, dev);
1916                 if (ret >= 0 || ret == -ENOENT) {
1917                         break;
1918                 } else if (ret != -EAGAIN) {
1919                         PMD_INIT_LOG(ERR,
1920                                 "intr callback unregister failed: %d",
1921                                 ret);
1922                 }
1923                 rte_delay_ms(100);
1924         } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1925
1926         /* cancel the delayed handler before removing the device */
1927         rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1928
1929         /* uninitialize the PF if max_vfs is not zero */
1930         txgbe_pf_host_uninit(dev);
1931
1932         rte_free(dev->data->mac_addrs);
1933         dev->data->mac_addrs = NULL;
1934
1935         rte_free(dev->data->hash_mac_addrs);
1936         dev->data->hash_mac_addrs = NULL;
1937
1938         /* remove all the fdir filters & hash */
1939         txgbe_fdir_filter_uninit(dev);
1940
1941         /* remove all the L2 tunnel filters & hash */
1942         txgbe_l2_tn_filter_uninit(dev);
1943
1944         /* Remove all ntuple filters of the device */
1945         txgbe_ntuple_filter_uninit(dev);
1946
1947         return ret;
1948 }
1949
1950 /*
1951  * Reset PF device.
1952  */
1953 static int
1954 txgbe_dev_reset(struct rte_eth_dev *dev)
1955 {
1956         int ret;
1957
1958         /* When a DPDK PMD PF begins to reset the PF port, it should notify
1959          * all of its VFs so they stay aligned with it. The notification
1960          * mechanism is PMD specific; for the txgbe PF it is rather complex.
1961          * To avoid unexpected behavior in the VFs, resetting the PF with
1962          * SR-IOV active is currently not supported. It might be added later.
1963          */
1964         if (dev->data->sriov.active)
1965                 return -ENOTSUP;
1966
1967         ret = eth_txgbe_dev_uninit(dev);
1968         if (ret)
1969                 return ret;
1970
1971         ret = eth_txgbe_dev_init(dev, NULL);
1972
1973         return ret;
1974 }
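/*
 * Editor's note: hypothetical caller-side sketch (not part of the driver).
 * Since PF reset is refused while SR-IOV is active, a caller of
 * rte_eth_dev_reset() should be prepared for -ENOTSUP:
 *
 *     int ret = rte_eth_dev_reset(port_id);
 *     if (ret == -ENOTSUP)
 *             printf("port %u: reset unsupported with active VFs\n", port_id);
 */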
1975
1976 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1977         {                                                       \
1978                 uint64_t current_counter = rd32(hw, reg);       \
1979                 if (current_counter < last_counter)             \
1980                         current_counter += 0x100000000LL;       \
1981                 if (!hw->offset_loaded)                         \
1982                         last_counter = current_counter;         \
1983                 counter = current_counter - last_counter;       \
1984                 counter &= 0xFFFFFFFFLL;                        \
1985         }
1986
1987 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1988         {                                                                \
1989                 uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1990                 uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1991                 uint64_t current_counter = (current_counter_msb << 32) | \
1992                         current_counter_lsb;                             \
1993                 if (current_counter < last_counter)                      \
1994                         current_counter += 0x1000000000LL;               \
1995                 if (!hw->offset_loaded)                                  \
1996                         last_counter = current_counter;                  \
1997                 counter = current_counter - last_counter;                \
1998                 counter &= 0xFFFFFFFFFLL;                                \
1999         }
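/*
 * Editor's note: worked example for the 36-bit macro above. With
 * last_counter == 0xFFFFFFFF0 (16 below the 36-bit wraparound) and a new
 * raw reading of 0x10, the adjustment gives
 * 0x10 + 0x1000000000 - 0xFFFFFFFF0 = 0x20, i.e. 32 packets counted across
 * a single rollover. The 32-bit macro widens its local to uint64_t for the
 * same reason: the += 0x100000000LL adjustment must not be truncated.
 */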
2000
2001 void
2002 txgbe_read_stats_registers(struct txgbe_hw *hw,
2003                            struct txgbe_hw_stats *hw_stats)
2004 {
2005         unsigned int i;
2006
2007         /* QP Stats */
2008         for (i = 0; i < hw->nb_rx_queues; i++) {
2009                 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
2010                         hw->qp_last[i].rx_qp_packets,
2011                         hw_stats->qp[i].rx_qp_packets);
2012                 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
2013                         hw->qp_last[i].rx_qp_bytes,
2014                         hw_stats->qp[i].rx_qp_bytes);
2015                 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
2016                         hw->qp_last[i].rx_qp_mc_packets,
2017                         hw_stats->qp[i].rx_qp_mc_packets);
2018         }
2019
2020         for (i = 0; i < hw->nb_tx_queues; i++) {
2021                 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
2022                         hw->qp_last[i].tx_qp_packets,
2023                         hw_stats->qp[i].tx_qp_packets);
2024                 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
2025                         hw->qp_last[i].tx_qp_bytes,
2026                         hw_stats->qp[i].tx_qp_bytes);
2027         }
2028         /* PB Stats */
2029         for (i = 0; i < TXGBE_MAX_UP; i++) {
2030                 hw_stats->up[i].rx_up_xon_packets +=
2031                                 rd32(hw, TXGBE_PBRXUPXON(i));
2032                 hw_stats->up[i].rx_up_xoff_packets +=
2033                                 rd32(hw, TXGBE_PBRXUPXOFF(i));
2034                 hw_stats->up[i].tx_up_xon_packets +=
2035                                 rd32(hw, TXGBE_PBTXUPXON(i));
2036                 hw_stats->up[i].tx_up_xoff_packets +=
2037                                 rd32(hw, TXGBE_PBTXUPXOFF(i));
2038                 hw_stats->up[i].tx_up_xon2off_packets +=
2039                                 rd32(hw, TXGBE_PBTXUPOFF(i));
2040                 hw_stats->up[i].rx_up_dropped +=
2041                                 rd32(hw, TXGBE_PBRXMISS(i));
2042         }
2043         hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
2044         hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
2045         hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
2046         hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
2047
2048         /* DMA Stats */
2049         hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
2050         hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
2051
2052         hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
2053         hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
2054         hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
2055
2056         /* MAC Stats */
2057         hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
2058         hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
2059         hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
2060
2061         hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
2062         hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
2063         hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
2064
2065         hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
2066         hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
2067
2068         hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
2069         hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
2070         hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
2071         hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
2072         hw_stats->rx_size_512_to_1023_packets +=
2073                         rd64(hw, TXGBE_MACRX512TO1023L);
2074         hw_stats->rx_size_1024_to_max_packets +=
2075                         rd64(hw, TXGBE_MACRX1024TOMAXL);
2076         hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
2077         hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
2078         hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
2079         hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
2080         hw_stats->tx_size_512_to_1023_packets +=
2081                         rd64(hw, TXGBE_MACTX512TO1023L);
2082         hw_stats->tx_size_1024_to_max_packets +=
2083                         rd64(hw, TXGBE_MACTX1024TOMAXL);
2084
2085         hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
2086         hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
2087         hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
2088
2089         /* MNG Stats */
2090         hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
2091         hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
2092         hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
2093         hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
2094
2095         /* FCoE Stats */
2096         hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
2097         hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
2098         hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
2099         hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
2100         hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
2101         hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
2102         hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
2103
2104         /* Flow Director Stats */
2105         hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
2106         hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
2107         hw_stats->flow_director_added_filters +=
2108                 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
2109         hw_stats->flow_director_removed_filters +=
2110                 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
2111         hw_stats->flow_director_filter_add_errors +=
2112                 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
2113         hw_stats->flow_director_filter_remove_errors +=
2114                 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
2115
2116         /* MACsec Stats */
2117         hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
2118         hw_stats->tx_macsec_pkts_encrypted +=
2119                         rd32(hw, TXGBE_LSECTX_ENCPKT);
2120         hw_stats->tx_macsec_pkts_protected +=
2121                         rd32(hw, TXGBE_LSECTX_PROTPKT);
2122         hw_stats->tx_macsec_octets_encrypted +=
2123                         rd32(hw, TXGBE_LSECTX_ENCOCT);
2124         hw_stats->tx_macsec_octets_protected +=
2125                         rd32(hw, TXGBE_LSECTX_PROTOCT);
2126         hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
2127         hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
2128         hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
2129         hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
2130         hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
2131         hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
2132         hw_stats->rx_macsec_sc_pkts_unchecked +=
2133                         rd32(hw, TXGBE_LSECRX_UNCHKPKT);
2134         hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
2135         hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
2136         for (i = 0; i < 2; i++) {
2137                 hw_stats->rx_macsec_sa_pkts_ok +=
2138                         rd32(hw, TXGBE_LSECRX_OKPKT(i));
2139                 hw_stats->rx_macsec_sa_pkts_invalid +=
2140                         rd32(hw, TXGBE_LSECRX_INVPKT(i));
2141                 hw_stats->rx_macsec_sa_pkts_notvalid +=
2142                         rd32(hw, TXGBE_LSECRX_BADPKT(i));
2143         }
2144         hw_stats->rx_macsec_sa_pkts_unusedsa +=
2145                         rd32(hw, TXGBE_LSECRX_INVSAPKT);
2146         hw_stats->rx_macsec_sa_pkts_notusingsa +=
2147                         rd32(hw, TXGBE_LSECRX_BADSAPKT);
2148
2149         hw_stats->rx_total_missed_packets = 0;
2150         for (i = 0; i < TXGBE_MAX_UP; i++) {
2151                 hw_stats->rx_total_missed_packets +=
2152                         hw_stats->up[i].rx_up_dropped;
2153         }
2154 }
2155
2156 static int
2157 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2158 {
2159         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2160         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2161         struct txgbe_stat_mappings *stat_mappings =
2162                         TXGBE_DEV_STAT_MAPPINGS(dev);
2163         uint32_t i, j;
2164
2165         txgbe_read_stats_registers(hw, hw_stats);
2166
2167         if (stats == NULL)
2168                 return -EINVAL;
2169
2170         /* Fill out the rte_eth_stats statistics structure */
2171         stats->ipackets = hw_stats->rx_packets;
2172         stats->ibytes = hw_stats->rx_bytes;
2173         stats->opackets = hw_stats->tx_packets;
2174         stats->obytes = hw_stats->tx_bytes;
2175
2176         memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
2177         memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
2178         memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
2179         memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
2180         memset(&stats->q_errors, 0, sizeof(stats->q_errors));
2181         for (i = 0; i < TXGBE_MAX_QP; i++) {
2182                 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
2183                 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
2184                 uint32_t q_map;
2185
2186                 q_map = (stat_mappings->rqsm[n] >> offset)
2187                                 & QMAP_FIELD_RESERVED_BITS_MASK;
2188                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2189                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2190                 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
2191                 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
2192
2193                 q_map = (stat_mappings->tqsm[n] >> offset)
2194                                 & QMAP_FIELD_RESERVED_BITS_MASK;
2195                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2196                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2197                 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
2198                 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
2199         }
2200
2201         /* Rx Errors */
2202         stats->imissed  = hw_stats->rx_total_missed_packets;
2203         stats->ierrors  = hw_stats->rx_crc_errors +
2204                           hw_stats->rx_mac_short_packet_dropped +
2205                           hw_stats->rx_length_errors +
2206                           hw_stats->rx_undersize_errors +
2207                           hw_stats->rx_oversize_errors +
2208                           hw_stats->rx_drop_packets +
2209                           hw_stats->rx_illegal_byte_errors +
2210                           hw_stats->rx_error_bytes +
2211                           hw_stats->rx_fragment_errors +
2212                           hw_stats->rx_fcoe_crc_errors +
2213                           hw_stats->rx_fcoe_mbuf_allocation_errors;
2214
2215         /* Tx Errors */
2216         stats->oerrors  = 0;
2217         return 0;
2218 }
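/*
 * Editor's note: hypothetical application-side sketch (not part of the
 * driver) of consuming the statistics filled in above:
 *
 *     struct rte_eth_stats stats;
 *     if (rte_eth_stats_get(port_id, &stats) == 0)
 *             printf("rx %" PRIu64 " pkts, %" PRIu64 " missed, %" PRIu64
 *                    " errors\n", stats.ipackets, stats.imissed,
 *                    stats.ierrors);
 */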
2219
2220 static int
2221 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2222 {
2223         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2224         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2225
2226         /* HW registers are cleared on read */
2227         hw->offset_loaded = 0;
2228         txgbe_dev_stats_get(dev, NULL);
2229         hw->offset_loaded = 1;
2230
2231         /* Reset software totals */
2232         memset(hw_stats, 0, sizeof(*hw_stats));
2233
2234         return 0;
2235 }
2236
2237 /* This function calculates the number of xstats based on the current config */
2238 static unsigned
2239 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
2240 {
2241         int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2242         return TXGBE_NB_HW_STATS +
2243                TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
2244                TXGBE_NB_QP_STATS * nb_queues;
2245 }
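/*
 * Editor's note: hypothetical sketch (not part of the driver). The count
 * computed above backs the usual two-call xstats pattern:
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);    // query the count
 *     struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *     if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n)
 *             ...                // one entry per [hw], [p*] and [q*] stat
 */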
2246
2247 static inline int
2248 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2249 {
2250         int nb, st;
2251
2252         /* Extended stats from txgbe_hw_stats */
2253         if (id < TXGBE_NB_HW_STATS) {
2254                 snprintf(name, size, "[hw]%s",
2255                         rte_txgbe_stats_strings[id].name);
2256                 return 0;
2257         }
2258         id -= TXGBE_NB_HW_STATS;
2259
2260         /* Priority Stats */
2261         if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2262                 nb = id / TXGBE_NB_UP_STATS;
2263                 st = id % TXGBE_NB_UP_STATS;
2264                 snprintf(name, size, "[p%u]%s", nb,
2265                         rte_txgbe_up_strings[st].name);
2266                 return 0;
2267         }
2268         id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2269
2270         /* Queue Stats */
2271         if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2272                 nb = id / TXGBE_NB_QP_STATS;
2273                 st = id % TXGBE_NB_QP_STATS;
2274                 snprintf(name, size, "[q%u]%s", nb,
2275                         rte_txgbe_qp_strings[st].name);
2276                 return 0;
2277         }
2278         id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2279
2280         return -(int)(id + 1);
2281 }
2282
2283 static inline int
2284 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2285 {
2286         int nb, st;
2287
2288         /* Extended stats from txgbe_hw_stats */
2289         if (id < TXGBE_NB_HW_STATS) {
2290                 *offset = rte_txgbe_stats_strings[id].offset;
2291                 return 0;
2292         }
2293         id -= TXGBE_NB_HW_STATS;
2294
2295         /* Priority Stats */
2296         if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2297                 nb = id / TXGBE_NB_UP_STATS;
2298                 st = id % TXGBE_NB_UP_STATS;
2299                 *offset = rte_txgbe_up_strings[st].offset +
2300                         nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
2301                 return 0;
2302         }
2303         id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2304
2305         /* Queue Stats */
2306         if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2307                 nb = id / TXGBE_NB_QP_STATS;
2308                 st = id % TXGBE_NB_QP_STATS;
2309                 *offset = rte_txgbe_qp_strings[st].offset +
2310                         nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2311                 return 0;
2312         }
2313
2314         return -1;
2315 }
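/*
 * Editor's note: the two lookups above imply a flat ID layout: ids
 * [0, TXGBE_NB_HW_STATS) name the "[hw]" stats, the next
 * TXGBE_NB_UP_STATS * TXGBE_MAX_UP ids name the per-priority "[p%u]"
 * stats, and the remainder name the per-queue "[q%u]" stats.
 */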
2316
2317 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2318         struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2319 {
2320         unsigned int i, count;
2321
2322         count = txgbe_xstats_calc_num(dev);
2323         if (xstats_names == NULL)
2324                 return count;
2325
2326         /* Note: limit >= cnt_stats is checked upstream
2327          * in rte_eth_xstats_get_names()
2328          */
2329         limit = min(limit, count);
2330
2331         /* Extended stats from txgbe_hw_stats */
2332         for (i = 0; i < limit; i++) {
2333                 if (txgbe_get_name_by_id(i, xstats_names[i].name,
2334                         sizeof(xstats_names[i].name))) {
2335                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2336                         break;
2337                 }
2338         }
2339
2340         return i;
2341 }
2342
2343 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2344         struct rte_eth_xstat_name *xstats_names,
2345         const uint64_t *ids,
2346         unsigned int limit)
2347 {
2348         unsigned int i;
2349
2350         if (ids == NULL)
2351                 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2352
2353         for (i = 0; i < limit; i++) {
2354                 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2355                                 sizeof(xstats_names[i].name))) {
2356                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", (unsigned int)ids[i]);
2357                         return -1;
2358                 }
2359         }
2360
2361         return i;
2362 }
2363
2364 static int
2365 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2366                                          unsigned int limit)
2367 {
2368         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2369         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2370         unsigned int i, count;
2371
2372         txgbe_read_stats_registers(hw, hw_stats);
2373
2374         /* If this is a reset, xstats is NULL and we have cleared the
2375          * registers by reading them.
2376          */
2377         count = txgbe_xstats_calc_num(dev);
2378         if (xstats == NULL)
2379                 return count;
2380
2381         limit = min(limit, txgbe_xstats_calc_num(dev));
2382
2383         /* Extended stats from txgbe_hw_stats */
2384         for (i = 0; i < limit; i++) {
2385                 uint32_t offset = 0;
2386
2387                 if (txgbe_get_offset_by_id(i, &offset)) {
2388                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2389                         break;
2390                 }
2391                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2392                 xstats[i].id = i;
2393         }
2394
2395         return i;
2396 }
2397
2398 static int
2399 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2400                                          unsigned int limit)
2401 {
2402         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2403         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2404         unsigned int i, count;
2405
2406         txgbe_read_stats_registers(hw, hw_stats);
2407
2408         /* If this is a reset, values is NULL and we have cleared the
2409          * registers by reading them.
2410          */
2411         count = txgbe_xstats_calc_num(dev);
2412         if (values == NULL)
2413                 return count;
2414
2415         limit = min(limit, txgbe_xstats_calc_num(dev));
2416
2417         /* Extended stats from txgbe_hw_stats */
2418         for (i = 0; i < limit; i++) {
2419                 uint32_t offset;
2420
2421                 if (txgbe_get_offset_by_id(i, &offset)) {
2422                         PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2423                         break;
2424                 }
2425                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2426         }
2427
2428         return i;
2429 }
2430
2431 static int
2432 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2433                 uint64_t *values, unsigned int limit)
2434 {
2435         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2436         unsigned int i;
2437
2438         if (ids == NULL)
2439                 return txgbe_dev_xstats_get_(dev, values, limit);
2440
2441         for (i = 0; i < limit; i++) {
2442                 uint32_t offset;
2443
2444                 if (txgbe_get_offset_by_id(ids[i], &offset)) {
2445                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", (unsigned int)ids[i]);
2446                         break;
2447                 }
2448                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2449         }
2450
2451         return i;
2452 }
2453
2454 static int
2455 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2456 {
2457         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2458         struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2459
2460         /* HW registers are cleared on read */
2461         hw->offset_loaded = 0;
2462         txgbe_read_stats_registers(hw, hw_stats);
2463         hw->offset_loaded = 1;
2464
2465         /* Reset software totals */
2466         memset(hw_stats, 0, sizeof(*hw_stats));
2467
2468         return 0;
2469 }
2470
2471 static int
2472 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2473 {
2474         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2475         u16 eeprom_verh, eeprom_verl;
2476         u32 etrack_id;
2477         int ret;
2478
2479         hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
2480         hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
2481
2482         etrack_id = (eeprom_verh << 16) | eeprom_verl;
2483         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2484
2485         ret += 1; /* add the size of '\0' */
2486         if (fw_size < (u32)ret)
2487                 return ret;
2488         else
2489                 return 0;
2490 }
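/*
 * Editor's note: hypothetical caller-side sketch (not part of the driver).
 * A positive return above means the buffer was too small and reports the
 * size needed, matching the rte_eth_dev_fw_version_get() convention:
 *
 *     char fw[16];
 *     int need = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *     if (need > 0)
 *             ...                // retry with a buffer of 'need' bytes
 */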
2491
2492 static int
2493 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2494 {
2495         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2496         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2497
2498         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2499         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2500         dev_info->min_rx_bufsize = 1024;
2501         dev_info->max_rx_pktlen = 15872;
2502         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2503         dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2504         dev_info->max_vfs = pci_dev->max_vfs;
2505         dev_info->max_vmdq_pools = ETH_64_POOLS;
2506         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2507         dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2508         dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2509                                      dev_info->rx_queue_offload_capa);
2510         dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2511         dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2512
2513         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2514                 .rx_thresh = {
2515                         .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2516                         .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2517                         .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2518                 },
2519                 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2520                 .rx_drop_en = 0,
2521                 .offloads = 0,
2522         };
2523
2524         dev_info->default_txconf = (struct rte_eth_txconf) {
2525                 .tx_thresh = {
2526                         .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2527                         .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2528                         .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2529                 },
2530                 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2531                 .offloads = 0,
2532         };
2533
2534         dev_info->rx_desc_lim = rx_desc_lim;
2535         dev_info->tx_desc_lim = tx_desc_lim;
2536
2537         dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2538         dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2539         dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2540
2541         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2542         dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2543
2544         /* Driver-preferred Rx/Tx parameters */
2545         dev_info->default_rxportconf.burst_size = 32;
2546         dev_info->default_txportconf.burst_size = 32;
2547         dev_info->default_rxportconf.nb_queues = 1;
2548         dev_info->default_txportconf.nb_queues = 1;
2549         dev_info->default_rxportconf.ring_size = 256;
2550         dev_info->default_txportconf.ring_size = 256;
2551
2552         return 0;
2553 }
2554
2555 const uint32_t *
2556 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2557 {
2558         if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2559             dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2560             dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2561             dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2562                 return txgbe_get_supported_ptypes();
2563
2564         return NULL;
2565 }
2566
2567 void
2568 txgbe_dev_setup_link_alarm_handler(void *param)
2569 {
2570         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2571         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2572         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2573         u32 speed;
2574         bool autoneg = false;
2575
2576         speed = hw->phy.autoneg_advertised;
2577         if (!speed)
2578                 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2579
2580         hw->mac.setup_link(hw, speed, true);
2581
2582         intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2583 }
2584
2585 /* return 0 means link status changed, -1 means not changed */
2586 int
2587 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2588                             int wait_to_complete)
2589 {
2590         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2591         struct rte_eth_link link;
2592         u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2593         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2594         bool link_up;
2595         int err;
2596         int wait = 1;
2597
2598         memset(&link, 0, sizeof(link));
2599         link.link_status = ETH_LINK_DOWN;
2600         link.link_speed = ETH_SPEED_NUM_NONE;
2601         link.link_duplex = ETH_LINK_HALF_DUPLEX;
2602         link.link_autoneg = ETH_LINK_AUTONEG;
2603
2604         hw->mac.get_link_status = true;
2605
2606         if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2607                 return rte_eth_linkstatus_set(dev, &link);
2608
2609         /* no need to wait for completion when polling, or when the LSC interrupt is enabled */
2610         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2611                 wait = 0;
2612
2613         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2614
2615         if (err != 0) {
2616                 link.link_speed = ETH_SPEED_NUM_100M;
2617                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2618                 return rte_eth_linkstatus_set(dev, &link);
2619         }
2620
2621         if (link_up == 0) {
2622                 if (hw->phy.media_type == txgbe_media_type_fiber) {
2623                         intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2624                         rte_eal_alarm_set(10,
2625                                 txgbe_dev_setup_link_alarm_handler, dev);
2626                 }
2627                 return rte_eth_linkstatus_set(dev, &link);
2628         }
2629
2630         intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2631         link.link_status = ETH_LINK_UP;
2632         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2633
2634         switch (link_speed) {
2635         default:
2636         case TXGBE_LINK_SPEED_UNKNOWN:
2637                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2638                 link.link_speed = ETH_SPEED_NUM_100M;
2639                 break;
2640
2641         case TXGBE_LINK_SPEED_100M_FULL:
2642                 link.link_speed = ETH_SPEED_NUM_100M;
2643                 break;
2644
2645         case TXGBE_LINK_SPEED_1GB_FULL:
2646                 link.link_speed = ETH_SPEED_NUM_1G;
2647                 break;
2648
2649         case TXGBE_LINK_SPEED_2_5GB_FULL:
2650                 link.link_speed = ETH_SPEED_NUM_2_5G;
2651                 break;
2652
2653         case TXGBE_LINK_SPEED_5GB_FULL:
2654                 link.link_speed = ETH_SPEED_NUM_5G;
2655                 break;
2656
2657         case TXGBE_LINK_SPEED_10GB_FULL:
2658                 link.link_speed = ETH_SPEED_NUM_10G;
2659                 break;
2660         }
2661
2662         return rte_eth_linkstatus_set(dev, &link);
2663 }
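/*
 * Editor's note: hypothetical application-side sketch (not part of the
 * driver) of the non-blocking path through the function above:
 *
 *     struct rte_eth_link link;
 *     if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *         link.link_status == ETH_LINK_UP)
 *             printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */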
2664
2665 static int
2666 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2667 {
2668         return txgbe_dev_link_update_share(dev, wait_to_complete);
2669 }
2670
2671 static int
2672 txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2673 {
2674         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2675         uint32_t fctrl;
2676
2677         fctrl = rd32(hw, TXGBE_PSRCTL);
2678         fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2679         wr32(hw, TXGBE_PSRCTL, fctrl);
2680
2681         return 0;
2682 }
2683
2684 static int
2685 txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2686 {
2687         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2688         uint32_t fctrl;
2689
2690         fctrl = rd32(hw, TXGBE_PSRCTL);
2691         fctrl &= (~TXGBE_PSRCTL_UCP);
2692         if (dev->data->all_multicast == 1)
2693                 fctrl |= TXGBE_PSRCTL_MCP;
2694         else
2695                 fctrl &= (~TXGBE_PSRCTL_MCP);
2696         wr32(hw, TXGBE_PSRCTL, fctrl);
2697
2698         return 0;
2699 }
2700
2701 static int
2702 txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2703 {
2704         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2705         uint32_t fctrl;
2706
2707         fctrl = rd32(hw, TXGBE_PSRCTL);
2708         fctrl |= TXGBE_PSRCTL_MCP;
2709         wr32(hw, TXGBE_PSRCTL, fctrl);
2710
2711         return 0;
2712 }
2713
2714 static int
2715 txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2716 {
2717         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2718         uint32_t fctrl;
2719
2720         if (dev->data->promiscuous == 1)
2721                 return 0; /* must remain in all_multicast mode */
2722
2723         fctrl = rd32(hw, TXGBE_PSRCTL);
2724         fctrl &= (~TXGBE_PSRCTL_MCP);
2725         wr32(hw, TXGBE_PSRCTL, fctrl);
2726
2727         return 0;
2728 }
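/*
 * Editor's note: the four handlers above keep TXGBE_PSRCTL_MCP coherent
 * across modes; e.g. disabling promiscuous mode while all_multicast == 1
 * leaves multicast promiscuity on, and disabling allmulticast is a no-op
 * while promiscuous mode is active.
 */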
2729
2730 /**
2731  * It clears the interrupt cause and enables the LSC interrupt.
2732  * It is called only once during NIC initialization.
2733  *
2734  * @param dev
2735  *  Pointer to struct rte_eth_dev.
2736  * @param on
2737  *  Enable or Disable.
2738  *
2739  * @return
2740  *  - On success, zero.
2741  *  - On failure, a negative value.
2742  */
2743 static int
2744 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2745 {
2746         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2747
2748         txgbe_dev_link_status_print(dev);
2749         if (on)
2750                 intr->mask_misc |= TXGBE_ICRMISC_LSC;
2751         else
2752                 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2753
2754         return 0;
2755 }
2756
2757 /**
2758  * It enables the Rx queue interrupts.
2759  * It is called only once during NIC initialization.
2760  *
2761  * @param dev
2762  *  Pointer to struct rte_eth_dev.
2763  *
2764  * @return
2765  *  - On success, zero.
2766  *  - On failure, a negative value.
2767  */
2768 static int
2769 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2770 {
2771         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2772
2773         intr->mask[0] |= TXGBE_ICR_MASK;
2774         intr->mask[1] |= TXGBE_ICR_MASK;
2775
2776         return 0;
2777 }
2778
2779 /**
2780  * It enables the MACsec interrupt.
2781  * It is called only once during NIC initialization.
2782  *
2783  * @param dev
2784  *  Pointer to struct rte_eth_dev.
2785  *
2786  * @return
2787  *  - On success, zero.
2788  *  - On failure, a negative value.
2789  */
2790 static int
2791 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2792 {
2793         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2794
2795         intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2796
2797         return 0;
2798 }
2799
2800 /*
2801  * It reads the ICR and sets flags (such as TXGBE_ICRMISC_LSC) for link_update.
2802  *
2803  * @param dev
2804  *  Pointer to struct rte_eth_dev.
2805  *
2806  * @return
2807  *  - On success, zero.
2808  *  - On failure, a negative value.
2809  */
2810 static int
2811 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2812 {
2813         uint32_t eicr;
2814         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2815         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2816
2817         /* clear all cause mask */
2818         txgbe_disable_intr(hw);
2819
2820         /* read-on-clear nic registers here */
2821         eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2822         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2823
2824         intr->flags = 0;
2825
2826         /* set flag for async link update */
2827         if (eicr & TXGBE_ICRMISC_LSC)
2828                 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2829
2830         if (eicr & TXGBE_ICRMISC_VFMBX)
2831                 intr->flags |= TXGBE_FLAG_MAILBOX;
2832
2833         if (eicr & TXGBE_ICRMISC_LNKSEC)
2834                 intr->flags |= TXGBE_FLAG_MACSEC;
2835
2836         if (eicr & TXGBE_ICRMISC_GPIO)
2837                 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2838
2839         return 0;
2840 }
2841
2842 /**
2843  * It gets and then prints the link status.
2844  *
2845  * @param dev
2846  *  Pointer to struct rte_eth_dev.
2847  *
2848  * @return
2849  *  None. This helper only prints the link status
2850  *  and the PCI address of the port.
2851  */
2852 static void
2853 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2854 {
2855         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2856         struct rte_eth_link link;
2857
2858         rte_eth_linkstatus_get(dev, &link);
2859
2860         if (link.link_status) {
2861                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2862                                         (int)(dev->data->port_id),
2863                                         (unsigned int)link.link_speed,
2864                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2865                                         "full-duplex" : "half-duplex");
2866         } else {
2867                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2868                                 (int)(dev->data->port_id));
2869         }
2870         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2871                                 pci_dev->addr.domain,
2872                                 pci_dev->addr.bus,
2873                                 pci_dev->addr.devid,
2874                                 pci_dev->addr.function);
2875 }
2876
2877 /*
2878  * It executes link_update after knowing an interrupt occurred.
2879  *
2880  * @param dev
2881  *  Pointer to struct rte_eth_dev.
2882  *
2883  * @return
2884  *  - On success, zero.
2885  *  - On failure, a negative value.
2886  */
2887 static int
2888 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
2889                            struct rte_intr_handle *intr_handle)
2890 {
2891         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2892         int64_t timeout;
2893         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2894
2895         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2896
2897         if (intr->flags & TXGBE_FLAG_MAILBOX) {
2898                 txgbe_pf_mbx_process(dev);
2899                 intr->flags &= ~TXGBE_FLAG_MAILBOX;
2900         }
2901
2902         if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2903                 hw->phy.handle_lasi(hw);
2904                 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2905         }
2906
2907         if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2908                 struct rte_eth_link link;
2909
2910                 /* get the link status before link update, for later prediction */
2911                 rte_eth_linkstatus_get(dev, &link);
2912
2913                 txgbe_dev_link_update(dev, 0);
2914
2915                 /* link is likely to come up */
2916                 if (!link.link_status)
2917                         /* handle it 1 sec later, waiting for it to stabilize */
2918                         timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
2919                 /* link is likely to go down */
2920                 else
2921                         /* handle it 4 sec later, waiting for it to stabilize */
2922                         timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
2923
2924                 txgbe_dev_link_status_print(dev);
2925                 if (rte_eal_alarm_set(timeout * 1000,
2926                                       txgbe_dev_interrupt_delayed_handler,
2927                                       (void *)dev) < 0) {
2928                         PMD_DRV_LOG(ERR, "Error setting alarm");
2929                 } else {
2930                         /* remember original mask */
2931                         intr->mask_misc_orig = intr->mask_misc;
2932                         /* only disable lsc interrupt */
2933                         intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2934                 }
2935         }
2936
2937         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2938         txgbe_enable_intr(dev);
2939         rte_intr_enable(intr_handle);
2940
2941         return 0;
2942 }
2943
2944 /**
2945  * Interrupt handler to be registered as the alarm callback for delayed
2946  * handling of a specific interrupt, waiting for the NIC state to become
2947  * stable. The txgbe interrupt state is not stable right after the link
2948  * goes down, so it must wait 4 seconds before reading a stable status.
2949  *
2950  * @param handle
2951  *  Pointer to interrupt handle.
2952  * @param param
2953  *  The address of parameter (struct rte_eth_dev *) registered before.
2954  *
2955  * @return
2956  *  void
2957  */
2958 static void
2959 txgbe_dev_interrupt_delayed_handler(void *param)
2960 {
2961         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2962         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2963         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2964         struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2965         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2966         uint32_t eicr;
2967
2968         txgbe_disable_intr(hw);
2969
2970         eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2971         if (eicr & TXGBE_ICRMISC_VFMBX)
2972                 txgbe_pf_mbx_process(dev);
2973
2974         if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2975                 hw->phy.handle_lasi(hw);
2976                 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2977         }
2978
2979         if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2980                 txgbe_dev_link_update(dev, 0);
2981                 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
2982                 txgbe_dev_link_status_print(dev);
2983                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2984                                               NULL);
2985         }
2986
2987         if (intr->flags & TXGBE_FLAG_MACSEC) {
2988                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2989                                               NULL);
2990                 intr->flags &= ~TXGBE_FLAG_MACSEC;
2991         }
2992
2993         /* restore original mask */
2994         intr->mask_misc = intr->mask_misc_orig;
2995         intr->mask_misc_orig = 0;
2996
2997         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2998         txgbe_enable_intr(dev);
2999         rte_intr_enable(intr_handle);
3000 }
3001
3002 /**
3003  * Interrupt handler triggered by the NIC for handling a
3004  * specific interrupt.
3005  *
3006  * @param handle
3007  *  Pointer to interrupt handle.
3008  * @param param
3009  *  The address of parameter (struct rte_eth_dev *) registered before.
3010  *
3011  * @return
3012  *  void
3013  */
3014 static void
3015 txgbe_dev_interrupt_handler(void *param)
3016 {
3017         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3018
3019         txgbe_dev_interrupt_get_status(dev);
3020         txgbe_dev_interrupt_action(dev, dev->intr_handle);
3021 }
3022
3023 static int
3024 txgbe_dev_led_on(struct rte_eth_dev *dev)
3025 {
3026         struct txgbe_hw *hw;
3027
3028         hw = TXGBE_DEV_HW(dev);
3029         return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
3030 }
3031
3032 static int
3033 txgbe_dev_led_off(struct rte_eth_dev *dev)
3034 {
3035         struct txgbe_hw *hw;
3036
3037         hw = TXGBE_DEV_HW(dev);
3038         return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
3039 }
3040
3041 static int
3042 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3043 {
3044         struct txgbe_hw *hw;
3045         uint32_t mflcn_reg;
3046         uint32_t fccfg_reg;
3047         int rx_pause;
3048         int tx_pause;
3049
3050         hw = TXGBE_DEV_HW(dev);
3051
3052         fc_conf->pause_time = hw->fc.pause_time;
3053         fc_conf->high_water = hw->fc.high_water[0];
3054         fc_conf->low_water = hw->fc.low_water[0];
3055         fc_conf->send_xon = hw->fc.send_xon;
3056         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3057
3058         /*
3059          * Return the rx_pause status according to the actual setting of
3060          * the RXFCCFG register.
3061          */
3062         mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
3063         if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
3064                 rx_pause = 1;
3065         else
3066                 rx_pause = 0;
3067
3068         /*
3069          * Return the tx_pause status according to the actual setting of
3070          * the TXFCCFG register.
3071          */
3072         fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
3073         if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
3074                 tx_pause = 1;
3075         else
3076                 tx_pause = 0;
3077
3078         if (rx_pause && tx_pause)
3079                 fc_conf->mode = RTE_FC_FULL;
3080         else if (rx_pause)
3081                 fc_conf->mode = RTE_FC_RX_PAUSE;
3082         else if (tx_pause)
3083                 fc_conf->mode = RTE_FC_TX_PAUSE;
3084         else
3085                 fc_conf->mode = RTE_FC_NONE;
3086
3087         return 0;
3088 }
3089
3090 static int
3091 txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3092 {
3093         struct txgbe_hw *hw;
3094         int err;
3095         uint32_t rx_buf_size;
3096         uint32_t max_high_water;
3097         enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3098                 txgbe_fc_none,
3099                 txgbe_fc_rx_pause,
3100                 txgbe_fc_tx_pause,
3101                 txgbe_fc_full
3102         };
3103
3104         PMD_INIT_FUNC_TRACE();
3105
3106         hw = TXGBE_DEV_HW(dev);
3107         rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
3108         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3109
3110         /*
3111          * Reserve at least one Ethernet frame for the watermark;
3112          * high_water/low_water are in kilobytes for txgbe.
3113          */
3114         max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3115         if (fc_conf->high_water > max_high_water ||
3116             fc_conf->high_water < fc_conf->low_water) {
3117                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3118                 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3119                 return -EINVAL;
3120         }
3121
3122         hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
3123         hw->fc.pause_time     = fc_conf->pause_time;
3124         hw->fc.high_water[0]  = fc_conf->high_water;
3125         hw->fc.low_water[0]   = fc_conf->low_water;
3126         hw->fc.send_xon       = fc_conf->send_xon;
3127         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3128
3129         err = txgbe_fc_enable(hw);
3130
3131         /* Not negotiated is not an error case */
3132         if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
3133                 wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
3134                       (fc_conf->mac_ctrl_frame_fwd
3135                        ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
3136                 txgbe_flush(hw);
3137
3138                 return 0;
3139         }
3140
3141         PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
3142         return -EIO;
3143 }
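/*
 * Usage sketch, application side: a minimal example of requesting full
 * flow control through the generic ethdev API that lands in this handler.
 * The watermark and pause values are illustrative placeholders, not
 * recommended settings; they only need to pass the checks above.
 *
 *     struct rte_eth_fc_conf fc_conf = {
 *             .mode = RTE_FC_FULL,
 *             .pause_time = 0x680,
 *             .high_water = 0x80,
 *             .low_water = 0x40,
 *             .send_xon = 1,
 *             .autoneg = 1,
 *     };
 *     ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */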
3144
3145 static int
3146 txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
3147                 struct rte_eth_pfc_conf *pfc_conf)
3148 {
3149         int err;
3150         uint32_t rx_buf_size;
3151         uint32_t max_high_water;
3152         uint8_t tc_num;
3153         uint8_t  map[TXGBE_DCB_UP_MAX] = { 0 };
3154         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3155         struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3156
3157         enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3158                 txgbe_fc_none,
3159                 txgbe_fc_rx_pause,
3160                 txgbe_fc_tx_pause,
3161                 txgbe_fc_full
3162         };
3163
3164         PMD_INIT_FUNC_TRACE();
3165
3166         txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3167         tc_num = map[pfc_conf->priority];
3168         rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
3169         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3170         /*
3171          * Reserve at least one Ethernet frame for the watermark;
3172          * high_water/low_water are in kilobytes for txgbe.
3173          */
3174         max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3175         if (pfc_conf->fc.high_water > max_high_water ||
3176             pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
3177                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3178                 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3179                 return -EINVAL;
3180         }
3181
3182         hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
3183         hw->fc.pause_time = pfc_conf->fc.pause_time;
3184         hw->fc.send_xon = pfc_conf->fc.send_xon;
3185         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
3186         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3187
3188         err = txgbe_dcb_pfc_enable(hw, tc_num);
3189
3190         /* Not negotiated is not an error case */
3191         if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
3192                 return 0;
3193
3194         PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
3195         return -EIO;
3196 }
3197
3198 int
3199 txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3200                           struct rte_eth_rss_reta_entry64 *reta_conf,
3201                           uint16_t reta_size)
3202 {
3203         uint8_t i, j, mask;
3204         uint32_t reta;
3205         uint16_t idx, shift;
3206         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3207         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3208
3209         PMD_INIT_FUNC_TRACE();
3210
3211         if (!txgbe_rss_update_sp(hw->mac.type)) {
3212                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3213                         "NIC.");
3214                 return -ENOTSUP;
3215         }
3216
3217         if (reta_size != ETH_RSS_RETA_SIZE_128) {
3218                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3219                         "(%d) doesn't match the number hardware can supported "
3220                         "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3221                 return -EINVAL;
3222         }
3223
3224         for (i = 0; i < reta_size; i += 4) {
3225                 idx = i / RTE_RETA_GROUP_SIZE;
3226                 shift = i % RTE_RETA_GROUP_SIZE;
3227                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3228                 if (!mask)
3229                         continue;
3230
3231                 reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3232                 for (j = 0; j < 4; j++) {
3233                         if (RS8(mask, j, 0x1)) {
3234                                 reta  &= ~(MS32(8 * j, 0xFF));
3235                                 reta |= LS32(reta_conf[idx].reta[shift + j],
3236                                                 8 * j, 0xFF);
3237                         }
3238                 }
3239                 wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3240         }
3241         adapter->rss_reta_updated = 1;
3242
3243         return 0;
3244 }
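/*
 * Usage sketch, application side: spreading the 128-entry redirection
 * table evenly across nb_rx_queues queues via the generic ethdev API;
 * port_id and nb_rx_queues are assumed to be provided by the caller.
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[2] = { 0 };
 *     for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % nb_rx_queues;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta_conf, ETH_RSS_RETA_SIZE_128);
 */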
3245
3246 int
3247 txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3248                          struct rte_eth_rss_reta_entry64 *reta_conf,
3249                          uint16_t reta_size)
3250 {
3251         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3252         uint8_t i, j, mask;
3253         uint32_t reta;
3254         uint16_t idx, shift;
3255
3256         PMD_INIT_FUNC_TRACE();
3257
3258         if (reta_size != ETH_RSS_RETA_SIZE_128) {
3259                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3260                         "(%d) doesn't match the number hardware can supported "
3261                         "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3262                 return -EINVAL;
3263         }
3264
3265         for (i = 0; i < reta_size; i += 4) {
3266                 idx = i / RTE_RETA_GROUP_SIZE;
3267                 shift = i % RTE_RETA_GROUP_SIZE;
3268                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3269                 if (!mask)
3270                         continue;
3271
3272                 reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3273                 for (j = 0; j < 4; j++) {
3274                         if (RS8(mask, j, 0x1))
3275                                 reta_conf[idx].reta[shift + j] =
3276                                         (uint16_t)RS32(reta, 8 * j, 0xFF);
3277                 }
3278         }
3279
3280         return 0;
3281 }
3282
3283 static int
3284 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3285                                 uint32_t index, uint32_t pool)
3286 {
3287         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3288         uint32_t enable_addr = 1;
3289
3290         return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3291                              pool, enable_addr);
3292 }
3293
3294 static void
3295 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3296 {
3297         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3298
3299         txgbe_clear_rar(hw, index);
3300 }
3301
3302 static int
3303 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3304 {
3305         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3306
3307         txgbe_remove_rar(dev, 0);
3308         txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3309
3310         return 0;
3311 }
3312
3313 static int
3314 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3315 {
3316         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3317         struct rte_eth_dev_info dev_info;
3318         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3319         struct rte_eth_dev_data *dev_data = dev->data;
3320         int ret;
3321
3322         ret = txgbe_dev_info_get(dev, &dev_info);
3323         if (ret != 0)
3324                 return ret;
3325
3326         /* check that mtu is within the allowed range */
3327         if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
3328                 return -EINVAL;
3329
3330         /* If device is started, refuse mtu that requires the support of
3331          * scattered packets when this feature has not been enabled before.
3332          */
3333         if (dev_data->dev_started && !dev_data->scattered_rx &&
3334             (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
3335              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
3336                 PMD_INIT_LOG(ERR, "Stop port first.");
3337                 return -EINVAL;
3338         }
3339
3340         /* update max frame size */
3341         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3342
3343         if (hw->mode)
3344                 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3345                         TXGBE_FRAME_SIZE_MAX);
3346         else
3347                 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3348                         TXGBE_FRMSZ_MAX(frame_size));
3349
3350         return 0;
3351 }
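/*
 * Example: for the standard MTU of 1500, frame_size above is
 * 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 1518 bytes.
 * Applications normally reach this handler via
 * rte_eth_dev_set_mtu(port_id, mtu).
 */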
3352
3353 static uint32_t
3354 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3355 {
3356         uint32_t vector = 0;
3357
3358         switch (hw->mac.mc_filter_type) {
3359         case 0:   /* use bits [47:36] of the address */
3360                 vector = ((uc_addr->addr_bytes[4] >> 4) |
3361                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
3362                 break;
3363         case 1:   /* use bits [46:35] of the address */
3364                 vector = ((uc_addr->addr_bytes[4] >> 3) |
3365                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
3366                 break;
3367         case 2:   /* use bits [45:34] of the address */
3368                 vector = ((uc_addr->addr_bytes[4] >> 2) |
3369                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
3370                 break;
3371         case 3:   /* use bits [43:32] of the address */
3372                 vector = ((uc_addr->addr_bytes[4]) |
3373                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
3374                 break;
3375         default:  /* Invalid mc_filter_type */
3376                 break;
3377         }
3378
3379         /* the vector can only be 12 bits wide, or the boundary would be exceeded */
3380         vector &= 0xFFF;
3381         return vector;
3382 }
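/*
 * Worked example for mc_filter_type 0, with a MAC address ending in
 * ...:AB:CD (addr_bytes[4] = 0xAB, addr_bytes[5] = 0xCD):
 * vector = (0xAB >> 4) | (0xCD << 4) = 0x0A | 0xCD0 = 0xCDA.
 */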
3383
3384 static int
3385 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
3386                         struct rte_ether_addr *mac_addr, uint8_t on)
3387 {
3388         uint32_t vector;
3389         uint32_t uta_idx;
3390         uint32_t reg_val;
3391         uint32_t uta_mask;
3392         uint32_t psrctl;
3393
3394         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3395         struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3396
3397         /* The UTA table only exists on pf hardware */
3398         if (hw->mac.type < txgbe_mac_raptor)
3399                 return -ENOTSUP;
3400
3401         vector = txgbe_uta_vector(hw, mac_addr);
3402         uta_idx = (vector >> 5) & 0x7F;
3403         uta_mask = 0x1UL << (vector & 0x1F);
3404
3405         if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
3406                 return 0;
3407
3408         reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
3409         if (on) {
3410                 uta_info->uta_in_use++;
3411                 reg_val |= uta_mask;
3412                 uta_info->uta_shadow[uta_idx] |= uta_mask;
3413         } else {
3414                 uta_info->uta_in_use--;
3415                 reg_val &= ~uta_mask;
3416                 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
3417         }
3418
3419         wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
3420
3421         psrctl = rd32(hw, TXGBE_PSRCTL);
3422         if (uta_info->uta_in_use > 0)
3423                 psrctl |= TXGBE_PSRCTL_UCHFENA;
3424         else
3425                 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3426
3427         psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3428         psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3429         wr32(hw, TXGBE_PSRCTL, psrctl);
3430
3431         return 0;
3432 }
3433
3434 static int
3435 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3436 {
3437         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3438         struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3439         uint32_t psrctl;
3440         int i;
3441
3442         /* The UTA table only exists on pf hardware */
3443         if (hw->mac.type < txgbe_mac_raptor)
3444                 return -ENOTSUP;
3445
3446         if (on) {
3447                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3448                         uta_info->uta_shadow[i] = ~0;
3449                         wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3450                 }
3451         } else {
3452                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3453                         uta_info->uta_shadow[i] = 0;
3454                         wr32(hw, TXGBE_UCADDRTBL(i), 0);
3455                 }
3456         }
3457
3458         psrctl = rd32(hw, TXGBE_PSRCTL);
3459         if (on)
3460                 psrctl |= TXGBE_PSRCTL_UCHFENA;
3461         else
3462                 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3463
3464         psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3465         psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3466         wr32(hw, TXGBE_PSRCTL, psrctl);
3467
3468         return 0;
3469 }
3470
3471 uint32_t
3472 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3473 {
3474         uint32_t new_val = orig_val;
3475
3476         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3477                 new_val |= TXGBE_POOLETHCTL_UTA;
3478         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3479                 new_val |= TXGBE_POOLETHCTL_MCHA;
3480         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3481                 new_val |= TXGBE_POOLETHCTL_UCHA;
3482         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3483                 new_val |= TXGBE_POOLETHCTL_BCA;
3484         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3485                 new_val |= TXGBE_POOLETHCTL_MCP;
3486
3487         return new_val;
3488 }
3489
3490 static int
3491 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
3492 {
3493         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3494         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3495         uint32_t mask;
3496         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3497
3498         if (queue_id < 32) {
3499                 mask = rd32(hw, TXGBE_IMS(0));
3500                 mask &= (1 << queue_id);
3501                 wr32(hw, TXGBE_IMS(0), mask);
3502         } else if (queue_id < 64) {
3503                 mask = rd32(hw, TXGBE_IMS(1));
3504                 mask &= (1 << (queue_id - 32));
3505                 wr32(hw, TXGBE_IMS(1), mask);
3506         }
3507         rte_intr_enable(intr_handle);
3508
3509         return 0;
3510 }
3511
3512 static int
3513 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
3514 {
3515         uint32_t mask;
3516         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3517
3518         if (queue_id < 32) {
3519                 mask = rd32(hw, TXGBE_IMS(0));
3520                 mask &= ~(1 << queue_id);
3521                 wr32(hw, TXGBE_IMS(0), mask);
3522         } else if (queue_id < 64) {
3523                 mask = rd32(hw, TXGBE_IMS(1));
3524                 mask &= ~(1 << (queue_id - 32));
3525                 wr32(hw, TXGBE_IMS(1), mask);
3526         }
3527
3528         return 0;
3529 }
3530
3531 /**
3532  * set the IVAR registers, mapping interrupt causes to vectors
3533  * @param hw
3534  *  pointer to txgbe_hw struct
3535  * @param direction
3536  *  0 for Rx, 1 for Tx, -1 for other causes
3537  * @param queue
3538  *  queue to map the corresponding interrupt to
3539  * @param msix_vector
3540  *  the vector to map to the corresponding queue
3541  */
3542 void
3543 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
3544                    uint8_t queue, uint8_t msix_vector)
3545 {
3546         uint32_t tmp, idx;
3547
3548         if (direction == -1) {
3549                 /* other causes */
3550                 msix_vector |= TXGBE_IVARMISC_VLD;
3551                 idx = 0;
3552                 tmp = rd32(hw, TXGBE_IVARMISC);
3553                 tmp &= ~(0xFF << idx);
3554                 tmp |= (msix_vector << idx);
3555                 wr32(hw, TXGBE_IVARMISC, tmp);
3556         } else {
3557                 /* rx or tx causes */
3558                 /* Workaround for lost ICR */
3559                 idx = ((16 * (queue & 1)) + (8 * direction));
3560                 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
3561                 tmp &= ~(0xFF << idx);
3562                 tmp |= (msix_vector << idx);
3563                 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
3564         }
3565 }
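/*
 * Each 32-bit IVAR register holds four 8-bit vector entries: (queue >> 1)
 * selects the register, and 16 * (queue & 1) + 8 * direction selects the
 * byte within it. For example, Rx queue 5 (direction 0) lands in bits
 * [23:16] of TXGBE_IVAR(2).
 */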
3566
3567 /**
3568  * Sets up the hardware to properly generate MSI-X interrupts
3569  * @param dev
3570  *  pointer to struct rte_eth_dev
3571  */
3572 static void
3573 txgbe_configure_msix(struct rte_eth_dev *dev)
3574 {
3575         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3576         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3577         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3578         uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
3579         uint32_t vec = TXGBE_MISC_VEC_ID;
3580         uint32_t gpie;
3581
3582         /* Won't configure the MSI-X register if no mapping is done
3583          * between intr vector and event fd,
3584          * but if MSI-X has been enabled already, we need to configure
3585          * auto clean, auto mask and throttling.
3586          */
3587         gpie = rd32(hw, TXGBE_GPIE);
3588         if (!rte_intr_dp_is_en(intr_handle) &&
3589             !(gpie & TXGBE_GPIE_MSIX))
3590                 return;
3591
3592         if (rte_intr_allow_others(intr_handle)) {
3593                 base = TXGBE_RX_VEC_START;
3594                 vec = base;
3595         }
3596
3597         /* setup GPIE for MSI-x mode */
3598         gpie = rd32(hw, TXGBE_GPIE);
3599         gpie |= TXGBE_GPIE_MSIX;
3600         wr32(hw, TXGBE_GPIE, gpie);
3601
3602         /* Populate the IVAR table and set the ITR values to the
3603          * corresponding register.
3604          */
3605         if (rte_intr_dp_is_en(intr_handle)) {
3606                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
3607                         queue_id++) {
3608                         /* by default, 1:1 mapping */
3609                         txgbe_set_ivar_map(hw, 0, queue_id, vec);
3610                         intr_handle->intr_vec[queue_id] = vec;
3611                         if (vec < base + intr_handle->nb_efd - 1)
3612                                 vec++;
3613                 }
3614
3615                 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
3616         }
3617         wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
3618                         TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
3619                         | TXGBE_ITR_WRDSA);
3620 }
3621
3622 int
3623 txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3624                            uint16_t queue_idx, uint16_t tx_rate)
3625 {
3626         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3627         uint32_t bcnrc_val;
3628
3629         if (queue_idx >= hw->mac.max_tx_queues)
3630                 return -EINVAL;
3631
3632         if (tx_rate != 0) {
3633                 bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3634                 bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3635         } else {
3636                 bcnrc_val = 0;
3637         }
3638
3639         /*
3640          * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3641          * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3642          */
3643         wr32(hw, TXGBE_ARBTXMMW, 0x14);
3644
3645         /* Set ARBTXRATE of queue X */
3646         wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3647         wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
3648         txgbe_flush(hw);
3649
3650         return 0;
3651 }
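/*
 * Usage sketch, application side: capping Tx queue 0 at 1000 Mbps through
 * the generic ethdev API, which ends up here; port_id is assumed valid
 * and the return value should be checked in real code.
 *
 *     ret = rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 */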
3652
3653 int
3654 txgbe_syn_filter_set(struct rte_eth_dev *dev,
3655                         struct rte_eth_syn_filter *filter,
3656                         bool add)
3657 {
3658         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3659         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3660         uint32_t syn_info;
3661         uint32_t synqf;
3662
3663         if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3664                 return -EINVAL;
3665
3666         syn_info = filter_info->syn_info;
3667
3668         if (add) {
3669                 if (syn_info & TXGBE_SYNCLS_ENA)
3670                         return -EINVAL;
3671                 synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
3672                 synqf |= TXGBE_SYNCLS_ENA;
3673
3674                 if (filter->hig_pri)
3675                         synqf |= TXGBE_SYNCLS_HIPRIO;
3676                 else
3677                         synqf &= ~TXGBE_SYNCLS_HIPRIO;
3678         } else {
3679                 synqf = rd32(hw, TXGBE_SYNCLS);
3680                 if (!(syn_info & TXGBE_SYNCLS_ENA))
3681                         return -ENOENT;
3682                 synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
3683         }
3684
3685         filter_info->syn_info = synqf;
3686         wr32(hw, TXGBE_SYNCLS, synqf);
3687         txgbe_flush(hw);
3688         return 0;
3689 }
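/*
 * Usage sketch: steering TCP SYN packets to queue 3 at normal priority,
 * as the flow layer might do; the queue number is an illustrative value.
 *
 *     struct rte_eth_syn_filter syn = { .hig_pri = 0, .queue = 3 };
 *     ret = txgbe_syn_filter_set(dev, &syn, true);
 */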
3690
3691 static inline enum txgbe_5tuple_protocol
3692 convert_protocol_type(uint8_t protocol_value)
3693 {
3694         if (protocol_value == IPPROTO_TCP)
3695                 return TXGBE_5TF_PROT_TCP;
3696         else if (protocol_value == IPPROTO_UDP)
3697                 return TXGBE_5TF_PROT_UDP;
3698         else if (protocol_value == IPPROTO_SCTP)
3699                 return TXGBE_5TF_PROT_SCTP;
3700         else
3701                 return TXGBE_5TF_PROT_NONE;
3702 }
3703
3704 /* inject a 5-tuple filter into the HW */
3705 static inline void
3706 txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
3707                            struct txgbe_5tuple_filter *filter)
3708 {
3709         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3710         int i;
3711         uint32_t ftqf, sdpqf;
3712         uint32_t l34timir = 0;
3713         uint32_t mask = TXGBE_5TFCTL0_MASK;
3714
3715         i = filter->index;
3716         sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
3717         sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
3718
3719         ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
3720         ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
3721         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
3722                 mask &= ~TXGBE_5TFCTL0_MSADDR;
3723         if (filter->filter_info.dst_ip_mask == 0)
3724                 mask &= ~TXGBE_5TFCTL0_MDADDR;
3725         if (filter->filter_info.src_port_mask == 0)
3726                 mask &= ~TXGBE_5TFCTL0_MSPORT;
3727         if (filter->filter_info.dst_port_mask == 0)
3728                 mask &= ~TXGBE_5TFCTL0_MDPORT;
3729         if (filter->filter_info.proto_mask == 0)
3730                 mask &= ~TXGBE_5TFCTL0_MPROTO;
3731         ftqf |= mask;
3732         ftqf |= TXGBE_5TFCTL0_MPOOL;
3733         ftqf |= TXGBE_5TFCTL0_ENA;
3734
3735         wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
3736         wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
3737         wr32(hw, TXGBE_5TFPORT(i), sdpqf);
3738         wr32(hw, TXGBE_5TFCTL0(i), ftqf);
3739
3740         l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
3741         wr32(hw, TXGBE_5TFCTL1(i), l34timir);
3742 }
3743
3744 /*
3745  * add a 5tuple filter
3746  *
3747  * @param
3748  * dev: Pointer to struct rte_eth_dev.
3749  * filter: pointer to the filter that will be added; the function finds a
3750  *         free slot and stores its index in filter->index.
3752  *
3753  * @return
3754  *    - On success, zero.
3755  *    - On failure, a negative value.
3756  */
3757 static int
3758 txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
3759                         struct txgbe_5tuple_filter *filter)
3760 {
3761         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3762         int i, idx, shift;
3763
3764         /*
3765          * look for an unused 5tuple filter index,
3766          * and insert the filter into the list.
3767          */
3768         for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
3769                 idx = i / (sizeof(uint32_t) * NBBY);
3770                 shift = i % (sizeof(uint32_t) * NBBY);
3771                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
3772                         filter_info->fivetuple_mask[idx] |= 1 << shift;
3773                         filter->index = i;
3774                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3775                                           filter,
3776                                           entries);
3777                         break;
3778                 }
3779         }
3780         if (i >= TXGBE_MAX_FTQF_FILTERS) {
3781                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3782                 return -ENOSYS;
3783         }
3784
3785         txgbe_inject_5tuple_filter(dev, filter);
3786
3787         return 0;
3788 }
3789
3790 /*
3791  * remove a 5tuple filter
3792  *
3793  * @param
3794  * dev: Pointer to struct rte_eth_dev.
3795  * filter: pointer to the filter that will be removed.
3796  */
3797 static void
3798 txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
3799                         struct txgbe_5tuple_filter *filter)
3800 {
3801         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3802         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3803         uint16_t index = filter->index;
3804
3805         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
3806                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
3807         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3808         rte_free(filter);
3809
3810         wr32(hw, TXGBE_5TFDADDR(index), 0);
3811         wr32(hw, TXGBE_5TFSADDR(index), 0);
3812         wr32(hw, TXGBE_5TFPORT(index), 0);
3813         wr32(hw, TXGBE_5TFCTL0(index), 0);
3814         wr32(hw, TXGBE_5TFCTL1(index), 0);
3815 }
3816
3817 static inline struct txgbe_5tuple_filter *
3818 txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
3819                         struct txgbe_5tuple_filter_info *key)
3820 {
3821         struct txgbe_5tuple_filter *it;
3822
3823         TAILQ_FOREACH(it, filter_list, entries) {
3824                 if (memcmp(key, &it->filter_info,
3825                         sizeof(struct txgbe_5tuple_filter_info)) == 0) {
3826                         return it;
3827                 }
3828         }
3829         return NULL;
3830 }
3831
3832 /* translate elements in struct rte_eth_ntuple_filter
3833  * to struct txgbe_5tuple_filter_info
3834  */
3835 static inline int
3836 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
3837                         struct txgbe_5tuple_filter_info *filter_info)
3838 {
3839         if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
3840                 filter->priority > TXGBE_5TUPLE_MAX_PRI ||
3841                 filter->priority < TXGBE_5TUPLE_MIN_PRI)
3842                 return -EINVAL;
3843
3844         switch (filter->dst_ip_mask) {
3845         case UINT32_MAX:
3846                 filter_info->dst_ip_mask = 0;
3847                 filter_info->dst_ip = filter->dst_ip;
3848                 break;
3849         case 0:
3850                 filter_info->dst_ip_mask = 1;
3851                 break;
3852         default:
3853                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3854                 return -EINVAL;
3855         }
3856
3857         switch (filter->src_ip_mask) {
3858         case UINT32_MAX:
3859                 filter_info->src_ip_mask = 0;
3860                 filter_info->src_ip = filter->src_ip;
3861                 break;
3862         case 0:
3863                 filter_info->src_ip_mask = 1;
3864                 break;
3865         default:
3866                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3867                 return -EINVAL;
3868         }
3869
3870         switch (filter->dst_port_mask) {
3871         case UINT16_MAX:
3872                 filter_info->dst_port_mask = 0;
3873                 filter_info->dst_port = filter->dst_port;
3874                 break;
3875         case 0:
3876                 filter_info->dst_port_mask = 1;
3877                 break;
3878         default:
3879                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3880                 return -EINVAL;
3881         }
3882
3883         switch (filter->src_port_mask) {
3884         case UINT16_MAX:
3885                 filter_info->src_port_mask = 0;
3886                 filter_info->src_port = filter->src_port;
3887                 break;
3888         case 0:
3889                 filter_info->src_port_mask = 1;
3890                 break;
3891         default:
3892                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3893                 return -EINVAL;
3894         }
3895
3896         switch (filter->proto_mask) {
3897         case UINT8_MAX:
3898                 filter_info->proto_mask = 0;
3899                 filter_info->proto =
3900                         convert_protocol_type(filter->proto);
3901                 break;
3902         case 0:
3903                 filter_info->proto_mask = 1;
3904                 break;
3905         default:
3906                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3907                 return -EINVAL;
3908         }
3909
3910         filter_info->priority = (uint8_t)filter->priority;
3911         return 0;
3912 }
3913
3914 /*
3915  * add or delete a ntuple filter
3916  *
3917  * @param
3918  * dev: Pointer to struct rte_eth_dev.
3919  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3920  * add: if true, add the filter; if false, remove it
3921  *
3922  * @return
3923  *    - On success, zero.
3924  *    - On failure, a negative value.
3925  */
3926 int
3927 txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
3928                         struct rte_eth_ntuple_filter *ntuple_filter,
3929                         bool add)
3930 {
3931         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3932         struct txgbe_5tuple_filter_info filter_5tuple;
3933         struct txgbe_5tuple_filter *filter;
3934         int ret;
3935
3936         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
3937                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
3938                 return -EINVAL;
3939         }
3940
3941         memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
3942         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
3943         if (ret < 0)
3944                 return ret;
3945
3946         filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
3947                                          &filter_5tuple);
3948         if (filter != NULL && add) {
3949                 PMD_DRV_LOG(ERR, "filter exists.");
3950                 return -EEXIST;
3951         }
3952         if (filter == NULL && !add) {
3953                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3954                 return -ENOENT;
3955         }
3956
3957         if (add) {
3958                 filter = rte_zmalloc("txgbe_5tuple_filter",
3959                                 sizeof(struct txgbe_5tuple_filter), 0);
3960                 if (filter == NULL)
3961                         return -ENOMEM;
3962                 rte_memcpy(&filter->filter_info,
3963                                  &filter_5tuple,
3964                                  sizeof(struct txgbe_5tuple_filter_info));
3965                 filter->queue = ntuple_filter->queue;
3966                 ret = txgbe_add_5tuple_filter(dev, filter);
3967                 if (ret < 0) {
3968                         rte_free(filter);
3969                         return ret;
3970                 }
3971         } else {
3972                 txgbe_remove_5tuple_filter(dev, filter);
3973         }
3974
3975         return 0;
3976 }
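/*
 * Usage sketch: a 5-tuple filter that matches only the TCP destination
 * port 80 (every other field wildcarded with mask 0) and steers hits to
 * queue 2; the priority and queue values are illustrative.
 *
 *     struct rte_eth_ntuple_filter ntuple = {
 *             .flags = RTE_5TUPLE_FLAGS,
 *             .proto = IPPROTO_TCP,
 *             .proto_mask = UINT8_MAX,
 *             .dst_port = rte_cpu_to_be_16(80),
 *             .dst_port_mask = UINT16_MAX,
 *             .priority = 1,
 *             .queue = 2,
 *     };
 *     ret = txgbe_add_del_ntuple_filter(dev, &ntuple, true);
 */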
3977
3978 int
3979 txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
3980                         struct rte_eth_ethertype_filter *filter,
3981                         bool add)
3982 {
3983         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3984         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3985         uint32_t etqf = 0;
3986         uint32_t etqs = 0;
3987         int ret;
3988         struct txgbe_ethertype_filter ethertype_filter;
3989
3990         if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3991                 return -EINVAL;
3992
3993         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
3994             filter->ether_type == RTE_ETHER_TYPE_IPV6) {
3995                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
3996                         " ethertype filter.", filter->ether_type);
3997                 return -EINVAL;
3998         }
3999
4000         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4001                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4002                 return -EINVAL;
4003         }
4004         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4005                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
4006                 return -EINVAL;
4007         }
4008
4009         ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
4010         if (ret >= 0 && add) {
4011                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4012                             filter->ether_type);
4013                 return -EEXIST;
4014         }
4015         if (ret < 0 && !add) {
4016                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4017                             filter->ether_type);
4018                 return -ENOENT;
4019         }
4020
4021         if (add) {
4022                 etqf = TXGBE_ETFLT_ENA;
4023                 etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
4024                 etqs |= TXGBE_ETCLS_QPID(filter->queue);
4025                 etqs |= TXGBE_ETCLS_QENA;
4026
4027                 ethertype_filter.ethertype = filter->ether_type;
4028                 ethertype_filter.etqf = etqf;
4029                 ethertype_filter.etqs = etqs;
4030                 ethertype_filter.conf = FALSE;
4031                 ret = txgbe_ethertype_filter_insert(filter_info,
4032                                                     &ethertype_filter);
4033                 if (ret < 0) {
4034                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
4035                         return -ENOSPC;
4036                 }
4037         } else {
4038                 ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
4039                 if (ret < 0)
4040                         return -ENOSYS;
4041         }
4042         wr32(hw, TXGBE_ETFLT(ret), etqf);
4043         wr32(hw, TXGBE_ETCLS(ret), etqs);
4044         txgbe_flush(hw);
4045
4046         return 0;
4047 }
4048
4049 static int
4050 txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
4051                      enum rte_filter_type filter_type,
4052                      enum rte_filter_op filter_op,
4053                      void *arg)
4054 {
4055         int ret = 0;
4056
4057         switch (filter_type) {
4058         case RTE_ETH_FILTER_GENERIC:
4059                 if (filter_op != RTE_ETH_FILTER_GET)
4060                         return -EINVAL;
4061                 *(const void **)arg = &txgbe_flow_ops;
4062                 break;
4063         default:
4064                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4065                                                         filter_type);
4066                 ret = -EINVAL;
4067                 break;
4068         }
4069
4070         return ret;
4071 }
4072
4073 static u8 *
4074 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
4075                         u8 **mc_addr_ptr, u32 *vmdq)
4076 {
4077         u8 *mc_addr;
4078
4079         *vmdq = 0;
4080         mc_addr = *mc_addr_ptr;
4081         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
4082         return mc_addr;
4083 }
4084
4085 int
4086 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
4087                           struct rte_ether_addr *mc_addr_set,
4088                           uint32_t nb_mc_addr)
4089 {
4090         struct txgbe_hw *hw;
4091         u8 *mc_addr_list;
4092
4093         hw = TXGBE_DEV_HW(dev);
4094         mc_addr_list = (u8 *)mc_addr_set;
4095         return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
4096                                          txgbe_dev_addr_list_itr, TRUE);
4097 }
4098
4099 static uint64_t
4100 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
4101 {
4102         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4103         uint64_t systime_cycles;
4104
4105         systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
4106         systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
4107
4108         return systime_cycles;
4109 }
4110
4111 static uint64_t
4112 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4113 {
4114         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4115         uint64_t rx_tstamp_cycles;
4116
4117         /* TSRXSTMPL holds the low 32 bits and TSRXSTMPH the high 32 bits. */
4118         rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
4119         rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
4120
4121         return rx_tstamp_cycles;
4122 }
4123
4124 static uint64_t
4125 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4126 {
4127         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4128         uint64_t tx_tstamp_cycles;
4129
4130         /* TSTXSTMPL holds the low 32 bits and TSTXSTMPH the high 32 bits. */
4131         tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
4132         tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
4133
4134         return tx_tstamp_cycles;
4135 }
4136
4137 static void
4138 txgbe_start_timecounters(struct rte_eth_dev *dev)
4139 {
4140         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4141         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4142         struct rte_eth_link link;
4143         uint32_t incval = 0;
4144         uint32_t shift = 0;
4145
4146         /* Get current link speed. */
4147         txgbe_dev_link_update(dev, 1);
4148         rte_eth_linkstatus_get(dev, &link);
4149
4150         switch (link.link_speed) {
4151         case ETH_SPEED_NUM_100M:
4152                 incval = TXGBE_INCVAL_100;
4153                 shift = TXGBE_INCVAL_SHIFT_100;
4154                 break;
4155         case ETH_SPEED_NUM_1G:
4156                 incval = TXGBE_INCVAL_1GB;
4157                 shift = TXGBE_INCVAL_SHIFT_1GB;
4158                 break;
4159         case ETH_SPEED_NUM_10G:
4160         default:
4161                 incval = TXGBE_INCVAL_10GB;
4162                 shift = TXGBE_INCVAL_SHIFT_10GB;
4163                 break;
4164         }
4165
4166         wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
4167
4168         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4169         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4170         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4171
4172         adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4173         adapter->systime_tc.cc_shift = shift;
4174         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4175
4176         adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4177         adapter->rx_tstamp_tc.cc_shift = shift;
4178         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4179
4180         adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4181         adapter->tx_tstamp_tc.cc_shift = shift;
4182         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4183 }
4184
4185 static int
4186 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4187 {
4188         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4189
4190         adapter->systime_tc.nsec += delta;
4191         adapter->rx_tstamp_tc.nsec += delta;
4192         adapter->tx_tstamp_tc.nsec += delta;
4193
4194         return 0;
4195 }
4196
4197 static int
4198 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4199 {
4200         uint64_t ns;
4201         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4202
4203         ns = rte_timespec_to_ns(ts);
4204         /* Set the timecounters to a new value. */
4205         adapter->systime_tc.nsec = ns;
4206         adapter->rx_tstamp_tc.nsec = ns;
4207         adapter->tx_tstamp_tc.nsec = ns;
4208
4209         return 0;
4210 }
4211
4212 static int
4213 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4214 {
4215         uint64_t ns, systime_cycles;
4216         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4217
4218         systime_cycles = txgbe_read_systime_cyclecounter(dev);
4219         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4220         *ts = rte_ns_to_timespec(ns);
4221
4222         return 0;
4223 }
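/*
 * Usage sketch, application side: enabling PTP timestamping and working
 * with the device clock through the generic ethdev API; port_id is
 * assumed valid and error handling is omitted.
 *
 *     struct timespec ts;
 *     rte_eth_timesync_enable(port_id);
 *     rte_eth_timesync_read_time(port_id, &ts);
 *     rte_eth_timesync_adjust_time(port_id, 1000);
 */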
4224
4225 static int
4226 txgbe_timesync_enable(struct rte_eth_dev *dev)
4227 {
4228         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4229         uint32_t tsync_ctl;
4230
4231         /* Stop the timesync system time. */
4232         wr32(hw, TXGBE_TSTIMEINC, 0x0);
4233         /* Reset the timesync system time value. */
4234         wr32(hw, TXGBE_TSTIMEL, 0x0);
4235         wr32(hw, TXGBE_TSTIMEH, 0x0);
4236
4237         txgbe_start_timecounters(dev);
4238
4239         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4240         wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
4241                 RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
4242
4243         /* Enable timestamping of received PTP packets. */
4244         tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4245         tsync_ctl |= TXGBE_TSRXCTL_ENA;
4246         wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4247
4248         /* Enable timestamping of transmitted PTP packets. */
4249         tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4250         tsync_ctl |= TXGBE_TSTXCTL_ENA;
4251         wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4252
4253         txgbe_flush(hw);
4254
4255         return 0;
4256 }
4257
4258 static int
4259 txgbe_timesync_disable(struct rte_eth_dev *dev)
4260 {
4261         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4262         uint32_t tsync_ctl;
4263
4264         /* Disable timestamping of transmitted PTP packets. */
4265         tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4266         tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
4267         wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4268
4269         /* Disable timestamping of received PTP packets. */
4270         tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4271         tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
4272         wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4273
4274         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4275         wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
4276
4277         /* Stop incrementing the System Time registers. */
4278         wr32(hw, TXGBE_TSTIMEINC, 0);
4279
4280         return 0;
4281 }
4282
4283 static int
4284 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4285                                  struct timespec *timestamp,
4286                                  uint32_t flags __rte_unused)
4287 {
4288         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4289         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4290         uint32_t tsync_rxctl;
4291         uint64_t rx_tstamp_cycles;
4292         uint64_t ns;
4293
4294         tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
4295         if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
4296                 return -EINVAL;
4297
4298         rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
4299         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4300         *timestamp = rte_ns_to_timespec(ns);
4301
4302         return  0;
4303 }
4304
4305 static int
4306 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4307                                  struct timespec *timestamp)
4308 {
4309         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4310         struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4311         uint32_t tsync_txctl;
4312         uint64_t tx_tstamp_cycles;
4313         uint64_t ns;
4314
4315         tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
4316         if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
4317                 return -EINVAL;
4318
4319         tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
4320         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4321         *timestamp = rte_ns_to_timespec(ns);
4322
4323         return 0;
4324 }
4325
4326 static int
4327 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4328 {
4329         int count = 0;
4330         int g_ind = 0;
4331         const struct reg_info *reg_group;
4332         const struct reg_info **reg_set = txgbe_regs_others;
4333
4334         while ((reg_group = reg_set[g_ind++]))
4335                 count += txgbe_regs_group_count(reg_group);
4336
4337         return count;
4338 }
4339
4340 static int
4341 txgbe_get_regs(struct rte_eth_dev *dev,
4342               struct rte_dev_reg_info *regs)
4343 {
4344         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4345         uint32_t *data = regs->data;
4346         int g_ind = 0;
4347         int count = 0;
4348         const struct reg_info *reg_group;
4349         const struct reg_info **reg_set = txgbe_regs_others;
4350
4351         if (data == NULL) {
4352                 regs->length = txgbe_get_reg_length(dev);
4353                 regs->width = sizeof(uint32_t);
4354                 return 0;
4355         }
4356
4357         /* Support only full register dump */
4358         if (regs->length == 0 ||
4359             regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
4360                 regs->version = hw->mac.type << 24 |
4361                                 hw->revision_id << 16 |
4362                                 hw->device_id;
4363                 while ((reg_group = reg_set[g_ind++]))
4364                         count += txgbe_read_regs_group(dev, &data[count],
4365                                                       reg_group);
4366                 return 0;
4367         }
4368
4369         return -ENOTSUP;
4370 }
4371
4372 static int
4373 txgbe_get_eeprom_length(struct rte_eth_dev *dev)
4374 {
4375         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4376
4377         /* Return unit is byte count */
4378         return hw->rom.word_size * 2;
4379 }
4380
4381 static int
4382 txgbe_get_eeprom(struct rte_eth_dev *dev,
4383                 struct rte_dev_eeprom_info *in_eeprom)
4384 {
4385         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4386         struct txgbe_rom_info *eeprom = &hw->rom;
4387         uint16_t *data = in_eeprom->data;
4388         int first, length;
4389
4390         first = in_eeprom->offset >> 1;
4391         length = in_eeprom->length >> 1;
4392         if (first > hw->rom.word_size ||
4393             ((first + length) > hw->rom.word_size))
4394                 return -EINVAL;
4395
4396         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4397
4398         return eeprom->readw_buffer(hw, first, length, data);
4399 }
4400
4401 static int
4402 txgbe_set_eeprom(struct rte_eth_dev *dev,
4403                 struct rte_dev_eeprom_info *in_eeprom)
4404 {
4405         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4406         struct txgbe_rom_info *eeprom = &hw->rom;
4407         uint16_t *data = in_eeprom->data;
4408         int first, length;
4409
4410         first = in_eeprom->offset >> 1;
4411         length = in_eeprom->length >> 1;
4412         if (first > hw->rom.word_size ||
4413             ((first + length) > hw->rom.word_size))
4414                 return -EINVAL;
4415
4416         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4417
4418         return eeprom->writew_buffer(hw, first, length, data);
4419 }
4420
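/*
 * Report the plugged module's EEPROM layout: plain SFF-8079 when the
 * module does not implement SFF-8472 (or would need an unsupported page
 * change to reach address 0xA2), SFF-8472 otherwise.
 */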
4421 static int
4422 txgbe_get_module_info(struct rte_eth_dev *dev,
4423                       struct rte_eth_dev_module_info *modinfo)
4424 {
4425         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4426         uint32_t status;
4427         uint8_t sff8472_rev, addr_mode;
4428         bool page_swap = false;
4429
4430         /* Check whether we support SFF-8472 or not */
4431         status = hw->phy.read_i2c_eeprom(hw,
4432                                          TXGBE_SFF_SFF_8472_COMP,
4433                                          &sff8472_rev);
4434         if (status != 0)
4435                 return -EIO;
4436
4437         /* Check whether page-change addressing is required (not supported) */
4438         status = hw->phy.read_i2c_eeprom(hw,
4439                                          TXGBE_SFF_SFF_8472_SWAP,
4440                                          &addr_mode);
4441         if (status != 0)
4442                 return -EIO;
4443
4444         if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
4445                 PMD_DRV_LOG(ERR,
4446                             "Address change required to access page 0xA2, "
4447                             "but not supported. Please report the module "
4448                             "type to the driver maintainers.");
4449                 page_swap = true;
4450         }
4451
4452         if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
4453                 /* We have an SFP, but it does not support SFF-8472 */
4454                 modinfo->type = RTE_ETH_MODULE_SFF_8079;
4455                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
4456         } else {
4457                 /* We have an SFP that supports a revision of SFF-8472. */
4458                 modinfo->type = RTE_ETH_MODULE_SFF_8472;
4459                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
4460         }
4461
4462         return 0;
4463 }
4464
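/*
 * Dump the module EEPROM byte by byte: offsets below
 * RTE_ETH_MODULE_SFF_8079_LEN (256) come from the base A0 page, anything
 * above from the SFF-8472 diagnostic page.
 */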
4465 static int
4466 txgbe_get_module_eeprom(struct rte_eth_dev *dev,
4467                         struct rte_dev_eeprom_info *info)
4468 {
4469         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4470         uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
4471         uint8_t databyte = 0xFF;
4472         uint8_t *data = info->data;
4473         uint32_t i = 0;
4474
4475         if (info->length == 0)
4476                 return -EINVAL;
4477
4478         for (i = info->offset; i < info->offset + info->length; i++) {
4479                 if (i < RTE_ETH_MODULE_SFF_8079_LEN)
4480                         status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
4481                 else
4482                         status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
4483
4484                 if (status != 0)
4485                         return -EIO;
4486
4487                 data[i - info->offset] = databyte;
4488         }
4489
4490         return 0;
4491 }
4492
4493 bool
4494 txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
4495 {
4496         switch (mac_type) {
4497         case txgbe_mac_raptor:
4498                 return true;
4499         default:
4500                 return false;
4501         }
4502 }
4503
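/*
 * Fill in the DCB configuration as seen by the application: priority to
 * TC mapping, per-TC bandwidth shares, and the fixed queue ranges the
 * hardware assigns to each TC (per pool when VMDq is active).
 */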
4504 static int
4505 txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
4506                         struct rte_eth_dcb_info *dcb_info)
4507 {
4508         struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
4509         struct txgbe_dcb_tc_config *tc;
4510         struct rte_eth_dcb_tc_queue_mapping *tc_queue;
4511         uint8_t nb_tcs;
4512         uint8_t i, j;
4513
4514         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
4515                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
4516         else
4517                 dcb_info->nb_tcs = 1;
4518
4519         tc_queue = &dcb_info->tc_queue;
4520         nb_tcs = dcb_info->nb_tcs;
4521
4522         if (dcb_config->vt_mode) { /* VT (virtualization) is enabled */
4523                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
4524                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
4525                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
4526                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
4527                 if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
4528                         for (j = 0; j < nb_tcs; j++) {
4529                                 tc_queue->tc_rxq[0][j].base = j;
4530                                 tc_queue->tc_rxq[0][j].nb_queue = 1;
4531                                 tc_queue->tc_txq[0][j].base = j;
4532                                 tc_queue->tc_txq[0][j].nb_queue = 1;
4533                         }
4534                 } else {
4535                         for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
4536                                 for (j = 0; j < nb_tcs; j++) {
4537                                         tc_queue->tc_rxq[i][j].base =
4538                                                 i * nb_tcs + j;
4539                                         tc_queue->tc_rxq[i][j].nb_queue = 1;
4540                                         tc_queue->tc_txq[i][j].base =
4541                                                 i * nb_tcs + j;
4542                                         tc_queue->tc_txq[i][j].nb_queue = 1;
4543                                 }
4544                         }
4545                 }
4546         } else { /* VT is disabled */
4547                 struct rte_eth_dcb_rx_conf *rx_conf =
4548                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
4549                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
4550                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
4551                 if (dcb_info->nb_tcs == ETH_4_TCS) {
4552                         for (i = 0; i < dcb_info->nb_tcs; i++) {
4553                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
4554                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4555                         }
4556                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
4557                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
4558                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
4559                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
4560                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
4561                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4562                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4563                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4564                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
4565                         for (i = 0; i < dcb_info->nb_tcs; i++) {
4566                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
4567                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4568                         }
4569                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
4570                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
4571                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
4572                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
4573                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
4574                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
4575                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
4576                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
4577                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
4578                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4579                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4580                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4581                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
4582                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
4583                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
4584                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
4585                 }
4586         }
4587         for (i = 0; i < dcb_info->nb_tcs; i++) {
4588                 tc = &dcb_config->tc_config[i];
4589                 dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
4590         }
4591         return 0;
4592 }
4593
4594 /* Update the E-tag EtherType */
4595 static int
4596 txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
4597                             uint16_t ether_type)
4598 {
4599         uint32_t etag_etype;
4600
4601         etag_etype = rd32(hw, TXGBE_EXTAG);
4602         etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
4603         etag_etype |= ether_type;
4604         wr32(hw, TXGBE_EXTAG, etag_etype);
4605         txgbe_flush(hw);
4606
4607         return 0;
4608 }
4609
4610 /* Enable E-tag tunneling */
4611 static int
4612 txgbe_e_tag_enable(struct txgbe_hw *hw)
4613 {
4614         uint32_t ctrl;
4615
4616         ctrl = rd32(hw, TXGBE_PORTCTL);
4617         ctrl |= TXGBE_PORTCTL_ETAG;
4618         wr32(hw, TXGBE_PORTCTL, ctrl);
4619         txgbe_flush(hw);
4620
4621         return 0;
4622 }
4623
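/*
 * Look through the receive address table for a valid E-tag entry carrying
 * this tunnel ID and clear it. Entry 0 is skipped, as it presumably holds
 * the port's default MAC address.
 */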
4624 static int
4625 txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
4626                        struct txgbe_l2_tunnel_conf  *l2_tunnel)
4627 {
4628         int ret = 0;
4629         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4630         uint32_t i, rar_entries;
4631         uint32_t rar_low, rar_high;
4632
4633         rar_entries = hw->mac.num_rar_entries;
4634
4635         for (i = 1; i < rar_entries; i++) {
4636                 wr32(hw, TXGBE_ETHADDRIDX, i);
4637                 rar_high = rd32(hw, TXGBE_ETHADDRH);
4638                 rar_low  = rd32(hw, TXGBE_ETHADDRL);
4639                 if ((rar_high & TXGBE_ETHADDRH_VLD) &&
4640                     (rar_high & TXGBE_ETHADDRH_ETAG) &&
4641                     (TXGBE_ETHADDRL_ETAG(rar_low) ==
4642                      l2_tunnel->tunnel_id)) {
4643                         wr32(hw, TXGBE_ETHADDRL, 0);
4644                         wr32(hw, TXGBE_ETHADDRH, 0);
4645
4646                         txgbe_clear_vmdq(hw, i, BIT_MASK32);
4647
4648                         return ret;
4649                 }
4650         }
4651
4652         return ret;
4653 }
4654
4655 static int
4656 txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
4657                        struct txgbe_l2_tunnel_conf *l2_tunnel)
4658 {
4659         int ret = 0;
4660         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4661         uint32_t i, rar_entries;
4662         uint32_t rar_low, rar_high;
4663
4664         /* One entry for one tunnel. Try to remove potential existing entry. */
4665         txgbe_e_tag_filter_del(dev, l2_tunnel);
4666
4667         rar_entries = hw->mac.num_rar_entries;
4668
4669         for (i = 1; i < rar_entries; i++) {
4670                 wr32(hw, TXGBE_ETHADDRIDX, i);
4671                 rar_high = rd32(hw, TXGBE_ETHADDRH);
4672                 if (rar_high & TXGBE_ETHADDRH_VLD)
4673                         continue;
4674
4675                 txgbe_set_vmdq(hw, i, l2_tunnel->pool);
4676                 rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
4677                 rar_low = l2_tunnel->tunnel_id;
4678
4679                 wr32(hw, TXGBE_ETHADDRL, rar_low);
4680                 wr32(hw, TXGBE_ETHADDRH, rar_high);
4681
4682                 return ret;
4684         }
4685
4686         PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
4687                      " Please remove a rule before adding a new one.");
4688         return -EINVAL;
4689 }
4690
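/*
 * rte_hash_lookup()/rte_hash_add_key() return the key's slot index on
 * success; the same index is used for the hash_map shadow array that
 * stores the filter nodes.
 */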
4691 static inline struct txgbe_l2_tn_filter *
4692 txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
4693                           struct txgbe_l2_tn_key *key)
4694 {
4695         int ret;
4696
4697         ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
4698         if (ret < 0)
4699                 return NULL;
4700
4701         return l2_tn_info->hash_map[ret];
4702 }
4703
4704 static inline int
4705 txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4706                           struct txgbe_l2_tn_filter *l2_tn_filter)
4707 {
4708         int ret;
4709
4710         ret = rte_hash_add_key(l2_tn_info->hash_handle,
4711                                &l2_tn_filter->key);
4712
4713         if (ret < 0) {
4714                 PMD_DRV_LOG(ERR,
4715                             "Failed to insert L2 tunnel filter"
4716                             " into the hash table: %d!",
4717                             ret);
4718                 return ret;
4719         }
4720
4721         l2_tn_info->hash_map[ret] = l2_tn_filter;
4722
4723         TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4724
4725         return 0;
4726 }
4727
4728 static inline int
4729 txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4730                           struct txgbe_l2_tn_key *key)
4731 {
4732         int ret;
4733         struct txgbe_l2_tn_filter *l2_tn_filter;
4734
4735         ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
4736
4737         if (ret < 0) {
4738                 PMD_DRV_LOG(ERR,
4739                             "No such L2 tunnel filter to delete: %d!",
4740                             ret);
4741                 return ret;
4742         }
4743
4744         l2_tn_filter = l2_tn_info->hash_map[ret];
4745         l2_tn_info->hash_map[ret] = NULL;
4746
4747         TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4748         rte_free(l2_tn_filter);
4749
4750         return 0;
4751 }
4752
4753 /* Add an L2 tunnel filter; if @restore, only reprogram the hardware */
4754 int
4755 txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
4756                                struct txgbe_l2_tunnel_conf *l2_tunnel,
4757                                bool restore)
4758 {
4759         int ret;
4760         struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4761         struct txgbe_l2_tn_key key;
4762         struct txgbe_l2_tn_filter *node;
4763
4764         if (!restore) {
4765                 key.l2_tn_type = l2_tunnel->l2_tunnel_type;
4766                 key.tn_id = l2_tunnel->tunnel_id;
4767
4768                 node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
4769
4770                 if (node) {
4771                         PMD_DRV_LOG(ERR,
4772                                     "The L2 tunnel filter already exists!");
4773                         return -EINVAL;
4774                 }
4775
4776                 node = rte_zmalloc("txgbe_l2_tn",
4777                                    sizeof(struct txgbe_l2_tn_filter),
4778                                    0);
4779                 if (!node)
4780                         return -ENOMEM;
4781
4782                 rte_memcpy(&node->key, &key, sizeof(struct txgbe_l2_tn_key));
4785                 node->pool = l2_tunnel->pool;
4786                 ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
4787                 if (ret < 0) {
4788                         rte_free(node);
4789                         return ret;
4790                 }
4791         }
4792
4793         switch (l2_tunnel->l2_tunnel_type) {
4794         case RTE_L2_TUNNEL_TYPE_E_TAG:
4795                 ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
4796                 break;
4797         default:
4798                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4799                 ret = -EINVAL;
4800                 break;
4801         }
4802
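        /* HW programming failed for a new rule; undo the software entry. */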
4803         if (!restore && ret < 0)
4804                 (void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);
4805
4806         return ret;
4807 }
4808
4809 /* Delete l2 tunnel filter */
4810 /* Delete an L2 tunnel filter */
4811 txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
4812                                struct txgbe_l2_tunnel_conf *l2_tunnel)
4813 {
4814         int ret;
4815         struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4816         struct txgbe_l2_tn_key key;
4817
4818         key.l2_tn_type = l2_tunnel->l2_tunnel_type;
4819         key.tn_id = l2_tunnel->tunnel_id;
4820         ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
4821         if (ret < 0)
4822                 return ret;
4823
4824         switch (l2_tunnel->l2_tunnel_type) {
4825         case RTE_L2_TUNNEL_TYPE_E_TAG:
4826                 ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
4827                 break;
4828         default:
4829                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4830                 ret = -EINVAL;
4831                 break;
4832         }
4833
4834         return ret;
4835 }
4836
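/*
 * Switch the pool selection mode in TXGBE_POOLCTL to E-tag based
 * forwarding (or back to the default when @en is false).
 */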
4837 static int
4838 txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
4839 {
4840         int ret = 0;
4841         uint32_t ctrl;
4842         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4843
4844         ctrl = rd32(hw, TXGBE_POOLCTL);
4845         ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
4846         if (en)
4847                 ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
4848         wr32(hw, TXGBE_POOLCTL, ctrl);
4849
4850         return ret;
4851 }
4852
4853 /* restore n-tuple filters */
4854 static inline void
4855 txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
4856 {
4857         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4858         struct txgbe_5tuple_filter *node;
4859
4860         TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
4861                 txgbe_inject_5tuple_filter(dev, node);
4862         }
4863 }
4864
4865 /* restore Ethernet type filters */
4866 static inline void
4867 txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
4868 {
4869         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4870         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4871         int i;
4872
4873         for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
4874                 if (filter_info->ethertype_mask & (1 << i)) {
4875                         wr32(hw, TXGBE_ETFLT(i),
4876                                         filter_info->ethertype_filters[i].etqf);
4877                         wr32(hw, TXGBE_ETCLS(i),
4878                                         filter_info->ethertype_filters[i].etqs);
4879                         txgbe_flush(hw);
4880                 }
4881         }
4882 }
4883
4884 /* restore SYN filter */
4885 static inline void
4886 txgbe_syn_filter_restore(struct rte_eth_dev *dev)
4887 {
4888         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4889         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4890         uint32_t synqf;
4891
4892         synqf = filter_info->syn_info;
4893
4894         if (synqf & TXGBE_SYNCLS_ENA) {
4895                 wr32(hw, TXGBE_SYNCLS, synqf);
4896                 txgbe_flush(hw);
4897         }
4898 }
4899
4900 /* restore L2 tunnel filters */
4901 static inline void
4902 txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
4903 {
4904         struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4905         struct txgbe_l2_tn_filter *node;
4906         struct txgbe_l2_tunnel_conf l2_tn_conf;
4907
4908         TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
4909                 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
4910                 l2_tn_conf.tunnel_id      = node->key.tn_id;
4911                 l2_tn_conf.pool           = node->pool;
4912                 (void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
4913         }
4914 }
4915
4916 /* restore RSS filter */
4917 static inline void
4918 txgbe_rss_filter_restore(struct rte_eth_dev *dev)
4919 {
4920         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4921
4922         if (filter_info->rss_info.conf.queue_num)
4923                 txgbe_config_rss_filter(dev,
4924                         &filter_info->rss_info, TRUE);
4925 }
4926
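/*
 * Reprogram every filter type from the driver's software shadow tables
 * into hardware, e.g. after the port has been restarted and the filter
 * registers were cleared.
 */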
4927 static int
4928 txgbe_filter_restore(struct rte_eth_dev *dev)
4929 {
4930         txgbe_ntuple_filter_restore(dev);
4931         txgbe_ethertype_filter_restore(dev);
4932         txgbe_syn_filter_restore(dev);
4933         txgbe_fdir_filter_restore(dev);
4934         txgbe_l2_tn_filter_restore(dev);
4935         txgbe_rss_filter_restore(dev);
4936
4937         return 0;
4938 }
4939
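/*
 * Reapply the cached E-tag configuration: the enable bit, the forwarding
 * mode, and the E-tag EtherType.
 */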
4940 static void
4941 txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
4942 {
4943         struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4944         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4945
4946         if (l2_tn_info->e_tag_en)
4947                 (void)txgbe_e_tag_enable(hw);
4948
4949         if (l2_tn_info->e_tag_fwd_en)
4950                 (void)txgbe_e_tag_forwarding_en_dis(dev, 1);
4951
4952         (void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
4953 }
4954
4955 static const struct eth_dev_ops txgbe_eth_dev_ops = {
4956         .dev_configure              = txgbe_dev_configure,
4957         .dev_infos_get              = txgbe_dev_info_get,
4958         .dev_start                  = txgbe_dev_start,
4959         .dev_stop                   = txgbe_dev_stop,
4960         .dev_set_link_up            = txgbe_dev_set_link_up,
4961         .dev_set_link_down          = txgbe_dev_set_link_down,
4962         .dev_close                  = txgbe_dev_close,
4963         .dev_reset                  = txgbe_dev_reset,
4964         .promiscuous_enable         = txgbe_dev_promiscuous_enable,
4965         .promiscuous_disable        = txgbe_dev_promiscuous_disable,
4966         .allmulticast_enable        = txgbe_dev_allmulticast_enable,
4967         .allmulticast_disable       = txgbe_dev_allmulticast_disable,
4968         .link_update                = txgbe_dev_link_update,
4969         .stats_get                  = txgbe_dev_stats_get,
4970         .xstats_get                 = txgbe_dev_xstats_get,
4971         .xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
4972         .stats_reset                = txgbe_dev_stats_reset,
4973         .xstats_reset               = txgbe_dev_xstats_reset,
4974         .xstats_get_names           = txgbe_dev_xstats_get_names,
4975         .xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
4976         .queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
4977         .fw_version_get             = txgbe_fw_version_get,
4978         .dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
4979         .mtu_set                    = txgbe_dev_mtu_set,
4980         .vlan_filter_set            = txgbe_vlan_filter_set,
4981         .vlan_tpid_set              = txgbe_vlan_tpid_set,
4982         .vlan_offload_set           = txgbe_vlan_offload_set,
4983         .vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
4984         .rx_queue_start             = txgbe_dev_rx_queue_start,
4985         .rx_queue_stop              = txgbe_dev_rx_queue_stop,
4986         .tx_queue_start             = txgbe_dev_tx_queue_start,
4987         .tx_queue_stop              = txgbe_dev_tx_queue_stop,
4988         .rx_queue_setup             = txgbe_dev_rx_queue_setup,
4989         .rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
4990         .rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
4991         .rx_queue_release           = txgbe_dev_rx_queue_release,
4992         .tx_queue_setup             = txgbe_dev_tx_queue_setup,
4993         .tx_queue_release           = txgbe_dev_tx_queue_release,
4994         .dev_led_on                 = txgbe_dev_led_on,
4995         .dev_led_off                = txgbe_dev_led_off,
4996         .flow_ctrl_get              = txgbe_flow_ctrl_get,
4997         .flow_ctrl_set              = txgbe_flow_ctrl_set,
4998         .priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
4999         .mac_addr_add               = txgbe_add_rar,
5000         .mac_addr_remove            = txgbe_remove_rar,
5001         .mac_addr_set               = txgbe_set_default_mac_addr,
5002         .uc_hash_table_set          = txgbe_uc_hash_table_set,
5003         .uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
5004         .set_queue_rate_limit       = txgbe_set_queue_rate_limit,
5005         .reta_update                = txgbe_dev_rss_reta_update,
5006         .reta_query                 = txgbe_dev_rss_reta_query,
5007         .rss_hash_update            = txgbe_dev_rss_hash_update,
5008         .rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
5009         .filter_ctrl                = txgbe_dev_filter_ctrl,
5010         .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
5011         .rxq_info_get               = txgbe_rxq_info_get,
5012         .txq_info_get               = txgbe_txq_info_get,
5013         .timesync_enable            = txgbe_timesync_enable,
5014         .timesync_disable           = txgbe_timesync_disable,
5015         .timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
5016         .timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
5017         .get_reg                    = txgbe_get_regs,
5018         .get_eeprom_length          = txgbe_get_eeprom_length,
5019         .get_eeprom                 = txgbe_get_eeprom,
5020         .set_eeprom                 = txgbe_set_eeprom,
5021         .get_module_info            = txgbe_get_module_info,
5022         .get_module_eeprom          = txgbe_get_module_eeprom,
5023         .get_dcb_info               = txgbe_dev_get_dcb_info,
5024         .timesync_adjust_time       = txgbe_timesync_adjust_time,
5025         .timesync_read_time         = txgbe_timesync_read_time,
5026         .timesync_write_time        = txgbe_timesync_write_time,
5027         .tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
5028 };
5029
5030 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
5031 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
5032 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
5033
5034 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
5035 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
5036
5037 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
5038         RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
5039 #endif
5040 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
5041         RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
5042 #endif
5043
5044 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
5045         RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
5046 #endif