net/ixgbe: fix uninitialized variable
[dpdk.git] / drivers / net / ixgbe / ixgbe_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40
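
/*
 * Worked example (illustrative, not part of the original file): since both
 * thresholds are in units of 1024 bytes, the defaults above correspond to
 * sending XOFF once the packet buffer fills past 0x80 * 1024 = 128 KB and
 * sending XON once it drains back below 0x40 * 1024 = 64 KB.
 */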

/* Default minimum inter-interrupt interval for EITR configuration */
#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
#define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */

/*
 *  Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

#define IXGBE_HKEY_MAX_INDEX 10

/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
#define DEFAULT_ETAG_ETYPE                     0x893f
#define IXGBE_ETAG_ETYPE                       0x00005084
#define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID                 0x80000000
#define IXGBE_RAH_ADTYPE                       0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
#define IXGBE_VMVIR_TAGA_MASK                  0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG                    0x00000004
#define IXGBE_VTEICR_MASK                      0x07

#define IXGBE_EXVET_VET_EXT_SHIFT              16
#define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
static int  ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static int  ixgbe_dev_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
                                struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
                                  struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
                uint64_t *values, unsigned int n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
        struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        const uint64_t *ids,
        unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
                                 size_t fw_size);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
                               enum rte_vlan_type vlan_type,
                               uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
                int on);
static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *handle);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                         uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
                                           struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
                                struct rte_pci_driver *drv);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
                                   int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                            uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                             uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                                 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                struct rte_eth_mirror_conf *mirror_conf,
                uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                           uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
                                struct ether_addr *mac_addr,
                                uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
                                             struct ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *filter);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                      struct ether_addr *mc_addr_set,
                                      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
                                   struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
                            struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
                                struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp,
                                            uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
                                   const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_l2_tunnel_eth_type_conf
        (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
static int ixgbe_dev_l2_tunnel_offload_set
        (struct rte_eth_dev *dev,
         struct rte_eth_l2_tunnel_conf *l2_tunnel,
         uint32_t mask,
         uint8_t en);
static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
                                             enum rte_filter_op filter_op,
                                             void *arg);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);

/*
 * Define VF stats macros for registers that are not "clear on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
        uint32_t latest = IXGBE_READ_REG(hw, reg);              \
        cur += (latest - last) & UINT_MAX;                      \
        last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
        u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
        u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
        u64 latest = ((new_msb << 32) | new_lsb);                \
        cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
        last = latest;                                           \
}
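
/*
 * Worked example (illustrative, not in the original source): the 36-bit
 * variant handles hardware counter wrap. If the previous snapshot was
 * last = 0xFFFFFFF00 and the register pair now reads latest = 0x100, then
 * (0x1000000000 + 0x100 - 0xFFFFFFF00) & 0xFFFFFFFFF = 0x200, i.e. the
 * 512 events that occurred across the wrap are accumulated into cur.
 */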

#define IXGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
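
/*
 * Worked example (illustrative): with 32-bit bitmap words and NBBY == 8,
 * IXGBE_SET_HWSTRIP(h, 35) computes idx = 35 / 32 = 1 and bit = 35 % 32 = 3,
 * so it sets bit 3 of bitmap[1]. A hypothetical usage sketch:
 *
 *     IXGBE_SET_HWSTRIP(hwstrip, queue);      // mark strip enabled
 *     IXGBE_GET_HWSTRIP(hwstrip, queue, on);  // on == 1 afterwards
 *     IXGBE_CLEAR_HWSTRIP(hwstrip, queue);    // mark strip disabled
 */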

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
        { .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_TXD_ALIGN,
        .nb_seg_max = IXGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};
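
/*
 * Hypothetical usage sketch (not part of this file): applications see these
 * limits via rte_eth_dev_info_get() and should size their rings to respect
 * nb_min/nb_max/nb_align before calling queue setup, e.g.:
 *
 *     struct rte_eth_dev_info dev_info;
 *     rte_eth_dev_info_get(port_id, &dev_info);
 *     uint16_t nb_rxd = RTE_ALIGN_CEIL(1024, dev_info.rx_desc_lim.nb_align);
 */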

static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
        .dev_stop             = ixgbe_dev_stop,
        .dev_set_link_up      = ixgbe_dev_set_link_up,
        .dev_set_link_down    = ixgbe_dev_set_link_down,
        .dev_close            = ixgbe_dev_close,
        .dev_reset            = ixgbe_dev_reset,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
        .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
        .allmulticast_disable = ixgbe_dev_allmulticast_disable,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbe_dev_stats_get,
        .xstats_get           = ixgbe_dev_xstats_get,
        .xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
        .stats_reset          = ixgbe_dev_stats_reset,
        .xstats_reset         = ixgbe_dev_xstats_reset,
        .xstats_get_names     = ixgbe_dev_xstats_get_names,
        .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .fw_version_get       = ixgbe_fw_version_get,
        .dev_infos_get        = ixgbe_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
        .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
        .rx_queue_start       = ixgbe_dev_rx_queue_start,
        .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
        .tx_queue_start       = ixgbe_dev_tx_queue_start,
        .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
        .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
        .flow_ctrl_get        = ixgbe_flow_ctrl_get,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
        .mac_addr_set         = ixgbe_set_default_mac_addr,
        .uc_hash_table_set    = ixgbe_uc_hash_table_set,
        .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
        .mirror_rule_set      = ixgbe_mirror_rule_set,
        .mirror_rule_reset    = ixgbe_mirror_rule_reset,
        .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
        .filter_ctrl          = ixgbe_dev_filter_ctrl,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .timesync_enable      = ixgbe_timesync_enable,
        .timesync_disable     = ixgbe_timesync_disable,
        .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
        .get_reg              = ixgbe_get_regs,
        .get_eeprom_length    = ixgbe_get_eeprom_length,
        .get_eeprom           = ixgbe_get_eeprom,
        .set_eeprom           = ixgbe_set_eeprom,
        .get_dcb_info         = ixgbe_dev_get_dcb_info,
        .timesync_adjust_time = ixgbe_timesync_adjust_time,
        .timesync_read_time   = ixgbe_timesync_read_time,
        .timesync_write_time  = ixgbe_timesync_write_time,
        .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
        .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
        .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
        .tm_ops_get           = ixgbe_tm_ops_get,
};

/*
 * dev_ops for virtual function; only the bare necessities for basic VF
 * operation have been implemented
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_configure        = ixgbevf_dev_configure,
        .dev_start            = ixgbevf_dev_start,
        .dev_stop             = ixgbevf_dev_stop,
        .link_update          = ixgbevf_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
        .xstats_get           = ixgbevf_dev_xstats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .xstats_reset         = ixgbevf_dev_stats_reset,
        .xstats_get_names     = ixgbevf_dev_xstats_get_names,
        .dev_close            = ixgbevf_dev_close,
        .dev_reset            = ixgbevf_dev_reset,
        .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
        .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
        .dev_infos_get        = ixgbevf_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
        .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
        .mac_addr_add         = ixgbevf_add_mac_addr,
        .mac_addr_remove      = ixgbevf_remove_mac_addr,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .mac_addr_set         = ixgbevf_set_default_mac_addr,
        .get_reg              = ixgbevf_get_regs,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_ixgbe_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
        {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
        {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
        {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
        {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
        {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
        {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
        {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
        {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
        {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
        {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
        {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
        {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
        {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
        {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
        {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                prc1023)},
        {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                prc1522)},
        {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
        {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
        {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
        {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
        {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
        {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
        {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
        {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
        {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
        {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
        {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
        {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
        {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
        {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
        {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
        {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
        {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                ptc1023)},
        {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                ptc1522)},
        {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
        {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
        {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
        {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

        {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_add)},
        {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_remove)},
        {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fadd)},
        {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fremove)},
        {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
                fdirmatch)},
        {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
                fdirmiss)},

        {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
        {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
        {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
                fclast)},
        {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
        {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
        {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
        {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
        {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
                fcoe_noddp)},
        {"rx_fcoe_no_direct_data_placement_ext_buff",
                offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

        {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxontxc)},
        {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxonrxc)},
        {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxofftxc)},
        {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxoffrxc)},
        {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
                           sizeof(rte_ixgbe_stats_strings[0]))
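
/*
 * A minimal sketch (assumed, not from this file) of how the table above is
 * typically consumed: each entry's offset indexes a 64-bit counter inside
 * struct ixgbe_hw_stats, so filling xstats reduces to pointer arithmetic:
 *
 *     for (i = 0; i < IXGBE_NB_HW_STATS; i++)
 *             xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
 *                             rte_ixgbe_stats_strings[i].offset);
 */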

/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
        {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
                out_pkts_untagged)},
        {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
                out_pkts_encrypted)},
        {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
                out_pkts_protected)},
        {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
                out_octets_encrypted)},
        {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
                out_octets_protected)},
        {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
                in_pkts_untagged)},
        {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
                in_pkts_badtag)},
        {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
                in_pkts_nosci)},
        {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unknownsci)},
        {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
                in_octets_decrypted)},
        {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
                in_octets_validated)},
        {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unchecked)},
        {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
                in_pkts_delayed)},
        {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
                in_pkts_late)},
        {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
                in_pkts_ok)},
        {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
                in_pkts_invalid)},
        {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
                in_pkts_notvalid)},
        {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unusedsa)},
        {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
                in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
                           sizeof(rte_ixgbe_macsec_strings[0]))

/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
        {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
        {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
        {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
                           sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
        {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
        {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
                pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
                           sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
        {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
                sizeof(rte_ixgbevf_stats_strings[0]))

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer to be filled with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}
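
/*
 * Note (an assumption made explicit, not in the original source): both
 * helpers above rely on struct rte_eth_link packing into a single 64-bit
 * word so that rte_atomic64_cmpset() can snapshot or publish it atomically.
 * A build-time guard placed in an init path would document this, e.g.:
 *
 *     RTE_BUILD_BUG_ON(sizeof(struct rte_eth_link) > sizeof(uint64_t));
 */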

/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_avago:
        case ixgbe_phy_sfp_ftl:
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
        case ixgbe_phy_sfp_passive_tyco:
        case ixgbe_phy_sfp_passive_unknown:
                return 1;
        default:
                return 0;
        }
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = ixgbe_reset_hw(hw);

        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
        IXGBE_WRITE_FLUSH(hw);

        if (status == IXGBE_ERR_SFP_NOT_PRESENT)
                status = IXGBE_SUCCESS;
        return status;
}

static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        if (hw->mac.type == ixgbe_mac_82598EB) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
        }
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function resets queue statistics mapping registers.
 * From the Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
        uint32_t i;

        for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
}

static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_id,
                                  uint8_t stat_idx,
                                  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
        uint32_t qsmr_mask = 0;
        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
        uint32_t q_map;
        uint8_t n, offset;

        if ((hw->mac.type != ixgbe_mac_82599EB) &&
                (hw->mac.type != ixgbe_mac_X540) &&
                (hw->mac.type != ixgbe_mac_X550) &&
                (hw->mac.type != ixgbe_mac_X550EM_x) &&
                (hw->mac.type != ixgbe_mac_X550EM_a))
                return -ENOSYS;

        PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);

        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] &= ~clearing_mask;
        else
                stat_mappings->rqsmr[n] &= ~clearing_mask;

        q_map = (uint32_t)stat_idx;
        q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
        qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] |= qsmr_mask;
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;

        PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
        PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

        /* Now write the mapping in the appropriate register */
        if (is_rx) {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        } else {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
        return 0;
}
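
/*
 * Worked example (illustrative): with 8-bit QMAP fields and 4 fields per
 * QSM register, queue_id = 5 gives n = 5 / 4 = 1 and offset = 5 % 4 = 1,
 * so stat_idx = 3 lands in bits 15:8 of RQSMR[1] (TQSM[1] for TX):
 *
 *     ixgbe_dev_queue_stats_mapping_set(eth_dev, 5, 3, 1);
 *     // stat_mappings->rqsmr[1] == 0x00000300, assuming it started at 0
 */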

static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i;

        /* write whatever was in stat mapping table to the NIC */
        for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
                /* rx */
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

                /* tx */
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
        }
}

static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
        uint8_t i;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

        dcb_config->num_tcs.pg_tcs = dcb_max_tc;
        dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
        for (i = 0; i < dcb_max_tc; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->pfc = ixgbe_dcb_pfc_disabled;
        }

        /* Initialize default user to priority mapping, UPx->TC0 */
        tc = &dcb_config->tc_config[0];
        tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
        tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
        for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
                dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
                dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
        }
        dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
        dcb_config->pfc_mode_enable = false;
        dcb_config->vt_mode = true;
        dcb_config->round_robin_enable = false;
        /* support all DCB capabilities in 82599 */
        dcb_config->support.capabilities = 0xFF;

        /* we only support 4 TCs for X540, X550 */
        if (hw->mac.type == ixgbe_mac_X540 ||
                hw->mac.type == ixgbe_mac_X550 ||
                hw->mac.type == ixgbe_mac_X550EM_x ||
                hw->mac.type == ixgbe_mac_X550EM_a) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
}
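
/*
 * Worked example (illustrative): with the default 8 traffic classes,
 * 100 / 8 = 12, so bwg_percent alternates 12/13 across TCs
 * (4 * 12 + 4 * 13 = 100), keeping the total allocation at exactly
 * 100 percent without needing a remainder correction.
 */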

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
        uint16_t mask;

        /*
         * The PHY lock should not fail at this early stage. If it does, it
         * is due to an improper exit of the application.
         * So force the release of the faulty lock. Release of the common
         * lock is done automatically by the swfw_sync function.
         */
        mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
        }
        ixgbe_release_swfw_semaphore(hw, mask);

        /*
         * These ones are more tricky since they are common to all ports; but
         * swfw_sync retries last long enough (1s) to be almost sure that if
         * the lock cannot be taken it is due to an improper lock of the
         * semaphore.
         */
        mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");
        }
        ixgbe_release_swfw_semaphore(hw, mask);
}
1113
1114 /*
1115  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
1116  * It returns 0 on success.
1117  */
1118 static int
1119 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
1120 {
1121         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1122         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1123         struct ixgbe_hw *hw =
1124                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1125         struct ixgbe_vfta *shadow_vfta =
1126                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1127         struct ixgbe_hwstrip *hwstrip =
1128                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1129         struct ixgbe_dcb_config *dcb_config =
1130                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
1131         struct ixgbe_filter_info *filter_info =
1132                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1133         struct ixgbe_bw_conf *bw_conf =
1134                 IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
1135         uint32_t ctrl_ext;
1136         uint16_t csum;
1137         int diag, i;
1138
1139         PMD_INIT_FUNC_TRACE();
1140
1141         eth_dev->dev_ops = &ixgbe_eth_dev_ops;
1142         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1143         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1144         eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
1145
1146         /*
1147          * For secondary processes, we don't initialise any further as the
1148          * primary has already done this work. Only check whether we need a
1149          * different RX or TX function.
1150          */
1151         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1152                 struct ixgbe_tx_queue *txq;
1153                 /* The TX queue function was set in the primary process by the
1154                  * last queue initialized; TX queues may not be initialized yet.
1155                  */
1156                 if (eth_dev->data->tx_queues) {
1157                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1158                         ixgbe_set_tx_function(eth_dev, txq);
1159                 } else {
1160                         /* Use default TX function if we get here */
1161                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1162                                      "Using default TX function.");
1163                 }
1164
1165                 ixgbe_set_rx_function(eth_dev);
1166
1167                 return 0;
1168         }
1169
1170         rte_eth_copy_pci_info(eth_dev, pci_dev);
1171         eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
1172
1173         /* Vendor and Device ID need to be set before init of shared code */
1174         hw->device_id = pci_dev->id.device_id;
1175         hw->vendor_id = pci_dev->id.vendor_id;
1176         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1177         hw->allow_unsupported_sfp = 1;
1178
1179         /* Initialize the shared code (base driver) */
1180 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1181         diag = ixgbe_bypass_init_shared_code(hw);
1182 #else
1183         diag = ixgbe_init_shared_code(hw);
1184 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1185
1186         if (diag != IXGBE_SUCCESS) {
1187                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1188                 return -EIO;
1189         }
1190
1191         /* pick up the PCI bus settings for reporting later */
1192         ixgbe_get_bus_info(hw);
1193
1194         /* Unlock any pending hardware semaphore */
1195         ixgbe_swfw_lock_reset(hw);
1196
1197         /* Initialize DCB configuration */
1198         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1199         ixgbe_dcb_init(hw, dcb_config);
1200         /* Get Hardware Flow Control setting */
1201         hw->fc.requested_mode = ixgbe_fc_full;
1202         hw->fc.current_mode = ixgbe_fc_full;
1203         hw->fc.pause_time = IXGBE_FC_PAUSE;
1204         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1205                 hw->fc.low_water[i] = IXGBE_FC_LO;
1206                 hw->fc.high_water[i] = IXGBE_FC_HI;
1207         }
1208         hw->fc.send_xon = 1;
1209
1210         /* Make sure we have a good EEPROM before we read from it */
1211         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1212         if (diag != IXGBE_SUCCESS) {
1213                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1214                 return -EIO;
1215         }
1216
1217 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1218         diag = ixgbe_bypass_init_hw(hw);
1219 #else
1220         diag = ixgbe_init_hw(hw);
1221 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1222
1223         /*
1224          * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1225          * is called too soon after the kernel driver unbinding/binding occurs.
1226          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1227          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1228          * also called. See ixgbe_identify_phy_82599(). The reason for the
1229          * failure is not known, and it only occurs when virtualisation
1230          * features are disabled in the BIOS. A delay of 100ms was found to be
1231          * enough by trial-and-error, and is doubled to be safe.
1232          */
1233         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1234                 rte_delay_ms(200);
1235                 diag = ixgbe_init_hw(hw);
1236         }
1237
1238         if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
1239                 diag = IXGBE_SUCCESS;
1240
1241         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1242                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1243                              "LOM.  Please be aware there may be issues associated "
1244                              "with your hardware.");
1245                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1246                              "please contact your Intel or hardware representative "
1247                              "who provided you with this hardware.");
1248         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1249                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1250         if (diag) {
1251                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1252                 return -EIO;
1253         }
1254
1255         /* Reset the hw statistics */
1256         ixgbe_dev_stats_reset(eth_dev);
1257
1258         /* disable interrupt */
1259         ixgbe_disable_intr(hw);
1260
1261         /* reset mappings for queue statistics HW counters */
1262         ixgbe_reset_qstat_mappings(hw);
1263
1264         /* Allocate memory for storing MAC addresses */
1265         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1266                                                hw->mac.num_rar_entries, 0);
1267         if (eth_dev->data->mac_addrs == NULL) {
1268                 PMD_INIT_LOG(ERR,
1269                              "Failed to allocate %u bytes needed to store "
1270                              "MAC addresses",
1271                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1272                 return -ENOMEM;
1273         }
1274         /* Copy the permanent MAC address */
1275         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1276                         &eth_dev->data->mac_addrs[0]);
1277
1278         /* Allocate memory for storing hash filter MAC addresses */
1279         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1280                                                     IXGBE_VMDQ_NUM_UC_MAC, 0);
1281         if (eth_dev->data->hash_mac_addrs == NULL) {
1282                 PMD_INIT_LOG(ERR,
1283                              "Failed to allocate %d bytes needed to store MAC addresses",
1284                              ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1285                 return -ENOMEM;
1286         }
1287
1288         /* initialize the vfta */
1289         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1290
1291         /* initialize the HW strip bitmap */
1292         memset(hwstrip, 0, sizeof(*hwstrip));
1293
1294         /* initialize PF if max_vfs not zero */
1295         ixgbe_pf_host_init(eth_dev);
1296
1297         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1298         /* let hardware know driver is loaded */
1299         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1300         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1301         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1302         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1303         IXGBE_WRITE_FLUSH(hw);
1304
1305         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1306                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1307                              (int) hw->mac.type, (int) hw->phy.type,
1308                              (int) hw->phy.sfp_type);
1309         else
1310                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1311                              (int) hw->mac.type, (int) hw->phy.type);
1312
1313         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1314                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1315                      pci_dev->id.device_id);
1316
1317         rte_intr_callback_register(intr_handle,
1318                                    ixgbe_dev_interrupt_handler, eth_dev);
1319
1320         /* enable uio/vfio intr/eventfd mapping */
1321         rte_intr_enable(intr_handle);
1322
1323         /* enable support intr */
1324         ixgbe_enable_intr(eth_dev);
1325
1326         /* initialize filter info */
1327         memset(filter_info, 0,
1328                sizeof(struct ixgbe_filter_info));
1329
1330         /* initialize 5tuple filter list */
1331         TAILQ_INIT(&filter_info->fivetuple_list);
1332
1333         /* initialize flow director filter list & hash */
1334         ixgbe_fdir_filter_init(eth_dev);
1335
1336         /* initialize l2 tunnel filter list & hash */
1337         ixgbe_l2_tn_filter_init(eth_dev);
1338
1339         /* initialize flow filter lists */
1340         ixgbe_filterlist_init();
1341
1342         /* initialize bandwidth configuration info */
1343         memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
1344
1345         /* initialize Traffic Manager configuration */
1346         ixgbe_tm_conf_init(eth_dev);
1347
1348         return 0;
1349 }
1350
1351 static int
1352 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1353 {
1354         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1355         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1356         struct ixgbe_hw *hw;
1357
1358         PMD_INIT_FUNC_TRACE();
1359
1360         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1361                 return -EPERM;
1362
1363         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1364
1365         if (hw->adapter_stopped == 0)
1366                 ixgbe_dev_close(eth_dev);
1367
1368         eth_dev->dev_ops = NULL;
1369         eth_dev->rx_pkt_burst = NULL;
1370         eth_dev->tx_pkt_burst = NULL;
1371
1372         /* Unlock any pending hardware semaphore */
1373         ixgbe_swfw_lock_reset(hw);
1374
1375         /* disable uio intr before callback unregister */
1376         rte_intr_disable(intr_handle);
1377         rte_intr_callback_unregister(intr_handle,
1378                                      ixgbe_dev_interrupt_handler, eth_dev);
1379
1380         /* uninitialize PF if max_vfs not zero */
1381         ixgbe_pf_host_uninit(eth_dev);
1382
1383         rte_free(eth_dev->data->mac_addrs);
1384         eth_dev->data->mac_addrs = NULL;
1385
1386         rte_free(eth_dev->data->hash_mac_addrs);
1387         eth_dev->data->hash_mac_addrs = NULL;
1388
1389         /* remove all the fdir filters & hash */
1390         ixgbe_fdir_filter_uninit(eth_dev);
1391
1392         /* remove all the L2 tunnel filters & hash */
1393         ixgbe_l2_tn_filter_uninit(eth_dev);
1394
1395         /* Remove all ntuple filters of the device */
1396         ixgbe_ntuple_filter_uninit(eth_dev);
1397
1398         /* clear all the filters list */
1399         ixgbe_filterlist_flush();
1400
1401         /* Remove all Traffic Manager configuration */
1402         ixgbe_tm_conf_uninit(eth_dev);
1403
1404         return 0;
1405 }
1406
1407 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
1408 {
1409         struct ixgbe_filter_info *filter_info =
1410                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1411         struct ixgbe_5tuple_filter *p_5tuple;
1412
1413         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
1414                 TAILQ_REMOVE(&filter_info->fivetuple_list,
1415                              p_5tuple,
1416                              entries);
1417                 rte_free(p_5tuple);
1418         }
1419         memset(filter_info->fivetuple_mask, 0,
1420                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1421
1422         return 0;
1423 }
1424
1425 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
1426 {
1427         struct ixgbe_hw_fdir_info *fdir_info =
1428                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1429         struct ixgbe_fdir_filter *fdir_filter;
1430
1431         if (fdir_info->hash_map)
1432                 rte_free(fdir_info->hash_map);
1433         if (fdir_info->hash_handle)
1434                 rte_hash_free(fdir_info->hash_handle);
1435
1436         while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1437                 TAILQ_REMOVE(&fdir_info->fdir_list,
1438                              fdir_filter,
1439                              entries);
1440                 rte_free(fdir_filter);
1441         }
1442
1443         return 0;
1444 }
1445
1446 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
1447 {
1448         struct ixgbe_l2_tn_info *l2_tn_info =
1449                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1450         struct ixgbe_l2_tn_filter *l2_tn_filter;
1451
1452         if (l2_tn_info->hash_map)
1453                 rte_free(l2_tn_info->hash_map);
1454         if (l2_tn_info->hash_handle)
1455                 rte_hash_free(l2_tn_info->hash_handle);
1456
1457         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
1458                 TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
1459                              l2_tn_filter,
1460                              entries);
1461                 rte_free(l2_tn_filter);
1462         }
1463
1464         return 0;
1465 }
1466
1467 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
1468 {
1469         struct ixgbe_hw_fdir_info *fdir_info =
1470                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1471         char fdir_hash_name[RTE_HASH_NAMESIZE];
1472         struct rte_hash_parameters fdir_hash_params = {
1473                 .name = fdir_hash_name,
1474                 .entries = IXGBE_MAX_FDIR_FILTER_NUM,
1475                 .key_len = sizeof(union ixgbe_atr_input),
1476                 .hash_func = rte_hash_crc,
1477                 .hash_func_init_val = 0,
1478                 .socket_id = rte_socket_id(),
1479         };
1480
1481         TAILQ_INIT(&fdir_info->fdir_list);
1482         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1483                  "fdir_%s", eth_dev->device->name);
1484         fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
1485         if (!fdir_info->hash_handle) {
1486                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1487                 return -EINVAL;
1488         }
1489         fdir_info->hash_map = rte_zmalloc("ixgbe",
1490                                           sizeof(struct ixgbe_fdir_filter *) *
1491                                           IXGBE_MAX_FDIR_FILTER_NUM,
1492                                           0);
1493         if (!fdir_info->hash_map) {
1494                 PMD_INIT_LOG(ERR,
1495                              "Failed to allocate memory for fdir hash map!");
1496                 return -ENOMEM;
1497         }
1498         fdir_info->mask_added = FALSE;
1499
1500         return 0;
1501 }
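
/*
 * A minimal sketch of how the table built above is typically consulted
 * (illustrative only, not driver code): rte_hash_lookup() returns the slot
 * index on a hit, which indexes hash_map to reach the stored filter.
 *
 *   union ixgbe_atr_input key;   // filled in by the caller
 *   int idx = rte_hash_lookup(fdir_info->hash_handle, &key);
 *   struct ixgbe_fdir_filter *f =
 *           (idx >= 0) ? fdir_info->hash_map[idx] : NULL;
 */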
1502
1503 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
1504 {
1505         struct ixgbe_l2_tn_info *l2_tn_info =
1506                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1507         char l2_tn_hash_name[RTE_HASH_NAMESIZE];
1508         struct rte_hash_parameters l2_tn_hash_params = {
1509                 .name = l2_tn_hash_name,
1510                 .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
1511                 .key_len = sizeof(struct ixgbe_l2_tn_key),
1512                 .hash_func = rte_hash_crc,
1513                 .hash_func_init_val = 0,
1514                 .socket_id = rte_socket_id(),
1515         };
1516
1517         TAILQ_INIT(&l2_tn_info->l2_tn_list);
1518         snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
1519                  "l2_tn_%s", eth_dev->device->name);
1520         l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
1521         if (!l2_tn_info->hash_handle) {
1522                 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
1523                 return -EINVAL;
1524         }
1525         l2_tn_info->hash_map = rte_zmalloc("ixgbe",
1526                                    sizeof(struct ixgbe_l2_tn_filter *) *
1527                                    IXGBE_MAX_L2_TN_FILTER_NUM,
1528                                    0);
1529         if (!l2_tn_info->hash_map) {
1530                 PMD_INIT_LOG(ERR,
1531                         "Failed to allocate memory for L2 TN hash map!");
1532                 return -ENOMEM;
1533         }
1534         l2_tn_info->e_tag_en = FALSE;
1535         l2_tn_info->e_tag_fwd_en = FALSE;
1536         l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
1537
1538         return 0;
1539 }
1540 /*
1541  * Negotiate mailbox API version with the PF.
1542  * After a reset, the API version is always set to the basic one (ixgbe_mbox_api_10).
1543  * Then we try to negotiate starting with the most recent one.
1544  * If all negotiation attempts fail, then we will proceed with
1545  * the default one (ixgbe_mbox_api_10).
1546  */
1547 static void
1548 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1549 {
1550         int32_t i;
1551
1552         /* start with highest supported, proceed down */
1553         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1554                 ixgbe_mbox_api_12,
1555                 ixgbe_mbox_api_11,
1556                 ixgbe_mbox_api_10,
1557         };
1558
1559         for (i = 0;
1560                         i != RTE_DIM(sup_ver) &&
1561                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1562                         i++)
1563                 ;
1564 }
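
/*
 * For example, per the loop above: against a PF that only understands
 * mailbox API 1.1, the ixgbe_mbox_api_12 attempt fails, the
 * ixgbe_mbox_api_11 attempt succeeds and the loop stops there; if every
 * attempt failed, the VF would keep the post-reset default,
 * ixgbe_mbox_api_10.
 */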
1565
1566 static void
1567 generate_random_mac_addr(struct ether_addr *mac_addr)
1568 {
1569         uint64_t random;
1570
1571         /* Set Organizationally Unique Identifier (OUI) prefix. */
1572         mac_addr->addr_bytes[0] = 0x00;
1573         mac_addr->addr_bytes[1] = 0x09;
1574         mac_addr->addr_bytes[2] = 0xC0;
1575         /* Force indication of locally assigned MAC address. */
1576         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1577         /* Generate the last 3 bytes of the MAC address with a random number. */
1578         random = rte_rand();
1579         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1580 }
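
/*
 * Resulting address pattern (assuming ETHER_LOCAL_ADMIN_ADDR is the usual
 * locally-administered bit, 0x02): byte 0 becomes 0x00 | 0x02 = 0x02, so
 * generated addresses look like 02:09:C0:xx:xx:xx with 3 random trailing
 * bytes.
 */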
1581
1582 /*
1583  * Virtual Function device init
1584  */
1585 static int
1586 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1587 {
1588         int diag;
1589         uint32_t tc, tcs;
1590         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1591         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1592         struct ixgbe_hw *hw =
1593                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1594         struct ixgbe_vfta *shadow_vfta =
1595                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1596         struct ixgbe_hwstrip *hwstrip =
1597                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1598         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1599
1600         PMD_INIT_FUNC_TRACE();
1601
1602         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1603         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1604         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1605
1606         /* for secondary processes, we don't initialise any further as the
1607          * primary has already done this work. Only check whether we need a
1608          * different RX function.
1609          */
1610         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1611                 struct ixgbe_tx_queue *txq;
1612                 /* The TX queue function was set in the primary process by the
1613                  * last queue initialized; TX queues may not be initialized yet.
1614                  */
1615                 if (eth_dev->data->tx_queues) {
1616                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1617                         ixgbe_set_tx_function(eth_dev, txq);
1618                 } else {
1619                         /* Use default TX function if we get here */
1620                         PMD_INIT_LOG(NOTICE,
1621                                      "No TX queues configured yet. Using default TX function.");
1622                 }
1623
1624                 ixgbe_set_rx_function(eth_dev);
1625
1626                 return 0;
1627         }
1628
1629         rte_eth_copy_pci_info(eth_dev, pci_dev);
1630         eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
1631
1632         hw->device_id = pci_dev->id.device_id;
1633         hw->vendor_id = pci_dev->id.vendor_id;
1634         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1635
1636         /* initialize the vfta */
1637         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1638
1639         /* initialize the HW strip bitmap */
1640         memset(hwstrip, 0, sizeof(*hwstrip));
1641
1642         /* Initialize the shared code (base driver) */
1643         diag = ixgbe_init_shared_code(hw);
1644         if (diag != IXGBE_SUCCESS) {
1645                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1646                 return -EIO;
1647         }
1648
1649         /* init_mailbox_params */
1650         hw->mbx.ops.init_params(hw);
1651
1652         /* Reset the hw statistics */
1653         ixgbevf_dev_stats_reset(eth_dev);
1654
1655         /* Disable the interrupts for VF */
1656         ixgbevf_intr_disable(hw);
1657
1658         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1659         diag = hw->mac.ops.reset_hw(hw);
1660
1661         /*
1662          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1663          * the underlying PF driver has not assigned a MAC address to the VF.
1664          * In this case, assign a random MAC address.
1665          */
1666         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1667                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1668                 return diag;
1669         }
1670
1671         /* negotiate mailbox API version to use with the PF. */
1672         ixgbevf_negotiate_api(hw);
1673
1674         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1675         ixgbevf_get_queues(hw, &tcs, &tc);
1676
1677         /* Allocate memory for storing MAC addresses */
1678         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1679                                                hw->mac.num_rar_entries, 0);
1680         if (eth_dev->data->mac_addrs == NULL) {
1681                 PMD_INIT_LOG(ERR,
1682                              "Failed to allocate %u bytes needed to store "
1683                              "MAC addresses",
1684                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1685                 return -ENOMEM;
1686         }
1687
1688         /* Generate a random MAC address, if none was assigned by PF. */
1689         if (is_zero_ether_addr(perm_addr)) {
1690                 generate_random_mac_addr(perm_addr);
1691                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1692                 if (diag) {
1693                         rte_free(eth_dev->data->mac_addrs);
1694                         eth_dev->data->mac_addrs = NULL;
1695                         return diag;
1696                 }
1697                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1698                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1699                              "%02x:%02x:%02x:%02x:%02x:%02x",
1700                              perm_addr->addr_bytes[0],
1701                              perm_addr->addr_bytes[1],
1702                              perm_addr->addr_bytes[2],
1703                              perm_addr->addr_bytes[3],
1704                              perm_addr->addr_bytes[4],
1705                              perm_addr->addr_bytes[5]);
1706         }
1707
1708         /* Copy the permanent MAC address */
1709         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1710
1711         /* reset the hardware with the new settings */
1712         diag = hw->mac.ops.start_hw(hw);
1713         switch (diag) {
1714         case  0:
1715                 break;
1716
1717         default:
1718                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1719                 return -EIO;
1720         }
1721
1722         rte_intr_callback_register(intr_handle,
1723                                    ixgbevf_dev_interrupt_handler, eth_dev);
1724         rte_intr_enable(intr_handle);
1725         ixgbevf_intr_enable(hw);
1726
1727         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1728                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1729                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1730
1731         return 0;
1732 }
1733
1734 /* Virtual Function device uninit */
1736 static int
1737 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1738 {
1739         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1740         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1741         struct ixgbe_hw *hw;
1742
1743         PMD_INIT_FUNC_TRACE();
1744
1745         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1746                 return -EPERM;
1747
1748         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1749
1750         if (hw->adapter_stopped == 0)
1751                 ixgbevf_dev_close(eth_dev);
1752
1753         eth_dev->dev_ops = NULL;
1754         eth_dev->rx_pkt_burst = NULL;
1755         eth_dev->tx_pkt_burst = NULL;
1756
1757         /* Disable the interrupts for VF */
1758         ixgbevf_intr_disable(hw);
1759
1760         rte_free(eth_dev->data->mac_addrs);
1761         eth_dev->data->mac_addrs = NULL;
1762
1763         rte_intr_disable(intr_handle);
1764         rte_intr_callback_unregister(intr_handle,
1765                                      ixgbevf_dev_interrupt_handler, eth_dev);
1766
1767         return 0;
1768 }
1769
1770 static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1771         struct rte_pci_device *pci_dev)
1772 {
1773         return rte_eth_dev_pci_generic_probe(pci_dev,
1774                 sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
1775 }
1776
1777 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
1778 {
1779         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
1780 }
1781
1782 static struct rte_pci_driver rte_ixgbe_pmd = {
1783         .id_table = pci_id_ixgbe_map,
1784         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1785         .probe = eth_ixgbe_pci_probe,
1786         .remove = eth_ixgbe_pci_remove,
1787 };
1788
1789 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1790         struct rte_pci_device *pci_dev)
1791 {
1792         return rte_eth_dev_pci_generic_probe(pci_dev,
1793                 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
1794 }
1795
1796 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
1797 {
1798         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
1799 }
1800
1801 /*
1802  * virtual function driver struct
1803  */
1804 static struct rte_pci_driver rte_ixgbevf_pmd = {
1805         .id_table = pci_id_ixgbevf_map,
1806         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1807         .probe = eth_ixgbevf_pci_probe,
1808         .remove = eth_ixgbevf_pci_remove,
1809 };
1810
1811 static int
1812 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1813 {
1814         struct ixgbe_hw *hw =
1815                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1816         struct ixgbe_vfta *shadow_vfta =
1817                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1818         uint32_t vfta;
1819         uint32_t vid_idx;
1820         uint32_t vid_bit;
1821
1822         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1823         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1824         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1825         if (on)
1826                 vfta |= vid_bit;
1827         else
1828                 vfta &= ~vid_bit;
1829         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1830
1831         /* update local VFTA copy */
1832         shadow_vfta->vfta[vid_idx] = vfta;
1833
1834         return 0;
1835 }
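
/*
 * Worked example of the VFTA indexing above: for vlan_id = 100,
 *   vid_idx = (100 >> 5) & 0x7F = 3
 *   vid_bit = 1 << (100 & 0x1F) = 1 << 4 = 0x10
 * so enabling VLAN 100 sets bit 4 of the VFTA[3] register.
 */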
1836
1837 static void
1838 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1839 {
1840         if (on)
1841                 ixgbe_vlan_hw_strip_enable(dev, queue);
1842         else
1843                 ixgbe_vlan_hw_strip_disable(dev, queue);
1844 }
1845
1846 static int
1847 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1848                     enum rte_vlan_type vlan_type,
1849                     uint16_t tpid)
1850 {
1851         struct ixgbe_hw *hw =
1852                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1853         int ret = 0;
1854         uint32_t reg;
1855         uint32_t qinq;
1856
1857         qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1858         qinq &= IXGBE_DMATXCTL_GDV;
1859
1860         switch (vlan_type) {
1861         case ETH_VLAN_TYPE_INNER:
1862                 if (qinq) {
1863                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1864                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1865                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1866                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1867                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1868                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1869                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1870                 } else {
1871                         ret = -ENOTSUP;
1872                         PMD_DRV_LOG(ERR, "Inner type is not supported"
1873                                     " by single VLAN");
1874                 }
1875                 break;
1876         case ETH_VLAN_TYPE_OUTER:
1877                 if (qinq) {
1878                         /* Only the high 16 bits are valid */
1879                         IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1880                                         IXGBE_EXVET_VET_EXT_SHIFT);
1881                 } else {
1882                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1883                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1884                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1885                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1886                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1887                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1888                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1889                 }
1890
1891                 break;
1892         default:
1893                 ret = -EINVAL;
1894                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1895                 break;
1896         }
1897
1898         return ret;
1899 }
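
/*
 * Example of the QinQ outer-TPID path above (illustrative values): setting
 * the 802.1ad TPID 0x88A8 with vlan_type == ETH_VLAN_TYPE_OUTER writes
 * 0x88A8 << IXGBE_EXVET_VET_EXT_SHIFT to EXVET, i.e. 0x88A80000 assuming
 * the usual shift of 16, so the TPID occupies the high 16 bits noted above.
 */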
1900
1901 void
1902 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1903 {
1904         struct ixgbe_hw *hw =
1905                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1906         uint32_t vlnctrl;
1907
1908         PMD_INIT_FUNC_TRACE();
1909
1910         /* Filter Table Disable */
1911         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1912         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1913
1914         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1915 }
1916
1917 void
1918 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1919 {
1920         struct ixgbe_hw *hw =
1921                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1922         struct ixgbe_vfta *shadow_vfta =
1923                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1924         uint32_t vlnctrl;
1925         uint16_t i;
1926
1927         PMD_INIT_FUNC_TRACE();
1928
1929         /* Filter Table Enable */
1930         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1931         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1932         vlnctrl |= IXGBE_VLNCTRL_VFE;
1933
1934         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1935
1936         /* write whatever is in local vfta copy */
1937         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1938                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1939 }
1940
1941 static void
1942 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1943 {
1944         struct ixgbe_hwstrip *hwstrip =
1945                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1946         struct ixgbe_rx_queue *rxq;
1947
1948         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1949                 return;
1950
1951         if (on)
1952                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1953         else
1954                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1955
1956         if (queue >= dev->data->nb_rx_queues)
1957                 return;
1958
1959         rxq = dev->data->rx_queues[queue];
1960
1961         if (on)
1962                 rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
1963         else
1964                 rxq->vlan_flags = PKT_RX_VLAN_PKT;
1965 }
1966
1967 static void
1968 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1969 {
1970         struct ixgbe_hw *hw =
1971                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1972         uint32_t ctrl;
1973
1974         PMD_INIT_FUNC_TRACE();
1975
1976         if (hw->mac.type == ixgbe_mac_82598EB) {
1977                 /* No queue level support */
1978                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level HW strip");
1979                 return;
1980         }
1981
1982         /* On other 10G NICs, VLAN stripping can be set up per queue via RXDCTL */
1983         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1984         ctrl &= ~IXGBE_RXDCTL_VME;
1985         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1986
1987         /* record this setting in the per-queue HW strip bitmap */
1988         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1989 }
1990
1991 static void
1992 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1993 {
1994         struct ixgbe_hw *hw =
1995                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1996         uint32_t ctrl;
1997
1998         PMD_INIT_FUNC_TRACE();
1999
2000         if (hw->mac.type == ixgbe_mac_82598EB) {
2001                 /* No queue level support */
2002                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level HW strip");
2003                 return;
2004         }
2005
2006         /* On other 10G NICs, VLAN stripping can be set up per queue via RXDCTL */
2007         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2008         ctrl |= IXGBE_RXDCTL_VME;
2009         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2010
2011         /* record this setting in the per-queue HW strip bitmap */
2012         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2013 }
2014
2015 void
2016 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
2017 {
2018         struct ixgbe_hw *hw =
2019                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2020         uint32_t ctrl;
2021         uint16_t i;
2022         struct ixgbe_rx_queue *rxq;
2023
2024         PMD_INIT_FUNC_TRACE();
2025
2026         if (hw->mac.type == ixgbe_mac_82598EB) {
2027                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2028                 ctrl &= ~IXGBE_VLNCTRL_VME;
2029                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2030         } else {
2031                 /* On other 10G NICs, VLAN stripping can be set up per queue via RXDCTL */
2032                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2033                         rxq = dev->data->rx_queues[i];
2034                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2035                         ctrl &= ~IXGBE_RXDCTL_VME;
2036                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2037
2038                         /* record this setting in the per-queue HW strip bitmap */
2039                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
2040                 }
2041         }
2042 }
2043
2044 void
2045 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
2046 {
2047         struct ixgbe_hw *hw =
2048                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2049         uint32_t ctrl;
2050         uint16_t i;
2051         struct ixgbe_rx_queue *rxq;
2052
2053         PMD_INIT_FUNC_TRACE();
2054
2055         if (hw->mac.type == ixgbe_mac_82598EB) {
2056                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2057                 ctrl |= IXGBE_VLNCTRL_VME;
2058                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2059         } else {
2060                 /* On other 10G NICs, VLAN stripping can be set up per queue via RXDCTL */
2061                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2062                         rxq = dev->data->rx_queues[i];
2063                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2064                         ctrl |= IXGBE_RXDCTL_VME;
2065                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2066
2067                         /* record this setting in the per-queue HW strip bitmap */
2068                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
2069                 }
2070         }
2071 }
2072
2073 static void
2074 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2075 {
2076         struct ixgbe_hw *hw =
2077                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2078         uint32_t ctrl;
2079
2080         PMD_INIT_FUNC_TRACE();
2081
2082         /* DMATXCTL: Generic Double VLAN Disable */
2083         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2084         ctrl &= ~IXGBE_DMATXCTL_GDV;
2085         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2086
2087         /* CTRL_EXT: Global Double VLAN Disable */
2088         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2089         ctrl &= ~IXGBE_EXTENDED_VLAN;
2090         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2092 }
2093
2094 static void
2095 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2096 {
2097         struct ixgbe_hw *hw =
2098                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2099         uint32_t ctrl;
2100
2101         PMD_INIT_FUNC_TRACE();
2102
2103         /* DMATXCTL: Generic Double VLAN Enable */
2104         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2105         ctrl |= IXGBE_DMATXCTL_GDV;
2106         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2107
2108         /* CTRL_EXT: Global Double VLAN Enable */
2109         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2110         ctrl |= IXGBE_EXTENDED_VLAN;
2111         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2112
2113         /* Clear pooling mode of PFVTCTL. It's required by X550. */
2114         if (hw->mac.type == ixgbe_mac_X550 ||
2115             hw->mac.type == ixgbe_mac_X550EM_x ||
2116             hw->mac.type == ixgbe_mac_X550EM_a) {
2117                 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2118                 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2119                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2120         }
2121
2122         /*
2123          * The VET EXT field in the EXVET register defaults to 0x8100, so no
2124          * change is needed. The same applies to the VT field of DMATXCTL.
2125          */
2126 }
2127
2128 static void
2129 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2130 {
2131         if (mask & ETH_VLAN_STRIP_MASK) {
2132                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2133                         ixgbe_vlan_hw_strip_enable_all(dev);
2134                 else
2135                         ixgbe_vlan_hw_strip_disable_all(dev);
2136         }
2137
2138         if (mask & ETH_VLAN_FILTER_MASK) {
2139                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2140                         ixgbe_vlan_hw_filter_enable(dev);
2141                 else
2142                         ixgbe_vlan_hw_filter_disable(dev);
2143         }
2144
2145         if (mask & ETH_VLAN_EXTEND_MASK) {
2146                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2147                         ixgbe_vlan_hw_extend_enable(dev);
2148                 else
2149                         ixgbe_vlan_hw_extend_disable(dev);
2150         }
2151 }
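
/*
 * Illustrative usage (an assumption, not driver code): after toggling the
 * rxmode flags, strip and filter settings can be applied in one call by
 * OR-ing the masks.
 *
 *   dev->data->dev_conf.rxmode.hw_vlan_strip = 1;
 *   dev->data->dev_conf.rxmode.hw_vlan_filter = 1;
 *   ixgbe_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
 */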
2152
2153 static void
2154 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2155 {
2156         struct ixgbe_hw *hw =
2157                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2158         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2159         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2160
2161         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2162         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2163 }
2164
2165 static int
2166 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2167 {
2168         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2169
2170         switch (nb_rx_q) {
2171         case 1:
2172         case 2:
2173                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2174                 break;
2175         case 4:
2176                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2177                 break;
2178         default:
2179                 return -EINVAL;
2180         }
2181
2182         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
2183         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
2184
2185         return 0;
2186 }
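
/*
 * Worked example of the pool bookkeeping above: nb_rx_q = 4 selects
 * ETH_32_POOLS, and with e.g. max_vfs = 31 the PF's default queues start at
 * def_pool_q_idx = 31 * 4 = 124, directly after the queues reserved for the
 * VFs.
 */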
2187
2188 static int
2189 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2190 {
2191         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2192         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2193         uint16_t nb_rx_q = dev->data->nb_rx_queues;
2194         uint16_t nb_tx_q = dev->data->nb_tx_queues;
2195
2196         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2197                 /* check multi-queue mode */
2198                 switch (dev_conf->rxmode.mq_mode) {
2199                 case ETH_MQ_RX_VMDQ_DCB:
2200                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2201                         break;
2202                 case ETH_MQ_RX_VMDQ_DCB_RSS:
2203                         /* DCB/RSS VMDQ in SRIOV mode is not implemented yet */
2204                         PMD_INIT_LOG(ERR, "SRIOV active,"
2205                                         " unsupported mq_mode rx %d.",
2206                                         dev_conf->rxmode.mq_mode);
2207                         return -EINVAL;
2208                 case ETH_MQ_RX_RSS:
2209                 case ETH_MQ_RX_VMDQ_RSS:
2210                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2211                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2212                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2213                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2214                                                 " invalid queue number"
2215                                                 " for VMDQ RSS, allowed"
2216                                                 " values are 1, 2 or 4.");
2217                                         return -EINVAL;
2218                                 }
2219                         break;
2220                 case ETH_MQ_RX_VMDQ_ONLY:
2221                 case ETH_MQ_RX_NONE:
2222                         /* if no mq mode is configured, use the default scheme */
2223                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2224                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
2225                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
2226                         break;
2227                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
2228                         /* SRIOV only works with VMDq enabled */
2229                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2230                                         " wrong mq_mode rx %d.",
2231                                         dev_conf->rxmode.mq_mode);
2232                         return -EINVAL;
2233                 }
2234
2235                 switch (dev_conf->txmode.mq_mode) {
2236                 case ETH_MQ_TX_VMDQ_DCB:
2237                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2238                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2239                         break;
2240                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2241                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2242                         break;
2243                 }
2244
2245                 /* check valid queue number */
2246                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2247                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2248                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2249                                         " nb_rx_q=%d nb_tx_q=%d queue number"
2250                                         " must be less than or equal to %d.",
2251                                         nb_rx_q, nb_tx_q,
2252                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2253                         return -EINVAL;
2254                 }
2255         } else {
2256                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2257                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2258                                           " not supported.");
2259                         return -EINVAL;
2260                 }
2261                 /* check configuration for VMDq+DCB mode */
2262                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2263                         const struct rte_eth_vmdq_dcb_conf *conf;
2264
2265                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2266                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2267                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
2268                                 return -EINVAL;
2269                         }
2270                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2271                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2272                                conf->nb_queue_pools == ETH_32_POOLS)) {
2273                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2274                                                 " nb_queue_pools must be %d or %d.",
2275                                                 ETH_16_POOLS, ETH_32_POOLS);
2276                                 return -EINVAL;
2277                         }
2278                 }
2279                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2280                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
2281
2282                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2283                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2284                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
2285                                 return -EINVAL;
2286                         }
2287                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2288                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2289                                conf->nb_queue_pools == ETH_32_POOLS)) {
2290                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2291                                                 " nb_queue_pools != %d and"
2292                                                 " nb_queue_pools != %d.",
2293                                                 ETH_16_POOLS, ETH_32_POOLS);
2294                                 return -EINVAL;
2295                         }
2296                 }
2297
2298                 /* For DCB mode check our configuration before we go further */
2299                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2300                         const struct rte_eth_dcb_rx_conf *conf;
2301
2302                         if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
2303                                 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
2304                                                  IXGBE_DCB_NB_QUEUES);
2305                                 return -EINVAL;
2306                         }
2307                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2308                         if (!(conf->nb_tcs == ETH_4_TCS ||
2309                                conf->nb_tcs == ETH_8_TCS)) {
2310                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2311                                                 " and nb_tcs != %d.",
2312                                                 ETH_4_TCS, ETH_8_TCS);
2313                                 return -EINVAL;
2314                         }
2315                 }
2316
2317                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2318                         const struct rte_eth_dcb_tx_conf *conf;
2319
2320                         if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
2321                                 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
2322                                                  IXGBE_DCB_NB_QUEUES);
2323                                 return -EINVAL;
2324                         }
2325                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2326                         if (!(conf->nb_tcs == ETH_4_TCS ||
2327                                conf->nb_tcs == ETH_8_TCS)) {
2328                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2329                                                 " and nb_tcs != %d.",
2330                                                 ETH_4_TCS, ETH_8_TCS);
2331                                 return -EINVAL;
2332                         }
2333                 }
2334
2335                 /*
2336                  * When DCB/VT is off, the maximum number of queues changes,
2337                  * except for 82598EB, which remains constant.
2338                  */
2339                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2340                                 hw->mac.type != ixgbe_mac_82598EB) {
2341                         if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2342                                 PMD_INIT_LOG(ERR,
2343                                              "Neither VT nor DCB are enabled, "
2344                                              "nb_tx_q > %d.",
2345                                              IXGBE_NONE_MODE_TX_NB_QUEUES);
2346                                 return -EINVAL;
2347                         }
2348                 }
2349         }
2350         return 0;
2351 }
2352
2353 static int
2354 ixgbe_dev_configure(struct rte_eth_dev *dev)
2355 {
2356         struct ixgbe_interrupt *intr =
2357                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2358         struct ixgbe_adapter *adapter =
2359                 (struct ixgbe_adapter *)dev->data->dev_private;
2360         int ret;
2361
2362         PMD_INIT_FUNC_TRACE();
2363         /* multiple queue mode checking */
2364         ret = ixgbe_check_mq_mode(dev);
2365         if (ret != 0) {
2366                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2367                             ret);
2368                 return ret;
2369         }
2370
2371         /* set flag to update link status after init */
2372         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2373
2374         /*
2375          * Initialize to TRUE. If any Rx queue fails to meet the bulk
2376          * allocation or vector Rx preconditions, the flag will be reset.
2377          */
2378         adapter->rx_bulk_alloc_allowed = true;
2379         adapter->rx_vec_allowed = true;
2380
2381         return 0;
2382 }
2383
2384 static void
2385 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2386 {
2387         struct ixgbe_hw *hw =
2388                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2389         struct ixgbe_interrupt *intr =
2390                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2391         uint32_t gpie;
2392
2393         /* only set it up on X550EM_X */
2394         if (hw->mac.type == ixgbe_mac_X550EM_x) {
2395                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2396                 gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2397                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2398                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2399                         intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2400         }
2401 }
2402
2403 int
2404 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
2405                         uint16_t tx_rate, uint64_t q_msk)
2406 {
2407         struct ixgbe_hw *hw;
2408         struct ixgbe_vf_info *vfinfo;
2409         struct rte_eth_link link;
2410         uint8_t  nb_q_per_pool;
2411         uint32_t queue_stride;
2412         uint32_t queue_idx, idx = 0, vf_idx;
2413         uint32_t queue_end;
2414         uint16_t total_rate = 0;
2415         struct rte_pci_device *pci_dev;
2416
2417         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2418         rte_eth_link_get_nowait(dev->data->port_id, &link);
2419
2420         if (vf >= pci_dev->max_vfs)
2421                 return -EINVAL;
2422
2423         if (tx_rate > link.link_speed)
2424                 return -EINVAL;
2425
2426         if (q_msk == 0)
2427                 return 0;
2428
2429         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2430         vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
2431         nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2432         queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2433         queue_idx = vf * queue_stride;
2434         queue_end = queue_idx + nb_q_per_pool - 1;
2435         if (queue_end >= hw->mac.max_tx_queues)
2436                 return -EINVAL;
2437
2438         if (vfinfo) {
2439                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
2440                         if (vf_idx == vf)
2441                                 continue;
2442                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
2443                                 idx++)
2444                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
2445                 }
2446         } else {
2447                 return -EINVAL;
2448         }
2449
2450         /* Store tx_rate for this vf. */
2451         for (idx = 0; idx < nb_q_per_pool; idx++) {
2452                 if (((uint64_t)0x1 << idx) & q_msk) {
2453                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
2454                                 vfinfo[vf].tx_rate[idx] = tx_rate;
2455                         total_rate += tx_rate;
2456                 }
2457         }
2458
2459         if (total_rate > dev->data->dev_link.link_speed) {
2460                 /* Reset the stored TX rate of the VF if it would exceed
2461                  * the link speed.
2462                  */
2463                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
2464                 return -EINVAL;
2465         }
2466
2467         /* Set RTTBCNRC of each queue/pool for the given VF */
2468         for (; queue_idx <= queue_end; queue_idx++) {
2469                 if (0x1 & q_msk)
2470                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
2471                 q_msk = q_msk >> 1;
2472         }
2473
2474         return 0;
2475 }
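
/*
 * The final loop above walks q_msk one bit per queue in the VF's pool:
 * bit 0 selects queue_idx, bit 1 selects queue_idx + 1, and so on. A
 * minimal standalone sketch of that mapping, with set_rate() as an
 * invented stand-in for ixgbe_set_queue_rate_limit():
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void set_rate(uint32_t queue, uint16_t rate)
{
        printf("queue %u -> %u Mb/s\n", queue, rate);
}

static void apply_rate_mask(uint32_t first_queue, uint32_t last_queue,
                            uint64_t q_msk, uint16_t rate)
{
        uint32_t q;

        for (q = first_queue; q <= last_queue; q++) {
                if (q_msk & 0x1)
                        set_rate(q, rate);
                q_msk >>= 1;
        }
}

int main(void)
{
        /* VF pool of 4 queues starting at queue 8; limit queues 8 and 10 */
        apply_rate_mask(8, 11, 0x5, 1000);
        return 0;
}
#endif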
2476
2477 /*
2478  * Configure device link speed and setup link.
2479  * It returns 0 on success.
2480  */
2481 static int
2482 ixgbe_dev_start(struct rte_eth_dev *dev)
2483 {
2484         struct ixgbe_hw *hw =
2485                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2486         struct ixgbe_vf_info *vfinfo =
2487                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2488         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2489         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2490         uint32_t intr_vector = 0;
2491         int err, link_up = 0, negotiate = 0;
2492         uint32_t speed = 0;
2493         int mask = 0;
2494         int status;
2495         uint16_t vf, idx;
2496         uint32_t *link_speeds;
2497         struct ixgbe_tm_conf *tm_conf =
2498                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2499
2500         PMD_INIT_FUNC_TRACE();
2501
2502         /* IXGBE devices don't support:
2503          *    - half duplex (checked afterwards for valid speeds)
2504          *    - fixed speed: TODO implement
2505          */
2506         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2507                 PMD_INIT_LOG(ERR,
2508                 "Invalid link_speeds for port %u, fixed speed not supported",
2509                                 dev->data->port_id);
2510                 return -EINVAL;
2511         }
2512
2513         /* disable uio/vfio intr/eventfd mapping */
2514         rte_intr_disable(intr_handle);
2515
2516         /* stop adapter */
2517         hw->adapter_stopped = 0;
2518         ixgbe_stop_adapter(hw);
2519
2520         /* reinitialize adapter
2521          * this calls reset and start
2522          */
2523         status = ixgbe_pf_reset_hw(hw);
2524         if (status != 0)
2525                 return -1;
2526         hw->mac.ops.start_hw(hw);
2527         hw->mac.get_link_status = true;
2528
2529         /* configure PF module if SRIOV enabled */
2530         ixgbe_pf_host_configure(dev);
2531
2532         ixgbe_dev_phy_intr_setup(dev);
2533
2534         /* check and configure queue intr-vector mapping */
2535         if ((rte_intr_cap_multiple(intr_handle) ||
2536              !RTE_ETH_DEV_SRIOV(dev).active) &&
2537             dev->data->dev_conf.intr_conf.rxq != 0) {
2538                 intr_vector = dev->data->nb_rx_queues;
2539                 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2540                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2541                                         IXGBE_MAX_INTR_QUEUE_NUM);
2542                         return -ENOTSUP;
2543                 }
2544                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2545                         return -1;
2546         }
2547
2548         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2549                 intr_handle->intr_vec =
2550                         rte_zmalloc("intr_vec",
2551                                     dev->data->nb_rx_queues * sizeof(int), 0);
2552                 if (intr_handle->intr_vec == NULL) {
2553                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2554                                      " intr_vec", dev->data->nb_rx_queues);
2555                         return -ENOMEM;
2556                 }
2557         }
2558
2559         /* configure MSI-X for sleep until Rx interrupt */
2560         ixgbe_configure_msix(dev);
2561
2562         /* initialize transmission unit */
2563         ixgbe_dev_tx_init(dev);
2564
2565         /* This can fail when allocating mbufs for descriptor rings */
2566         err = ixgbe_dev_rx_init(dev);
2567         if (err) {
2568                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2569                 goto error;
2570         }
2571
2572         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2573                 ETH_VLAN_EXTEND_MASK;
2574         ixgbe_vlan_offload_set(dev, mask);
2575
2576         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2577                 /* Enable vlan filtering for VMDq */
2578                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2579         }
2580
2581         /* Configure DCB hw */
2582         ixgbe_configure_dcb(dev);
2583
2584         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2585                 err = ixgbe_fdir_configure(dev);
2586                 if (err)
2587                         goto error;
2588         }
2589
2590         /* Restore VF rate limits */
2591         if (vfinfo != NULL) {
2592                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
2593                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2594                                 if (vfinfo[vf].tx_rate[idx] != 0)
2595                                         ixgbe_set_vf_rate_limit(
2596                                                 dev, vf,
2597                                                 vfinfo[vf].tx_rate[idx],
2598                                                 1 << idx);
2599         }
2600
2601         ixgbe_restore_statistics_mapping(dev);
2602
2603         err = ixgbe_dev_rxtx_start(dev);
2604         if (err < 0) {
2605                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2606                 goto error;
2607         }
2608
2609         /* Skip link setup if loopback mode is enabled for 82599. */
2610         if (hw->mac.type == ixgbe_mac_82599EB &&
2611                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2612                 goto skip_link_setup;
2613
2614         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2615                 err = hw->mac.ops.setup_sfp(hw);
2616                 if (err)
2617                         goto error;
2618         }
2619
2620         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2621                 /* Turn on the copper */
2622                 ixgbe_set_phy_power(hw, true);
2623         } else {
2624                 /* Turn on the laser */
2625                 ixgbe_enable_tx_laser(hw);
2626         }
2627
2628         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2629         if (err)
2630                 goto error;
2631         dev->data->dev_link.link_status = link_up;
2632
2633         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2634         if (err)
2635                 goto error;
2636
2637         link_speeds = &dev->data->dev_conf.link_speeds;
2638         if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2639                         ETH_LINK_SPEED_10G)) {
2640                 PMD_INIT_LOG(ERR, "Invalid link setting");
2641                 goto error;
2642         }
2643
2644         speed = 0x0;
2645         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2646                 switch (hw->mac.type) {
2647                 case ixgbe_mac_82598EB:
2648                         speed = IXGBE_LINK_SPEED_82598_AUTONEG;
2649                         break;
2650                 case ixgbe_mac_82599EB:
2651                 case ixgbe_mac_X540:
2652                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2653                         break;
2654                 case ixgbe_mac_X550:
2655                 case ixgbe_mac_X550EM_x:
2656                 case ixgbe_mac_X550EM_a:
2657                         speed = IXGBE_LINK_SPEED_X550_AUTONEG;
2658                         break;
2659                 default:
2660                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2661                 }
2662         } else {
2663                 if (*link_speeds & ETH_LINK_SPEED_10G)
2664                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2665                 if (*link_speeds & ETH_LINK_SPEED_1G)
2666                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2667                 if (*link_speeds & ETH_LINK_SPEED_100M)
2668                         speed |= IXGBE_LINK_SPEED_100_FULL;
2669         }
2670
2671         err = ixgbe_setup_link(hw, speed, link_up);
2672         if (err)
2673                 goto error;
2674
2675 skip_link_setup:
2676
2677         if (rte_intr_allow_others(intr_handle)) {
2678                 /* check if lsc interrupt is enabled */
2679                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2680                         ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
2681                 else
2682                         ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
2683                 ixgbe_dev_macsec_interrupt_setup(dev);
2684         } else {
2685                 rte_intr_callback_unregister(intr_handle,
2686                                              ixgbe_dev_interrupt_handler, dev);
2687                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2688                         PMD_INIT_LOG(INFO, "lsc interrupt cannot be enabled"
2689                                      " without interrupt multiplexing");
2690         }
2691
2692         /* check if rxq interrupt is enabled */
2693         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2694             rte_intr_dp_is_en(intr_handle))
2695                 ixgbe_dev_rxq_interrupt_setup(dev);
2696
2697         /* enable uio/vfio intr/eventfd mapping */
2698         rte_intr_enable(intr_handle);
2699
2700         /* re-enable interrupts that were enabled before the HW reset */
2701         ixgbe_enable_intr(dev);
2702         ixgbe_l2_tunnel_conf(dev);
2703         ixgbe_filter_restore(dev);
2704
2705         if (tm_conf->root && !tm_conf->committed)
2706                 PMD_DRV_LOG(WARNING,
2707                             "please call hierarchy_commit() "
2708                             "before starting the port");
2709
2710         return 0;
2711
2712 error:
2713         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2714         ixgbe_dev_clear_queues(dev);
2715         return -EIO;
2716 }
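
/*
 * ixgbe_dev_start() is reached through the generic ethdev API. A trimmed,
 * illustrative caller-side sketch of the usual order (error handling and
 * mempool creation omitted; "mb_pool" is assumed to be a valid mempool):
 */
#if 0
struct rte_eth_conf port_conf = { 0 };
uint16_t port_id = 0;

rte_eth_dev_configure(port_id, 1 /* nb_rx_q */, 1 /* nb_tx_q */, &port_conf);
rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mb_pool);
rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
rte_eth_dev_start(port_id);     /* ends up in ixgbe_dev_start() for this PMD */
#endif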
2717
2718 /*
2719  * Stop device: disable rx and tx functions to allow for reconfiguring.
2720  */
2721 static void
2722 ixgbe_dev_stop(struct rte_eth_dev *dev)
2723 {
2724         struct rte_eth_link link;
2725         struct ixgbe_hw *hw =
2726                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2727         struct ixgbe_vf_info *vfinfo =
2728                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2729         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2730         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2731         int vf;
2732         struct ixgbe_tm_conf *tm_conf =
2733                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2734
2735         PMD_INIT_FUNC_TRACE();
2736
2737         /* disable interrupts */
2738         ixgbe_disable_intr(hw);
2739
2740         /* reset the NIC */
2741         ixgbe_pf_reset_hw(hw);
2742         hw->adapter_stopped = 0;
2743
2744         /* stop adapter */
2745         ixgbe_stop_adapter(hw);
2746
2747         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2748                 vfinfo[vf].clear_to_send = false;
2749
2750         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2751                 /* Turn off the copper */
2752                 ixgbe_set_phy_power(hw, false);
2753         } else {
2754                 /* Turn off the laser */
2755                 ixgbe_disable_tx_laser(hw);
2756         }
2757
2758         ixgbe_dev_clear_queues(dev);
2759
2760         /* Clear stored conf */
2761         dev->data->scattered_rx = 0;
2762         dev->data->lro = 0;
2763
2764         /* Clear recorded link status */
2765         memset(&link, 0, sizeof(link));
2766         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2767
2768         if (!rte_intr_allow_others(intr_handle))
2769                 /* restore the default interrupt handler */
2770                 rte_intr_callback_register(intr_handle,
2771                                            ixgbe_dev_interrupt_handler,
2772                                            (void *)dev);
2773
2774         /* Clean datapath event and queue/vec mapping */
2775         rte_intr_efd_disable(intr_handle);
2776         if (intr_handle->intr_vec != NULL) {
2777                 rte_free(intr_handle->intr_vec);
2778                 intr_handle->intr_vec = NULL;
2779         }
2780
2781         /* reset hierarchy commit */
2782         tm_conf->committed = false;
2783 }
2784
2785 /*
2786  * Set device link up: enable tx.
2787  */
2788 static int
2789 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2790 {
2791         struct ixgbe_hw *hw =
2792                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2793         if (hw->mac.type == ixgbe_mac_82599EB) {
2794 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2795                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2796                         /* Not supported in bypass mode */
2797                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2798                                      "by device id 0x%x", hw->device_id);
2799                         return -ENOTSUP;
2800                 }
2801 #endif
2802         }
2803
2804         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2805                 /* Turn on the copper */
2806                 ixgbe_set_phy_power(hw, true);
2807         } else {
2808                 /* Turn on the laser */
2809                 ixgbe_enable_tx_laser(hw);
2810         }
2811
2812         return 0;
2813 }
2814
2815 /*
2816  * Set device link down: disable tx.
2817  */
2818 static int
2819 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2820 {
2821         struct ixgbe_hw *hw =
2822                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2823         if (hw->mac.type == ixgbe_mac_82599EB) {
2824 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2825                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2826                         /* Not supported in bypass mode */
2827                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2828                                      "by device id 0x%x", hw->device_id);
2829                         return -ENOTSUP;
2830                 }
2831 #endif
2832         }
2833
2834         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2835                 /* Turn off the copper */
2836                 ixgbe_set_phy_power(hw, false);
2837         } else {
2838                 /* Turn off the laser */
2839                 ixgbe_disable_tx_laser(hw);
2840         }
2841
2842         return 0;
2843 }
2844
2845 /*
2846  * Reset and stop device.
2847  */
2848 static void
2849 ixgbe_dev_close(struct rte_eth_dev *dev)
2850 {
2851         struct ixgbe_hw *hw =
2852                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2853
2854         PMD_INIT_FUNC_TRACE();
2855
2856         ixgbe_pf_reset_hw(hw);
2857
2858         ixgbe_dev_stop(dev);
2859         hw->adapter_stopped = 1;
2860
2861         ixgbe_dev_free_queues(dev);
2862
2863         ixgbe_disable_pcie_master(hw);
2864
2865         /* reprogram the RAR[0] in case user changed it. */
2866         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2867 }
2868
2869 /*
2870  * Reset PF device.
2871  */
2872 static int
2873 ixgbe_dev_reset(struct rte_eth_dev *dev)
2874 {
2875         int ret;
2876
2877         /* When a DPDK PMD PF begins to reset a PF port, it should notify
2878          * all of its VFs so they stay aligned with it. The notification
2879          * mechanism is PMD specific and, for the ixgbe PF, rather complex.
2880          * To avoid unexpected behavior in the VFs, resetting a PF while
2881          * SR-IOV is active is not supported yet; it may be added later.
2882          */
2883         if (dev->data->sriov.active)
2884                 return -ENOTSUP;
2885
2886         ret = eth_ixgbe_dev_uninit(dev);
2887         if (ret)
2888                 return ret;
2889
2890         ret = eth_ixgbe_dev_init(dev);
2891
2892         return ret;
2893 }
2894
2895 static void
2896 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2897                            struct ixgbe_hw_stats *hw_stats,
2898                            struct ixgbe_macsec_stats *macsec_stats,
2899                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
2900                            uint64_t *total_qprc, uint64_t *total_qprdc)
2901 {
2902         uint32_t bprc, lxon, lxoff, total;
2903         uint32_t delta_gprc = 0;
2904         unsigned i;
2905         /* Workaround: the RX byte counters exclude CRC bytes only when
2906          * CRC stripping is enabled, so when stripping is disabled the
2907          * CRC bytes must be subtracted from the byte counters below.
2908          */
2909         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
2910                         IXGBE_HLREG0_RXCRCSTRP);
2911
2912         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2913         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2914         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2915         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2916
2917         for (i = 0; i < 8; i++) {
2918                 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2919
2920                 /* global total per queue */
2921                 hw_stats->mpc[i] += mp;
2922                 /* Running comprehensive total for stats display */
2923                 *total_missed_rx += hw_stats->mpc[i];
2924                 if (hw->mac.type == ixgbe_mac_82598EB) {
2925                         hw_stats->rnbc[i] +=
2926                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2927                         hw_stats->pxonrxc[i] +=
2928                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2929                         hw_stats->pxoffrxc[i] +=
2930                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2931                 } else {
2932                         hw_stats->pxonrxc[i] +=
2933                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2934                         hw_stats->pxoffrxc[i] +=
2935                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2936                         hw_stats->pxon2offc[i] +=
2937                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2938                 }
2939                 hw_stats->pxontxc[i] +=
2940                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2941                 hw_stats->pxofftxc[i] +=
2942                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2943         }
2944         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2945                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2946                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2947                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2948
2949                 delta_gprc += delta_qprc;
2950
2951                 hw_stats->qprc[i] += delta_qprc;
2952                 hw_stats->qptc[i] += delta_qptc;
2953
2954                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2955                 hw_stats->qbrc[i] +=
2956                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2957                 if (crc_strip == 0)
2958                         hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
2959
2960                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2961                 hw_stats->qbtc[i] +=
2962                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2963
2964                 hw_stats->qprdc[i] += delta_qprdc;
2965                 *total_qprdc += hw_stats->qprdc[i];
2966
2967                 *total_qprc += hw_stats->qprc[i];
2968                 *total_qbrc += hw_stats->qbrc[i];
2969         }
2970         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2971         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2972         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2973
2974         /*
2975          * An erratum states that gprc actually counts good + missed packets.
2976          * Work around it by setting gprc to the sum of per-queue receives.
2977          */
2978         hw_stats->gprc = *total_qprc;
2979
2980         if (hw->mac.type != ixgbe_mac_82598EB) {
2981                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
2982                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
2983                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
2984                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
2985                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
2986                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
2987                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
2988                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
2989         } else {
2990                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2991                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2992                 /* 82598 only has a counter in the high register */
2993                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2994                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2995                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2996         }
2997         uint64_t old_tpr = hw_stats->tpr;
2998
2999         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3000         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3001
3002         if (crc_strip == 0)
3003                 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
3004
3005         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
3006         hw_stats->gptc += delta_gptc;
3007         hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
3008         hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
3009
3010         /*
3011          * Workaround: mprc hardware is incorrectly counting
3012          * broadcasts, so for now we subtract those.
3013          */
3014         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3015         hw_stats->bprc += bprc;
3016         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3017         if (hw->mac.type == ixgbe_mac_82598EB)
3018                 hw_stats->mprc -= bprc;
3019
3020         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3021         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3022         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3023         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3024         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3025         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3026
3027         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3028         hw_stats->lxontxc += lxon;
3029         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3030         hw_stats->lxofftxc += lxoff;
3031         total = lxon + lxoff;
3032
3033         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3034         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3035         hw_stats->gptc -= total;
3036         hw_stats->mptc -= total;
3037         hw_stats->ptc64 -= total;
3038         hw_stats->gotc -= total * ETHER_MIN_LEN;
3039
3040         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3041         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3042         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3043         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3044         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3045         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3046         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3047         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3048         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3049         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3050         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3051         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3052         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3053         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3054         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3055         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3056         /* Only read FCOE on 82599 */
3057         if (hw->mac.type != ixgbe_mac_82598EB) {
3058                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3059                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3060                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3061                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3062                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3063         }
3064
3065         /* Flow Director Stats registers */
3066         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3067         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3068
3069         /* MACsec Stats registers */
3070         macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
3071         macsec_stats->out_pkts_encrypted +=
3072                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
3073         macsec_stats->out_pkts_protected +=
3074                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
3075         macsec_stats->out_octets_encrypted +=
3076                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
3077         macsec_stats->out_octets_protected +=
3078                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
3079         macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
3080         macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
3081         macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
3082         macsec_stats->in_pkts_unknownsci +=
3083                 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
3084         macsec_stats->in_octets_decrypted +=
3085                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
3086         macsec_stats->in_octets_validated +=
3087                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
3088         macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
3089         macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
3090         macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
3091         for (i = 0; i < 2; i++) {
3092                 macsec_stats->in_pkts_ok +=
3093                         IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
3094                 macsec_stats->in_pkts_invalid +=
3095                         IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
3096                 macsec_stats->in_pkts_notvalid +=
3097                         IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
3098         }
3099         macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
3100         macsec_stats->in_pkts_notusingsa +=
3101                 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3102 }
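
/*
 * Most counters read above are clear-on-read 32-bit registers accumulated
 * into 64-bit software totals; the per-queue byte counters are split into
 * low/high pairs (e.g. IXGBE_QBRC_L/IXGBE_QBRC_H). A standalone sketch of
 * the accumulation pattern, with invented stand-ins for the register reads:
 */
#if 0
#include <stdint.h>

static uint32_t read_qbrc_l(void) { return 0x89abcdefu; }  /* fake low half */
static uint32_t read_qbrc_h(void) { return 0x1u; }         /* fake high half */

static void accumulate_qbrc(uint64_t *total)
{
        /* low 32 bits first, then the high half shifted into bits 63:32 */
        *total += read_qbrc_l();
        *total += (uint64_t)read_qbrc_h() << 32;
}
#endif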
3103
3104 /*
3105  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3106  */
3107 static int
3108 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3109 {
3110         struct ixgbe_hw *hw =
3111                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3112         struct ixgbe_hw_stats *hw_stats =
3113                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3114         struct ixgbe_macsec_stats *macsec_stats =
3115                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3116                                 dev->data->dev_private);
3117         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3118         unsigned i;
3119
3120         total_missed_rx = 0;
3121         total_qbrc = 0;
3122         total_qprc = 0;
3123         total_qprdc = 0;
3124
3125         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3126                         &total_qbrc, &total_qprc, &total_qprdc);
3127
3128         if (stats == NULL)
3129                 return -EINVAL;
3130
3131         /* Fill out the rte_eth_stats statistics structure */
3132         stats->ipackets = total_qprc;
3133         stats->ibytes = total_qbrc;
3134         stats->opackets = hw_stats->gptc;
3135         stats->obytes = hw_stats->gotc;
3136
3137         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3138                 stats->q_ipackets[i] = hw_stats->qprc[i];
3139                 stats->q_opackets[i] = hw_stats->qptc[i];
3140                 stats->q_ibytes[i] = hw_stats->qbrc[i];
3141                 stats->q_obytes[i] = hw_stats->qbtc[i];
3142                 stats->q_errors[i] = hw_stats->qprdc[i];
3143         }
3144
3145         /* Rx Errors */
3146         stats->imissed  = total_missed_rx;
3147         stats->ierrors  = hw_stats->crcerrs +
3148                           hw_stats->mspdc +
3149                           hw_stats->rlec +
3150                           hw_stats->ruc +
3151                           hw_stats->roc +
3152                           hw_stats->illerrc +
3153                           hw_stats->errbc +
3154                           hw_stats->rfc +
3155                           hw_stats->fccrc +
3156                           hw_stats->fclast;
3157
3158         /* Tx Errors */
3159         stats->oerrors  = 0;
3160         return 0;
3161 }
3162
3163 static void
3164 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3165 {
3166         struct ixgbe_hw_stats *stats =
3167                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3168
3169         /* HW registers are cleared on read */
3170         ixgbe_dev_stats_get(dev, NULL);
3171
3172         /* Reset software totals */
3173         memset(stats, 0, sizeof(*stats));
3174 }
3175
3176 /* This function calculates the number of xstats based on the current config */
3177 static unsigned
3178 ixgbe_xstats_calc_num(void) {
3179         return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3180                 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3181                 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3182 }
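
/*
 * Note: this total also fixes the xstats layout. Every producer below
 * (names, values and by-id lookups) must enumerate entries in the same
 * order -- hw stats, MACsec, per-priority Rx, then per-priority Tx --
 * for the xstat ids to stay consistent across those calls.
 */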
3183
3184 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3185         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3186 {
3187         const unsigned cnt_stats = ixgbe_xstats_calc_num();
3188         unsigned stat, i, count;
3189
3190         if (xstats_names != NULL) {
3191                 count = 0;
3192
3193                 /* Note: limit >= cnt_stats checked upstream
3194                  * in rte_eth_xstats_names()
3195                  */
3196
3197                 /* Extended stats from ixgbe_hw_stats */
3198                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3199                         snprintf(xstats_names[count].name,
3200                                 sizeof(xstats_names[count].name),
3201                                 "%s",
3202                                 rte_ixgbe_stats_strings[i].name);
3203                         count++;
3204                 }
3205
3206                 /* MACsec Stats */
3207                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3208                         snprintf(xstats_names[count].name,
3209                                 sizeof(xstats_names[count].name),
3210                                 "%s",
3211                                 rte_ixgbe_macsec_strings[i].name);
3212                         count++;
3213                 }
3214
3215                 /* RX Priority Stats */
3216                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3217                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3218                                 snprintf(xstats_names[count].name,
3219                                         sizeof(xstats_names[count].name),
3220                                         "rx_priority%u_%s", i,
3221                                         rte_ixgbe_rxq_strings[stat].name);
3222                                 count++;
3223                         }
3224                 }
3225
3226                 /* TX Priority Stats */
3227                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3228                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3229                                 snprintf(xstats_names[count].name,
3230                                         sizeof(xstats_names[count].name),
3231                                         "tx_priority%u_%s", i,
3232                                         rte_ixgbe_txq_strings[stat].name);
3233                                 count++;
3234                         }
3235                 }
3236         }
3237         return cnt_stats;
3238 }
3239
3240 static int ixgbe_dev_xstats_get_names_by_id(
3241         struct rte_eth_dev *dev,
3242         struct rte_eth_xstat_name *xstats_names,
3243         const uint64_t *ids,
3244         unsigned int limit)
3245 {
3246         if (!ids) {
3247                 const unsigned int cnt_stats = ixgbe_xstats_calc_num();
3248                 unsigned int stat, i, count;
3249
3250                 if (xstats_names != NULL) {
3251                         count = 0;
3252
3253                         /* Note: limit >= cnt_stats checked upstream
3254                          * in rte_eth_xstats_names()
3255                          */
3256
3257                         /* Extended stats from ixgbe_hw_stats */
3258                         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3259                                 snprintf(xstats_names[count].name,
3260                                         sizeof(xstats_names[count].name),
3261                                         "%s",
3262                                         rte_ixgbe_stats_strings[i].name);
3263                                 count++;
3264                         }
3265
3266                         /* MACsec Stats */
3267                         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3268                                 snprintf(xstats_names[count].name,
3269                                         sizeof(xstats_names[count].name),
3270                                         "%s",
3271                                         rte_ixgbe_macsec_strings[i].name);
3272                                 count++;
3273                         }
3274
3275                         /* RX Priority Stats */
3276                         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3277                                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3278                                         snprintf(xstats_names[count].name,
3279                                             sizeof(xstats_names[count].name),
3280                                             "rx_priority%u_%s", i,
3281                                             rte_ixgbe_rxq_strings[stat].name);
3282                                         count++;
3283                                 }
3284                         }
3285
3286                         /* TX Priority Stats */
3287                         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3288                                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3289                                         snprintf(xstats_names[count].name,
3290                                             sizeof(xstats_names[count].name),
3291                                             "tx_priority%u_%s", i,
3292                                             rte_ixgbe_txq_strings[stat].name);
3293                                         count++;
3294                                 }
3295                         }
3296                 }
3297                 return cnt_stats;
3298         }
3299
3300         uint16_t i;
3301         uint16_t size = ixgbe_xstats_calc_num();
3302         struct rte_eth_xstat_name xstats_names_copy[size];
3303
3304         ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
3305                         size);
3306
3307         for (i = 0; i < limit; i++) {
3308                 if (ids[i] >= size) {
3309                         PMD_INIT_LOG(ERR, "id value isn't valid");
3310                         return -1;
3311                 }
3312                 strcpy(xstats_names[i].name,
3313                                 xstats_names_copy[ids[i]].name);
3314         }
3315         return limit;
3316 }
3317
3318 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3319         struct rte_eth_xstat_name *xstats_names, unsigned limit)
3320 {
3321         unsigned i;
3322
3323         if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3324                 return -ENOMEM;
3325
3326         if (xstats_names != NULL)
3327                 for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
3328                         snprintf(xstats_names[i].name,
3329                                 sizeof(xstats_names[i].name),
3330                                 "%s", rte_ixgbevf_stats_strings[i].name);
3331         return IXGBEVF_NB_XSTATS;
3332 }
3333
3334 static int
3335 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3336                                          unsigned n)
3337 {
3338         struct ixgbe_hw *hw =
3339                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3340         struct ixgbe_hw_stats *hw_stats =
3341                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3342         struct ixgbe_macsec_stats *macsec_stats =
3343                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3344                                 dev->data->dev_private);
3345         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3346         unsigned i, stat, count = 0;
3347
3348         count = ixgbe_xstats_calc_num();
3349
3350         if (n < count)
3351                 return count;
3352
3353         total_missed_rx = 0;
3354         total_qbrc = 0;
3355         total_qprc = 0;
3356         total_qprdc = 0;
3357
3358         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3359                         &total_qbrc, &total_qprc, &total_qprdc);
3360
3361         /* If this is a reset, xstats is NULL, and we have cleared the
3362          * registers by reading them.
3363          */
3364         if (!xstats)
3365                 return 0;
3366
3367         /* Extended stats from ixgbe_hw_stats */
3368         count = 0;
3369         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3370                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3371                                 rte_ixgbe_stats_strings[i].offset);
3372                 xstats[count].id = count;
3373                 count++;
3374         }
3375
3376         /* MACsec Stats */
3377         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3378                 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3379                                 rte_ixgbe_macsec_strings[i].offset);
3380                 xstats[count].id = count;
3381                 count++;
3382         }
3383
3384         /* RX Priority Stats */
3385         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3386                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3387                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3388                                         rte_ixgbe_rxq_strings[stat].offset +
3389                                         (sizeof(uint64_t) * i));
3390                         xstats[count].id = count;
3391                         count++;
3392                 }
3393         }
3394
3395         /* TX Priority Stats */
3396         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3397                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3398                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3399                                         rte_ixgbe_txq_strings[stat].offset +
3400                                         (sizeof(uint64_t) * i));
3401                         xstats[count].id = count;
3402                         count++;
3403                 }
3404         }
3405         return count;
3406 }
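
/*
 * The rte_ixgbe_*_strings tables used above pair a display name with a
 * byte offset into the stats struct, so a single loop can export any
 * field. A standalone sketch of that name/offset pattern (the struct and
 * table here are invented for illustration):
 */
#if 0
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
        uint64_t rx_ok;
        uint64_t rx_crc_err;
};

static const struct {
        const char *name;
        unsigned int offset;
} demo_strings[] = {
        { "rx_good_packets", offsetof(struct demo_stats, rx_ok) },
        { "rx_crc_errors", offsetof(struct demo_stats, rx_crc_err) },
};

int main(void)
{
        struct demo_stats st = { .rx_ok = 42, .rx_crc_err = 1 };
        unsigned int i;

        for (i = 0; i < 2; i++)
                printf("%s = %" PRIu64 "\n", demo_strings[i].name,
                       *(uint64_t *)((char *)&st + demo_strings[i].offset));
        return 0;
}
#endif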
3407
3408 static int
3409 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3410                 uint64_t *values, unsigned int n)
3411 {
3412         if (!ids) {
3413                 struct ixgbe_hw *hw =
3414                                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3415                 struct ixgbe_hw_stats *hw_stats =
3416                                 IXGBE_DEV_PRIVATE_TO_STATS(
3417                                                 dev->data->dev_private);
3418                 struct ixgbe_macsec_stats *macsec_stats =
3419                                 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3420                                         dev->data->dev_private);
3421                 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3422                 unsigned int i, stat, count = 0;
3423
3424                 count = ixgbe_xstats_calc_num();
3425
3426                 if (n < count)
3427                         return count;
3428
3429                 total_missed_rx = 0;
3430                 total_qbrc = 0;
3431                 total_qprc = 0;
3432                 total_qprdc = 0;
3433
3434                 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
3435                                 &total_missed_rx, &total_qbrc, &total_qprc,
3436                                 &total_qprdc);
3437
3438                 /* If this is a reset, values is NULL, and we have cleared the
3439                  * registers by reading them.
3440                  */
3441                 if (!values)
3442                         return 0;
3443
3444                 /* Extended stats from ixgbe_hw_stats */
3445                 count = 0;
3446                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3447                         values[count] = *(uint64_t *)(((char *)hw_stats) +
3448                                         rte_ixgbe_stats_strings[i].offset);
3449                         count++;
3450                 }
3451
3452                 /* MACsec Stats */
3453                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3454                         values[count] = *(uint64_t *)(((char *)macsec_stats) +
3455                                         rte_ixgbe_macsec_strings[i].offset);
3456                         count++;
3457                 }
3458
3459                 /* RX Priority Stats */
3460                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3461                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3462                                 values[count] =
3463                                         *(uint64_t *)(((char *)hw_stats) +
3464                                         rte_ixgbe_rxq_strings[stat].offset +
3465                                         (sizeof(uint64_t) * i));
3466                                 count++;
3467                         }
3468                 }
3469
3470                 /* TX Priority Stats */
3471                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3472                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3473                                 values[count] =
3474                                         *(uint64_t *)(((char *)hw_stats) +
3475                                         rte_ixgbe_txq_strings[stat].offset +
3476                                         (sizeof(uint64_t) * i));
3477                                 count++;
3478                         }
3479                 }
3480                 return count;
3481         }
3482
3483         uint16_t i;
3484         uint16_t size = ixgbe_xstats_calc_num();
3485         uint64_t values_copy[size];
3486
3487         ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
3488
3489         for (i = 0; i < n; i++) {
3490                 if (ids[i] >= size) {
3491                         PMD_INIT_LOG(ERR, "id value isn't valid");
3492                         return -1;
3493                 }
3494                 values[i] = values_copy[ids[i]];
3495         }
3496         return n;
3497 }
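
/*
 * Application side, these hooks back rte_eth_xstats_get_by_id(). A hedged
 * caller-side sketch fetching two counters by id (the ids would first be
 * learned from the names lookup; port_id is assumed valid and started):
 */
#if 0
uint64_t ids[2] = { 0, 5 };     /* ids learned via the xstats names lookup */
uint64_t values[2];

if (rte_eth_xstats_get_by_id(port_id, ids, values, 2) == 2) {
        /* values[0] and values[1] now hold the requested counters */
}
#endif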
3498
3499 static void
3500 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3501 {
3502         struct ixgbe_hw_stats *stats =
3503                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3504         struct ixgbe_macsec_stats *macsec_stats =
3505                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3506                                 dev->data->dev_private);
3507
3508         unsigned count = ixgbe_xstats_calc_num();
3509
3510         /* HW registers are cleared on read */
3511         ixgbe_dev_xstats_get(dev, NULL, count);
3512
3513         /* Reset software totals */
3514         memset(stats, 0, sizeof(*stats));
3515         memset(macsec_stats, 0, sizeof(*macsec_stats));
3516 }
3517
3518 static void
3519 ixgbevf_update_stats(struct rte_eth_dev *dev)
3520 {
3521         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3522         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3523                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3524
3525         /* Good Rx packets, including VF loopback */
3526         UPDATE_VF_STAT(IXGBE_VFGPRC,
3527             hw_stats->last_vfgprc, hw_stats->vfgprc);
3528
3529         /* Good Rx octets, including VF loopback */
3530         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3531             hw_stats->last_vfgorc, hw_stats->vfgorc);
3532
3533         /* Good Tx packets, including VF loopback */
3534         UPDATE_VF_STAT(IXGBE_VFGPTC,
3535             hw_stats->last_vfgptc, hw_stats->vfgptc);
3536
3537         /* Good Tx octets, including VF loopback */
3538         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3539             hw_stats->last_vfgotc, hw_stats->vfgotc);
3540
3541         /* Rx Multicast packets */
3542         UPDATE_VF_STAT(IXGBE_VFMPRC,
3543             hw_stats->last_vfmprc, hw_stats->vfmprc);
3544 }
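
/*
 * The UPDATE_VF_STAT* macros used above maintain 64-bit totals for
 * free-running 32-bit (or 36-bit) hardware counters by adding only the
 * delta since the last read. A standalone sketch of the wraparound-safe
 * 32-bit pattern they rely on (unsigned subtraction stays correct across
 * a single counter wrap):
 */
#if 0
#include <stdint.h>

static void update_stat(uint32_t cur, uint32_t *last, uint64_t *total)
{
        *total += (uint32_t)(cur - *last);      /* modulo-2^32 delta */
        *last = cur;
}
#endif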
3545
3546 static int
3547 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3548                        unsigned n)
3549 {
3550         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3551                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3552         unsigned i;
3553
3554         if (n < IXGBEVF_NB_XSTATS)
3555                 return IXGBEVF_NB_XSTATS;
3556
3557         ixgbevf_update_stats(dev);
3558
3559         if (!xstats)
3560                 return 0;
3561
3562         /* Extended stats */
3563         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3564                 xstats[i].id = i;
3565                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3566                         rte_ixgbevf_stats_strings[i].offset);
3567         }
3568
3569         return IXGBEVF_NB_XSTATS;
3570 }
3571
3572 static int
3573 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3574 {
3575         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3576                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3577
3578         ixgbevf_update_stats(dev);
3579
3580         if (stats == NULL)
3581                 return -EINVAL;
3582
3583         stats->ipackets = hw_stats->vfgprc;
3584         stats->ibytes = hw_stats->vfgorc;
3585         stats->opackets = hw_stats->vfgptc;
3586         stats->obytes = hw_stats->vfgotc;
3587         return 0;
3588 }
3589
3590 static void
3591 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3592 {
3593         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3594                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3595
3596         /* Sync HW register to the last stats */
3597         ixgbevf_dev_stats_get(dev, NULL);
3598
3599         /* reset HW current stats */
3600         hw_stats->vfgprc = 0;
3601         hw_stats->vfgorc = 0;
3602         hw_stats->vfgptc = 0;
3603         hw_stats->vfgotc = 0;
3604 }
3605
3606 static int
3607 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3608 {
3609         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3610         u16 eeprom_verh, eeprom_verl;
3611         u32 etrack_id;
3612         int ret;
3613
3614         ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3615         ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3616
3617         etrack_id = (eeprom_verh << 16) | eeprom_verl;
3618         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3619
3620         ret += 1; /* add the size of '\0' */
3621         if (fw_size < (u32)ret)
3622                 return ret;
3623         else
3624                 return 0;
3625 }
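
/*
 * Callers can size the buffer from the return value: a positive return is
 * the number of bytes, including the trailing '\0', that the version
 * string needs. An illustrative caller-side sketch through the generic
 * ethdev wrapper (dev_id is assumed to be a valid, probed port):
 */
#if 0
char fw[8];
int need = rte_eth_dev_fw_version_get(dev_id, fw, sizeof(fw));

if (need > 0) {
        /* fw was too small: retry with a buffer of at least `need` bytes */
}
#endif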
3626
3627 static void
3628 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3629 {
3630         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3631         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3632         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3633
3634         dev_info->pci_dev = pci_dev;
3635         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3636         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3637         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3638                 /*
3639                  * When DCB/VT is off, maximum number of queues changes,
3640                  * except for 82598EB, which remains constant.
3641                  */
3642                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3643                                 hw->mac.type != ixgbe_mac_82598EB)
3644                         dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3645         }
3646         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3647         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3648         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3649         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3650         dev_info->max_vfs = pci_dev->max_vfs;
3651         if (hw->mac.type == ixgbe_mac_82598EB)
3652                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3653         else
3654                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3655         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3656         dev_info->rx_offload_capa =
3657                 DEV_RX_OFFLOAD_VLAN_STRIP |
3658                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3659                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3660                 DEV_RX_OFFLOAD_TCP_CKSUM;
3661
3662         /*
3663          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
3664          * mode.
3665          */
3666         if ((hw->mac.type == ixgbe_mac_82599EB ||
3667              hw->mac.type == ixgbe_mac_X540) &&
3668             !RTE_ETH_DEV_SRIOV(dev).active)
3669                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
3670
3671         if (hw->mac.type == ixgbe_mac_82599EB ||
3672             hw->mac.type == ixgbe_mac_X540)
3673                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
3674
3675         if (hw->mac.type == ixgbe_mac_X550 ||
3676             hw->mac.type == ixgbe_mac_X550EM_x ||
3677             hw->mac.type == ixgbe_mac_X550EM_a)
3678                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
3679
3680         dev_info->tx_offload_capa =
3681                 DEV_TX_OFFLOAD_VLAN_INSERT |
3682                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3683                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3684                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3685                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3686                 DEV_TX_OFFLOAD_TCP_TSO;
3687
3688         if (hw->mac.type == ixgbe_mac_82599EB ||
3689             hw->mac.type == ixgbe_mac_X540)
3690                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
3691
3692         if (hw->mac.type == ixgbe_mac_X550 ||
3693             hw->mac.type == ixgbe_mac_X550EM_x ||
3694             hw->mac.type == ixgbe_mac_X550EM_a)
3695                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
3696
3697         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3698                 .rx_thresh = {
3699                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3700                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3701                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3702                 },
3703                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3704                 .rx_drop_en = 0,
3705         };
3706
3707         dev_info->default_txconf = (struct rte_eth_txconf) {
3708                 .tx_thresh = {
3709                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3710                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3711                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3712                 },
3713                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3714                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3715                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3716                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3717         };
3718
3719         dev_info->rx_desc_lim = rx_desc_lim;
3720         dev_info->tx_desc_lim = tx_desc_lim;
3721
3722         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3723         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3724         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3725
3726         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3727         if (hw->mac.type == ixgbe_mac_X540 ||
3728             hw->mac.type == ixgbe_mac_X540_vf ||
3729             hw->mac.type == ixgbe_mac_X550 ||
3730             hw->mac.type == ixgbe_mac_X550_vf) {
3731                 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3732         }
3733         if (hw->mac.type == ixgbe_mac_X550) {
3734                 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
3735                 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
3736         }
3737 }
3738
3739 static const uint32_t *
3740 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3741 {
3742         static const uint32_t ptypes[] = {
3743                 /* For non-vec functions,
3744                  * see ixgbe_rxd_pkt_info_to_pkt_type();
3745                  * for vec functions,
3746                  * see _recv_raw_pkts_vec().
3747                  */
3748                 RTE_PTYPE_L2_ETHER,
3749                 RTE_PTYPE_L3_IPV4,
3750                 RTE_PTYPE_L3_IPV4_EXT,
3751                 RTE_PTYPE_L3_IPV6,
3752                 RTE_PTYPE_L3_IPV6_EXT,
3753                 RTE_PTYPE_L4_SCTP,
3754                 RTE_PTYPE_L4_TCP,
3755                 RTE_PTYPE_L4_UDP,
3756                 RTE_PTYPE_TUNNEL_IP,
3757                 RTE_PTYPE_INNER_L3_IPV6,
3758                 RTE_PTYPE_INNER_L3_IPV6_EXT,
3759                 RTE_PTYPE_INNER_L4_TCP,
3760                 RTE_PTYPE_INNER_L4_UDP,
3761                 RTE_PTYPE_UNKNOWN
3762         };
3763
3764         if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3765             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3766             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3767             dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3768                 return ptypes;
3769
3770 #if defined(RTE_ARCH_X86)
3771         if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3772             dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3773                 return ptypes;
3774 #endif
3775         return NULL;
3776 }
3777
3778 static void
3779 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3780                      struct rte_eth_dev_info *dev_info)
3781 {
3782         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3783         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3784
3785         dev_info->pci_dev = pci_dev;
3786         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3787         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3788         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3789         dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
3790         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3791         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3792         dev_info->max_vfs = pci_dev->max_vfs;
3793         if (hw->mac.type == ixgbe_mac_82598EB)
3794                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3795         else
3796                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3797         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
3798                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3799                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3800                                 DEV_RX_OFFLOAD_TCP_CKSUM;
3801         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
3802                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3803                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3804                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3805                                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3806                                 DEV_TX_OFFLOAD_TCP_TSO;
3807
3808         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3809                 .rx_thresh = {
3810                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3811                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3812                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3813                 },
3814                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3815                 .rx_drop_en = 0,
3816         };
3817
3818         dev_info->default_txconf = (struct rte_eth_txconf) {
3819                 .tx_thresh = {
3820                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3821                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3822                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3823                 },
3824                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3825                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3826                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3827                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3828         };
3829
3830         dev_info->rx_desc_lim = rx_desc_lim;
3831         dev_info->tx_desc_lim = tx_desc_lim;
3832 }
3833
3834 static int
3835 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3836                    int *link_up, int wait_to_complete)
3837 {
3838         /**
3839          * For a quick link status check, when wait_to_complete == 0,
3840          * skip the PF link status check.
3841          */
3842         bool no_pflink_check = wait_to_complete == 0;
3843         struct ixgbe_mbx_info *mbx = &hw->mbx;
3844         struct ixgbe_mac_info *mac = &hw->mac;
3845         uint32_t links_reg, in_msg;
3846         int ret_val = 0;
3847
3848         /* If we were hit with a reset, drop the link */
3849         if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
3850                 mac->get_link_status = true;
3851
3852         if (!mac->get_link_status)
3853                 goto out;
3854
3855         /* if link status is down, no point in checking whether the PF is up */
3856         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3857         if (!(links_reg & IXGBE_LINKS_UP))
3858                 goto out;
3859
3860         /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
3861          * before the link status is correct
3862          */
3863         if (mac->type == ixgbe_mac_82599_vf) {
3864                 int i;
3865
3866                 for (i = 0; i < 5; i++) {
3867                         rte_delay_us(100);
3868                         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3869
3870                         if (!(links_reg & IXGBE_LINKS_UP))
3871                                 goto out;
3872                 }
3873         }
3874
3875         switch (links_reg & IXGBE_LINKS_SPEED_82599) {
3876         case IXGBE_LINKS_SPEED_10G_82599:
3877                 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3878                 if (hw->mac.type >= ixgbe_mac_X550) {
3879                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3880                                 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
3881                 }
3882                 break;
3883         case IXGBE_LINKS_SPEED_1G_82599:
3884                 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3885                 break;
3886         case IXGBE_LINKS_SPEED_100_82599:
3887                 *speed = IXGBE_LINK_SPEED_100_FULL;
3888                 if (hw->mac.type == ixgbe_mac_X550) {
3889                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3890                                 *speed = IXGBE_LINK_SPEED_5GB_FULL;
3891                 }
3892                 break;
3893         case IXGBE_LINKS_SPEED_10_X550EM_A:
3894                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3895                 /* Reserved on older MACs */
3896                 if (hw->mac.type >= ixgbe_mac_X550)
3897                         *speed = IXGBE_LINK_SPEED_10_FULL;
3898                 break;
3899         default:
3900                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3901         }
3902
3903         if (no_pflink_check) {
3904                 if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
3905                         mac->get_link_status = true;
3906                 else
3907                         mac->get_link_status = false;
3908
3909                 goto out;
3910         }
3911         /* if the read failed it could just be a mailbox collision; best to
3912          * wait until we are called again and not report an error
3913          */
3914         if (mbx->ops.read(hw, &in_msg, 1, 0))
3915                 goto out;
3916
3917         if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
3918                 /* msg is not CTS; if it is a NACK we must have lost CTS status */
3919                 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
3920                         ret_val = -1;
3921                 goto out;
3922         }
3923
3924         /* the pf is talking, if we timed out in the past we reinit */
3925         if (!mbx->timeout) {
3926                 ret_val = -1;
3927                 goto out;
3928         }
3929
3930         /* if we passed all the tests above then the link is up and we no
3931          * longer need to check for link
3932          */
3933         mac->get_link_status = false;
3934
3935 out:
3936         *link_up = !mac->get_link_status;
3937         return ret_val;
3938 }
3939
3940 /* return 0 means link status changed, -1 means not changed */
3941 static int
3942 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
3943                             int wait_to_complete, int vf)
3944 {
3945         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3946         struct rte_eth_link link, old;
3947         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3948         struct ixgbe_interrupt *intr =
3949                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3950         int link_up;
3951         int diag;
3952         u32 speed = 0;
3953         int wait = 1;
3954         bool autoneg = false;
3955
3956         link.link_status = ETH_LINK_DOWN;
3957         link.link_speed = 0;
3958         link.link_duplex = ETH_LINK_HALF_DUPLEX;
3959         link.link_autoneg = ETH_LINK_AUTONEG;
3960         memset(&old, 0, sizeof(old));
3961         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
3962
3963         hw->mac.get_link_status = true;
3964
3965         if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
3966                 ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
3967                 speed = hw->phy.autoneg_advertised;
3968                 if (!speed)
3969                         ixgbe_get_link_capabilities(hw, &speed, &autoneg);
3970                 ixgbe_setup_link(hw, speed, true);
3971         }
3972
3973         /* skip waiting for completion if not requested or if the LSC interrupt is enabled */
3974         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
3975                 wait = 0;
3976
3977         if (vf)
3978                 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
3979         else
3980                 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
3981
3982         if (diag != 0) {
3983                 link.link_speed = ETH_SPEED_NUM_100M;
3984                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3985                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3986                 if (link.link_status == old.link_status)
3987                         return -1;
3988                 return 0;
3989         }
3990
3991         if (link_up == 0) {
3992                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3993                 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
3994                 if (link.link_status == old.link_status)
3995                         return -1;
3996                 return 0;
3997         }
3998         intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
3999         link.link_status = ETH_LINK_UP;
4000         link.link_duplex = ETH_LINK_FULL_DUPLEX;
4001
4002         switch (link_speed) {
4003         default:
4004         case IXGBE_LINK_SPEED_UNKNOWN:
4005                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
4006                 link.link_speed = ETH_SPEED_NUM_100M;
4007                 break;
4008
4009         case IXGBE_LINK_SPEED_100_FULL:
4010                 link.link_speed = ETH_SPEED_NUM_100M;
4011                 break;
4012
4013         case IXGBE_LINK_SPEED_1GB_FULL:
4014                 link.link_speed = ETH_SPEED_NUM_1G;
4015                 break;
4016
4017         case IXGBE_LINK_SPEED_2_5GB_FULL:
4018                 link.link_speed = ETH_SPEED_NUM_2_5G;
4019                 break;
4020
4021         case IXGBE_LINK_SPEED_5GB_FULL:
4022                 link.link_speed = ETH_SPEED_NUM_5G;
4023                 break;
4024
4025         case IXGBE_LINK_SPEED_10GB_FULL:
4026                 link.link_speed = ETH_SPEED_NUM_10G;
4027                 break;
4028         }
4029         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
4030
4031         if (link.link_status == old.link_status)
4032                 return -1;
4033
4034         return 0;
4035 }
4036
4037 static int
4038 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4039 {
4040         return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
4041 }
4042
4043 static int
4044 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4045 {
4046         return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
4047 }
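/*
 * Illustrative sketch (editor's addition): applications reach the two
 * wrappers above through rte_eth_link_get() (which may wait for
 * completion) or rte_eth_link_get_nowait(). A minimal, hypothetical
 * usage; the exact prototypes may differ between DPDK releases:
 */
static void
example_report_link(uint16_t port_id)
{
	struct rte_eth_link link_info;

	/* non-blocking query; corresponds to wait_to_complete == 0 above */
	rte_eth_link_get_nowait(port_id, &link_info);
	printf("port %u: link %s, %u Mbps\n", port_id,
	       link_info.link_status ? "up" : "down",
	       link_info.link_speed);
}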
4048
4049 static void
4050 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
4051 {
4052         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4053         uint32_t fctrl;
4054
4055         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4056         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4057         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4058 }
4059
4060 static void
4061 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
4062 {
4063         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4064         uint32_t fctrl;
4065
4066         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4067         fctrl &= (~IXGBE_FCTRL_UPE);
4068         if (dev->data->all_multicast == 1)
4069                 fctrl |= IXGBE_FCTRL_MPE;
4070         else
4071                 fctrl &= (~IXGBE_FCTRL_MPE);
4072         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4073 }
4074
4075 static void
4076 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4077 {
4078         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4079         uint32_t fctrl;
4080
4081         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4082         fctrl |= IXGBE_FCTRL_MPE;
4083         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4084 }
4085
4086 static void
4087 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4088 {
4089         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4090         uint32_t fctrl;
4091
4092         if (dev->data->promiscuous == 1)
4093                 return; /* must remain in all_multicast mode */
4094
4095         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4096         fctrl &= (~IXGBE_FCTRL_MPE);
4097         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4098 }
4099
4100 /**
4101  * It clears the interrupt causes and enables or disables the interrupt.
4102  * It will be called only once during NIC initialization.
4103  *
4104  * @param dev
4105  *  Pointer to struct rte_eth_dev.
4106  * @param on
4107  *  Enable or Disable.
4108  *
4109  * @return
4110  *  - On success, zero.
4111  *  - On failure, a negative value.
4112  */
4113 static int
4114 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
4115 {
4116         struct ixgbe_interrupt *intr =
4117                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4118
4119         ixgbe_dev_link_status_print(dev);
4120         if (on)
4121                 intr->mask |= IXGBE_EICR_LSC;
4122         else
4123                 intr->mask &= ~IXGBE_EICR_LSC;
4124
4125         return 0;
4126 }
4127
4128 /**
4129  * It clears the interrupt causes and enables the interrupt.
4130  * It will be called only once during NIC initialization.
4131  *
4132  * @param dev
4133  *  Pointer to struct rte_eth_dev.
4134  *
4135  * @return
4136  *  - On success, zero.
4137  *  - On failure, a negative value.
4138  */
4139 static int
4140 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4141 {
4142         struct ixgbe_interrupt *intr =
4143                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4144
4145         intr->mask |= IXGBE_EICR_RTX_QUEUE;
4146
4147         return 0;
4148 }
4149
4150 /**
4151  * It clears the interrupt causes and enables the interrupt.
4152  * It will be called only once during NIC initialization.
4153  *
4154  * @param dev
4155  *  Pointer to struct rte_eth_dev.
4156  *
4157  * @return
4158  *  - On success, zero.
4159  *  - On failure, a negative value.
4160  */
4161 static int
4162 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4163 {
4164         struct ixgbe_interrupt *intr =
4165                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4166
4167         intr->mask |= IXGBE_EICR_LINKSEC;
4168
4169         return 0;
4170 }
4171
4172 /*
4173  * It reads the ICR and sets the flag (IXGBE_EICR_LSC) for the link_update.
4174  *
4175  * @param dev
4176  *  Pointer to struct rte_eth_dev.
4177  *
4178  * @return
4179  *  - On success, zero.
4180  *  - On failure, a negative value.
4181  */
4182 static int
4183 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4184 {
4185         uint32_t eicr;
4186         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4187         struct ixgbe_interrupt *intr =
4188                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4189
4190         /* clear all cause mask */
4191         ixgbe_disable_intr(hw);
4192
4193         /* read-on-clear nic registers here */
4194         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4195         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4196
4197         intr->flags = 0;
4198
4199         /* set flag for async link update */
4200         if (eicr & IXGBE_EICR_LSC)
4201                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4202
4203         if (eicr & IXGBE_EICR_MAILBOX)
4204                 intr->flags |= IXGBE_FLAG_MAILBOX;
4205
4206         if (eicr & IXGBE_EICR_LINKSEC)
4207                 intr->flags |= IXGBE_FLAG_MACSEC;
4208
4209         if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
4210             hw->phy.type == ixgbe_phy_x550em_ext_t &&
4211             (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4212                 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4213
4214         return 0;
4215 }
4216
4217 /**
4218  * It gets and then prints the link status.
4219  *
4220  * @param dev
4221  *  Pointer to struct rte_eth_dev.
4222  *
4223  * @return
4224  *  void; the function only prints the current link status
4225  *  and the PCI address.
4226  */
4227 static void
4228 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4229 {
4230         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4231         struct rte_eth_link link;
4232
4233         memset(&link, 0, sizeof(link));
4234         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4235         if (link.link_status) {
4236                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4237                                         (int)(dev->data->port_id),
4238                                         (unsigned)link.link_speed,
4239                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4240                                         "full-duplex" : "half-duplex");
4241         } else {
4242                 PMD_INIT_LOG(INFO, "Port %d: Link Down",
4243                                 (int)(dev->data->port_id));
4244         }
4245         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4246                                 pci_dev->addr.domain,
4247                                 pci_dev->addr.bus,
4248                                 pci_dev->addr.devid,
4249                                 pci_dev->addr.function);
4250 }
4251
4252 /*
4253  * It executes link_update after an interrupt has occurred.
4254  *
4255  * @param dev
4256  *  Pointer to struct rte_eth_dev.
4257  *
4258  * @return
4259  *  - On success, zero.
4260  *  - On failure, a negative value.
4261  */
4262 static int
4263 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
4264                            struct rte_intr_handle *intr_handle)
4265 {
4266         struct ixgbe_interrupt *intr =
4267                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4268         int64_t timeout;
4269         struct rte_eth_link link;
4270         struct ixgbe_hw *hw =
4271                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4272
4273         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4274
4275         if (intr->flags & IXGBE_FLAG_MAILBOX) {
4276                 ixgbe_pf_mbx_process(dev);
4277                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
4278         }
4279
4280         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4281                 ixgbe_handle_lasi(hw);
4282                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4283         }
4284
4285         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4286                 /* get the link status before the update, to predict the change */
4287                 memset(&link, 0, sizeof(link));
4288                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4289
4290                 ixgbe_dev_link_update(dev, 0);
4291
4292                 /* link was down: likely to come up */
4293                 if (!link.link_status)
4294                         /* handle it 1 sec later, waiting for it to stabilize */
4295                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4296                 /* link was up: likely to go down */
4297                 else
4298                         /* handle it 4 sec later, waiting for it to stabilize */
4299                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4300
4301                 ixgbe_dev_link_status_print(dev);
4302                 if (rte_eal_alarm_set(timeout * 1000,
4303                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4304                         PMD_DRV_LOG(ERR, "Error setting alarm");
4305                 else {
4306                         /* remember original mask */
4307                         intr->mask_original = intr->mask;
4308                         /* only disable lsc interrupt */
4309                         intr->mask &= ~IXGBE_EIMS_LSC;
4310                 }
4311         }
4312
4313         PMD_DRV_LOG(DEBUG, "enable intr immediately");
4314         ixgbe_enable_intr(dev);
4315         rte_intr_enable(intr_handle);
4316
4317         return 0;
4318 }
4319
4320 /**
4321  * Interrupt handler to be registered as an alarm callback for the delayed
4322  * handling of a specific interrupt, waiting for the NIC state to become
4323  * stable. As the ixgbe interrupt state is not stable right after the link
4324  * goes down, it needs to wait 4 seconds to get a stable status.
4325  *
4326  * @param handle
4327  *  Pointer to interrupt handle.
4328  * @param param
4329  *  The address of the parameter (struct rte_eth_dev *) registered before.
4330  *
4331  * @return
4332  *  void
4333  */
4334 static void
4335 ixgbe_dev_interrupt_delayed_handler(void *param)
4336 {
4337         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4338         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4339         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4340         struct ixgbe_interrupt *intr =
4341                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4342         struct ixgbe_hw *hw =
4343                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4344         uint32_t eicr;
4345
4346         ixgbe_disable_intr(hw);
4347
4348         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4349         if (eicr & IXGBE_EICR_MAILBOX)
4350                 ixgbe_pf_mbx_process(dev);
4351
4352         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4353                 ixgbe_handle_lasi(hw);
4354                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4355         }
4356
4357         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4358                 ixgbe_dev_link_update(dev, 0);
4359                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4360                 ixgbe_dev_link_status_print(dev);
4361                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
4362                                               NULL, NULL);
4363         }
4364
4365         if (intr->flags & IXGBE_FLAG_MACSEC) {
4366                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
4367                                               NULL, NULL);
4368                 intr->flags &= ~IXGBE_FLAG_MACSEC;
4369         }
4370
4371         /* restore original mask */
4372         intr->mask = intr->mask_original;
4373         intr->mask_original = 0;
4374
4375         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4376         ixgbe_enable_intr(dev);
4377         rte_intr_enable(intr_handle);
4378 }
4379
4380 /**
4381  * Interrupt handler triggered by the NIC for handling a
4382  * specific interrupt.
4383  *
4384  * @param handle
4385  *  Pointer to interrupt handle.
4386  * @param param
4387  *  The address of the parameter (struct rte_eth_dev *) registered before.
4388  *
4389  * @return
4390  *  void
4391  */
4392 static void
4393 ixgbe_dev_interrupt_handler(void *param)
4394 {
4395         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4396
4397         ixgbe_dev_interrupt_get_status(dev);
4398         ixgbe_dev_interrupt_action(dev, dev->intr_handle);
4399 }
4400
4401 static int
4402 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4403 {
4404         struct ixgbe_hw *hw;
4405
4406         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4407         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4408 }
4409
4410 static int
4411 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4412 {
4413         struct ixgbe_hw *hw;
4414
4415         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4416         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4417 }
4418
4419 static int
4420 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4421 {
4422         struct ixgbe_hw *hw;
4423         uint32_t mflcn_reg;
4424         uint32_t fccfg_reg;
4425         int rx_pause;
4426         int tx_pause;
4427
4428         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4429
4430         fc_conf->pause_time = hw->fc.pause_time;
4431         fc_conf->high_water = hw->fc.high_water[0];
4432         fc_conf->low_water = hw->fc.low_water[0];
4433         fc_conf->send_xon = hw->fc.send_xon;
4434         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4435
4436         /*
4437          * Return rx_pause status according to actual setting of
4438          * MFLCN register.
4439          */
4440         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4441         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4442                 rx_pause = 1;
4443         else
4444                 rx_pause = 0;
4445
4446         /*
4447          * Return tx_pause status according to actual setting of
4448          * FCCFG register.
4449          */
4450         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4451         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4452                 tx_pause = 1;
4453         else
4454                 tx_pause = 0;
4455
4456         if (rx_pause && tx_pause)
4457                 fc_conf->mode = RTE_FC_FULL;
4458         else if (rx_pause)
4459                 fc_conf->mode = RTE_FC_RX_PAUSE;
4460         else if (tx_pause)
4461                 fc_conf->mode = RTE_FC_TX_PAUSE;
4462         else
4463                 fc_conf->mode = RTE_FC_NONE;
4464
4465         return 0;
4466 }
4467
4468 static int
4469 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4470 {
4471         struct ixgbe_hw *hw;
4472         int err;
4473         uint32_t rx_buf_size;
4474         uint32_t max_high_water;
4475         uint32_t mflcn;
4476         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4477                 ixgbe_fc_none,
4478                 ixgbe_fc_rx_pause,
4479                 ixgbe_fc_tx_pause,
4480                 ixgbe_fc_full
4481         };
4482
4483         PMD_INIT_FUNC_TRACE();
4484
4485         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4486         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4487         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4488
4489         /*
4490          * Reserve at least one Ethernet frame for the watermark;
4491          * high_water/low_water are in kilobytes for ixgbe
4492          */
4493         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4494         if ((fc_conf->high_water > max_high_water) ||
4495                 (fc_conf->high_water < fc_conf->low_water)) {
4496                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4497                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4498                 return -EINVAL;
4499         }
4500
4501         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4502         hw->fc.pause_time     = fc_conf->pause_time;
4503         hw->fc.high_water[0]  = fc_conf->high_water;
4504         hw->fc.low_water[0]   = fc_conf->low_water;
4505         hw->fc.send_xon       = fc_conf->send_xon;
4506         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
4507
4508         err = ixgbe_fc_enable(hw);
4509
4510         /* Not negotiated is not an error case */
4511         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
4512
4513                 /* check if we want to forward MAC frames - driver doesn't have native
4514                  * capability to do that, so we'll write the registers ourselves */
4515
4516                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4517
4518                 /* set or clear MFLCN.PMCF bit depending on configuration */
4519                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4520                         mflcn |= IXGBE_MFLCN_PMCF;
4521                 else
4522                         mflcn &= ~IXGBE_MFLCN_PMCF;
4523
4524                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
4525                 IXGBE_WRITE_FLUSH(hw);
4526
4527                 return 0;
4528         }
4529
4530         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
4531         return -EIO;
4532 }
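/*
 * Illustrative sketch (editor's addition): configuring 802.3x flow control
 * from an application through the generic API that lands in
 * ixgbe_flow_ctrl_set() above. The watermark values are hypothetical
 * examples in kilobytes and must respect the max_high_water bound checked
 * above.
 */
static int
example_enable_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	/* start from the current settings so unset fields stay sane */
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_FC_FULL;   /* pause in both directions */
	fc_conf.high_water = 0x80;    /* KB; example value */
	fc_conf.low_water = 0x40;     /* KB; example value */
	fc_conf.pause_time = 0x680;   /* timer placed in XOFF frames */

	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}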
4533
4534 /**
4535  *  ixgbe_dcb_pfc_enable_generic - Enable priority flow control
4536  *  @hw: pointer to hardware structure
4537  *  @tc_num: traffic class number
4538  *  Enable flow control according to the current settings.
4539  */
4540 static int
4541 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4542 {
4543         int ret_val = 0;
4544         uint32_t mflcn_reg, fccfg_reg;
4545         uint32_t reg;
4546         uint32_t fcrtl, fcrth;
4547         uint8_t i;
4548         uint8_t nb_rx_en;
4549
4550         /* Validate the water mark configuration */
4551         if (!hw->fc.pause_time) {
4552                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4553                 goto out;
4554         }
4555
4556         /* Low water mark of zero causes XOFF floods */
4557         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4558                 /* High/Low water cannot be 0 */
4559                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4560                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4561                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4562                         goto out;
4563                 }
4564
4565                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4566                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4567                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4568                         goto out;
4569                 }
4570         }
4571         /* Negotiate the fc mode to use */
4572         ixgbe_fc_autoneg(hw);
4573
4574         /* Disable any previous flow control settings */
4575         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4576         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4577
4578         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4579         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4580
4581         switch (hw->fc.current_mode) {
4582         case ixgbe_fc_none:
4583                 /*
4584                  * If more than one RX priority flow control is enabled,
4585                  * TX pause cannot be disabled.
4586                  */
4587                 nb_rx_en = 0;
4588                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4589                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4590                         if (reg & IXGBE_FCRTH_FCEN)
4591                                 nb_rx_en++;
4592                 }
4593                 if (nb_rx_en > 1)
4594                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4595                 break;
4596         case ixgbe_fc_rx_pause:
4597                 /*
4598                  * Rx Flow control is enabled and Tx Flow control is
4599                  * disabled by software override. Since there really
4600                  * isn't a way to advertise that we are capable of RX
4601                  * Pause ONLY, we will advertise that we support both
4602                  * symmetric and asymmetric Rx PAUSE.  Later, we will
4603                  * disable the adapter's ability to send PAUSE frames.
4604                  */
4605                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4606                 /*
4607                  * If more than one RX priority flow control is enabled,
4608                  * TX pause cannot be disabled.
4609                  */
4610                 nb_rx_en = 0;
4611                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4612                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4613                         if (reg & IXGBE_FCRTH_FCEN)
4614                                 nb_rx_en++;
4615                 }
4616                 if (nb_rx_en > 1)
4617                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4618                 break;
4619         case ixgbe_fc_tx_pause:
4620                 /*
4621                  * Tx Flow control is enabled, and Rx Flow control is
4622                  * disabled by software override.
4623                  */
4624                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4625                 break;
4626         case ixgbe_fc_full:
4627                 /* Flow control (both Rx and Tx) is enabled by SW override. */
4628                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4629                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4630                 break;
4631         default:
4632                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4633                 ret_val = IXGBE_ERR_CONFIG;
4634                 goto out;
4635         }
4636
4637         /* Set 802.3x based flow control settings. */
4638         mflcn_reg |= IXGBE_MFLCN_DPF;
4639         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4640         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4641
4642         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
4643         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4644                 hw->fc.high_water[tc_num]) {
4645                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4646                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4647                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4648         } else {
4649                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4650                 /*
4651                  * In order to prevent Tx hangs when the internal Tx
4652                  * switch is enabled we must set the high water mark
4653                  * to the maximum FCRTH value.  This allows the Tx
4654                  * switch to function even under heavy Rx workloads.
4655                  */
4656                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4657         }
4658         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4659
4660         /* Configure pause time (2 TCs per register) */
4661         reg = hw->fc.pause_time * 0x00010001;
4662         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4663                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4664
4665         /* Configure flow control refresh threshold value */
4666         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4667
4668 out:
4669         return ret_val;
4670 }
4671
4672 static int
4673 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4674 {
4675         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4676         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4677
4678         if (hw->mac.type != ixgbe_mac_82598EB) {
4679                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4680         }
4681         return ret_val;
4682 }
4683
4684 static int
4685 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4686 {
4687         int err;
4688         uint32_t rx_buf_size;
4689         uint32_t max_high_water;
4690         uint8_t tc_num;
4691         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4692         struct ixgbe_hw *hw =
4693                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4694         struct ixgbe_dcb_config *dcb_config =
4695                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4696
4697         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4698                 ixgbe_fc_none,
4699                 ixgbe_fc_rx_pause,
4700                 ixgbe_fc_tx_pause,
4701                 ixgbe_fc_full
4702         };
4703
4704         PMD_INIT_FUNC_TRACE();
4705
4706         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4707         tc_num = map[pfc_conf->priority];
4708         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4709         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4710         /*
4711          * Reserve at least one Ethernet frame for the watermark;
4712          * high_water/low_water are in kilobytes for ixgbe
4713          */
4714         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4715         if ((pfc_conf->fc.high_water > max_high_water) ||
4716             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4717                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4718                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4719                 return -EINVAL;
4720         }
4721
4722         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4723         hw->fc.pause_time = pfc_conf->fc.pause_time;
4724         hw->fc.send_xon = pfc_conf->fc.send_xon;
4725         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
4726         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4727
4728         err = ixgbe_dcb_pfc_enable(dev, tc_num);
4729
4730         /* Not negotiated is not an error case */
4731         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4732                 return 0;
4733
4734         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4735         return -EIO;
4736 }
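/*
 * Illustrative sketch (editor's addition): per-priority flow control
 * reaches the callback above via rte_eth_dev_priority_flow_ctrl_set().
 * The priority and watermark values below are hypothetical examples.
 */
static int
example_enable_pfc(uint16_t port_id, uint8_t prio)
{
	struct rte_eth_pfc_conf pfc_conf;

	memset(&pfc_conf, 0, sizeof(pfc_conf));
	pfc_conf.priority = prio;          /* user priority 0..7 */
	pfc_conf.fc.mode = RTE_FC_FULL;
	pfc_conf.fc.high_water = 0x80;     /* KB; example value */
	pfc_conf.fc.low_water = 0x40;      /* KB; example value */
	pfc_conf.fc.pause_time = 0x680;

	return rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
}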
4737
4738 static int
4739 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
4740                           struct rte_eth_rss_reta_entry64 *reta_conf,
4741                           uint16_t reta_size)
4742 {
4743         uint16_t i, sp_reta_size;
4744         uint8_t j, mask;
4745         uint32_t reta, r;
4746         uint16_t idx, shift;
4747         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4748         uint32_t reta_reg;
4749
4750         PMD_INIT_FUNC_TRACE();
4751
4752         if (!ixgbe_rss_update_sp(hw->mac.type)) {
4753                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
4754                         "NIC.");
4755                 return -ENOTSUP;
4756         }
4757
4758         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4759         if (reta_size != sp_reta_size) {
4760                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4761                         "(%d) doesn't match the number supported by hardware "
4762                         "(%d)", reta_size, sp_reta_size);
4763                 return -EINVAL;
4764         }
4765
4766         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4767                 idx = i / RTE_RETA_GROUP_SIZE;
4768                 shift = i % RTE_RETA_GROUP_SIZE;
4769                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4770                                                 IXGBE_4_BIT_MASK);
4771                 if (!mask)
4772                         continue;
4773                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4774                 if (mask == IXGBE_4_BIT_MASK)
4775                         r = 0;
4776                 else
4777                         r = IXGBE_READ_REG(hw, reta_reg);
4778                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4779                         if (mask & (0x1 << j))
4780                                 reta |= reta_conf[idx].reta[shift + j] <<
4781                                                         (CHAR_BIT * j);
4782                         else
4783                                 reta |= r & (IXGBE_8_BIT_MASK <<
4784                                                 (CHAR_BIT * j));
4785                 }
4786                 IXGBE_WRITE_REG(hw, reta_reg, reta);
4787         }
4788
4789         return 0;
4790 }
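/*
 * Illustrative sketch (editor's addition): filling the RETA through the
 * generic API that ends up in ixgbe_dev_rss_reta_update() above. This
 * hypothetical helper spreads the table entries round-robin over
 * nb_queues; reta_size must match what rte_eth_dev_info_get() reports.
 */
static int
example_spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	/* 8 groups of 64 entries cover up to a 512-entry table */
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	uint16_t i;

	if (reta_size > 8 * RTE_RETA_GROUP_SIZE || nb_queues == 0)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		/* each rte_eth_rss_reta_entry64 covers 64 table entries */
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
			i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}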
4791
4792 static int
4793 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4794                          struct rte_eth_rss_reta_entry64 *reta_conf,
4795                          uint16_t reta_size)
4796 {
4797         uint16_t i, sp_reta_size;
4798         uint8_t j, mask;
4799         uint32_t reta;
4800         uint16_t idx, shift;
4801         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4802         uint32_t reta_reg;
4803
4804         PMD_INIT_FUNC_TRACE();
4805         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4806         if (reta_size != sp_reta_size) {
4807                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4808                         "(%d) doesn't match the number supported by hardware "
4809                         "(%d)", reta_size, sp_reta_size);
4810                 return -EINVAL;
4811         }
4812
4813         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4814                 idx = i / RTE_RETA_GROUP_SIZE;
4815                 shift = i % RTE_RETA_GROUP_SIZE;
4816                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4817                                                 IXGBE_4_BIT_MASK);
4818                 if (!mask)
4819                         continue;
4820
4821                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4822                 reta = IXGBE_READ_REG(hw, reta_reg);
4823                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4824                         if (mask & (0x1 << j))
4825                                 reta_conf[idx].reta[shift + j] =
4826                                         ((reta >> (CHAR_BIT * j)) &
4827                                                 IXGBE_8_BIT_MASK);
4828                 }
4829         }
4830
4831         return 0;
4832 }
4833
4834 static int
4835 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4836                                 uint32_t index, uint32_t pool)
4837 {
4838         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4839         uint32_t enable_addr = 1;
4840
4841         return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
4842                              pool, enable_addr);
4843 }
4844
4845 static void
4846 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
4847 {
4848         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4849
4850         ixgbe_clear_rar(hw, index);
4851 }
4852
4853 static void
4854 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4855 {
4856         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4857
4858         ixgbe_remove_rar(dev, 0);
4859
4860         ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
4861 }
4862
4863 static bool
4864 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
4865 {
4866         if (strcmp(dev->device->driver->name, drv->driver.name))
4867                 return false;
4868
4869         return true;
4870 }
4871
4872 bool
4873 is_ixgbe_supported(struct rte_eth_dev *dev)
4874 {
4875         return is_device_supported(dev, &rte_ixgbe_pmd);
4876 }
4877
4878 static int
4879 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4880 {
4881         uint32_t hlreg0;
4882         uint32_t maxfrs;
4883         struct ixgbe_hw *hw;
4884         struct rte_eth_dev_info dev_info;
4885         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4886         struct rte_eth_dev_data *dev_data = dev->data;
4887
4888         ixgbe_dev_info_get(dev, &dev_info);
4889
4890         /* check that mtu is within the allowed range */
4891         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
4892                 return -EINVAL;
4893
4894         /* If device is started, refuse mtu that requires the support of
4895          * scattered packets when this feature has not been enabled before.
4896          */
4897         if (dev_data->dev_started && !dev_data->scattered_rx &&
4898             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
4899              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
4900                 PMD_INIT_LOG(ERR, "Stop port first.");
4901                 return -EINVAL;
4902         }
4903
4904         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4905         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4906
4907         /* switch to jumbo mode if needed */
4908         if (frame_size > ETHER_MAX_LEN) {
4909                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
4910                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4911         } else {
4912                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
4913                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4914         }
4915         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4916
4917         /* update max frame size */
4918         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4919
4920         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4921         maxfrs &= 0x0000FFFF;
4922         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
4923         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4924
4925         return 0;
4926 }
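/*
 * Illustrative sketch (editor's addition): an application changes the MTU
 * with rte_eth_dev_set_mtu(), which invokes the callback above. 9000 is a
 * hypothetical jumbo MTU that stays within the 9728-byte frame limit.
 */
static int
example_set_jumbo_mtu(uint16_t port_id)
{
	/* 9000-byte MTU gives a 9018-byte frame incl. L2 header and CRC */
	return rte_eth_dev_set_mtu(port_id, 9000);
}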
4927
4928 /*
4929  * Virtual Function operations
4930  */
4931 static void
4932 ixgbevf_intr_disable(struct ixgbe_hw *hw)
4933 {
4934         PMD_INIT_FUNC_TRACE();
4935
4936         /* Clear the interrupt mask to stop interrupts from being generated */
4937         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
4938
4939         IXGBE_WRITE_FLUSH(hw);
4940 }
4941
4942 static void
4943 ixgbevf_intr_enable(struct ixgbe_hw *hw)
4944 {
4945         PMD_INIT_FUNC_TRACE();
4946
4947         /* VF enable interrupt autoclean */
4948         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
4949         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
4950         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
4951
4952         IXGBE_WRITE_FLUSH(hw);
4953 }
4954
4955 static int
4956 ixgbevf_dev_configure(struct rte_eth_dev *dev)
4957 {
4958         struct rte_eth_conf *conf = &dev->data->dev_conf;
4959         struct ixgbe_adapter *adapter =
4960                         (struct ixgbe_adapter *)dev->data->dev_private;
4961
4962         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
4963                      dev->data->port_id);
4964
4965         /*
4966          * The VF has no ability to enable/disable HW CRC stripping.
4967          * Keep the behavior consistent with the host PF.
4968          */
4969 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
4970         if (!conf->rxmode.hw_strip_crc) {
4971                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
4972                 conf->rxmode.hw_strip_crc = 1;
4973         }
4974 #else
4975         if (conf->rxmode.hw_strip_crc) {
4976                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
4977                 conf->rxmode.hw_strip_crc = 0;
4978         }
4979 #endif
4980
4981         /*
4982          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
4983          * allocation or vector Rx preconditions, we will reset it.
4984          */
4985         adapter->rx_bulk_alloc_allowed = true;
4986         adapter->rx_vec_allowed = true;
4987
4988         return 0;
4989 }
4990
4991 static int
4992 ixgbevf_dev_start(struct rte_eth_dev *dev)
4993 {
4994         struct ixgbe_hw *hw =
4995                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4996         uint32_t intr_vector = 0;
4997         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4998         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4999
5000         int err, mask = 0;
5001
5002         PMD_INIT_FUNC_TRACE();
5003
5004         hw->mac.ops.reset_hw(hw);
5005         hw->mac.get_link_status = true;
5006
5007         /* negotiate mailbox API version to use with the PF. */
5008         ixgbevf_negotiate_api(hw);
5009
5010         ixgbevf_dev_tx_init(dev);
5011
5012         /* This can fail when allocating mbufs for descriptor rings */
5013         err = ixgbevf_dev_rx_init(dev);
5014         if (err) {
5015                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
5016                 ixgbe_dev_clear_queues(dev);
5017                 return err;
5018         }
5019
5020         /* Set vfta */
5021         ixgbevf_set_vfta_all(dev, 1);
5022
5023         /* Set HW strip */
5024         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
5025                 ETH_VLAN_EXTEND_MASK;
5026         ixgbevf_vlan_offload_set(dev, mask);
5027
5028         ixgbevf_dev_rxtx_start(dev);
5029
5030         /* check and configure queue intr-vector mapping */
5031         if (dev->data->dev_conf.intr_conf.rxq != 0) {
5032                 /* According to the datasheet, only vectors 0/1/2 can be used;
5033                  * for now only one vector is used for the Rx queue
5034                  */
5035                 intr_vector = 1;
5036                 if (rte_intr_efd_enable(intr_handle, intr_vector))
5037                         return -1;
5038         }
5039
5040         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
5041                 intr_handle->intr_vec =
5042                         rte_zmalloc("intr_vec",
5043                                     dev->data->nb_rx_queues * sizeof(int), 0);
5044                 if (intr_handle->intr_vec == NULL) {
5045                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
5046                                      " intr_vec", dev->data->nb_rx_queues);
5047                         return -ENOMEM;
5048                 }
5049         }
5050         ixgbevf_configure_msix(dev);
5051
5052         /* When a VF port is bound to VFIO-PCI, only the miscellaneous interrupt
5053          * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
5054          * If the previous VFIO interrupt mapping set in eth_ixgbevf_dev_init()
5055          * is not cleared, the following rte_intr_enable() will fail when it
5056          * tries to map Rx queue interrupts to other VFIO vectors.
5057          * So clear the uio/vfio intr/eventfd first to avoid failure.
5058          */
5059         rte_intr_disable(intr_handle);
5060
5061         rte_intr_enable(intr_handle);
5062
5063         /* Re-enable interrupt for VF */
5064         ixgbevf_intr_enable(hw);
5065
5066         return 0;
5067 }
5068
5069 static void
5070 ixgbevf_dev_stop(struct rte_eth_dev *dev)
5071 {
5072         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5073         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5074         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5075
5076         PMD_INIT_FUNC_TRACE();
5077
5078         ixgbevf_intr_disable(hw);
5079
5080         hw->adapter_stopped = 1;
5081         ixgbe_stop_adapter(hw);
5082
5083         /*
5084          * Clear what we set, but keep shadow_vfta so it can be
5085          * restored after the device starts
5086           */
5087         ixgbevf_set_vfta_all(dev, 0);
5088
5089         /* Clear stored conf */
5090         dev->data->scattered_rx = 0;
5091
5092         ixgbe_dev_clear_queues(dev);
5093
5094         /* Clean datapath event and queue/vec mapping */
5095         rte_intr_efd_disable(intr_handle);
5096         if (intr_handle->intr_vec != NULL) {
5097                 rte_free(intr_handle->intr_vec);
5098                 intr_handle->intr_vec = NULL;
5099         }
5100 }
5101
5102 static void
5103 ixgbevf_dev_close(struct rte_eth_dev *dev)
5104 {
5105         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5106
5107         PMD_INIT_FUNC_TRACE();
5108
5109         ixgbe_reset_hw(hw);
5110
5111         ixgbevf_dev_stop(dev);
5112
5113         ixgbe_dev_free_queues(dev);
5114
5115         /**
5116          * Remove the VF MAC address to ensure
5117          * that the VF traffic goes to the PF
5118          * after stop, close and detach of the VF
5119          **/
5120         ixgbevf_remove_mac_addr(dev, 0);
5121 }
5122
5123 /*
5124  * Reset VF device
5125  */
5126 static int
5127 ixgbevf_dev_reset(struct rte_eth_dev *dev)
5128 {
5129         int ret;
5130
5131         ret = eth_ixgbevf_dev_uninit(dev);
5132         if (ret)
5133                 return ret;
5134
5135         ret = eth_ixgbevf_dev_init(dev);
5136
5137         return ret;
5138 }
5139
5140 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5141 {
5142         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5143         struct ixgbe_vfta *shadow_vfta =
5144                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5145         int i = 0, j = 0, vfta = 0, mask = 1;
5146
5147         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5148                 vfta = shadow_vfta->vfta[i];
5149                 if (vfta) {
5150                         mask = 1;
5151                         for (j = 0; j < 32; j++) {
5152                                 if (vfta & mask)
5153                                         ixgbe_set_vfta(hw, (i<<5)+j, 0,
5154                                                        on, false);
5155                                 mask <<= 1;
5156                         }
5157                 }
5158         }
5159
5160 }
5161
5162 static int
5163 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5164 {
5165         struct ixgbe_hw *hw =
5166                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5167         struct ixgbe_vfta *shadow_vfta =
5168                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5169         uint32_t vid_idx = 0;
5170         uint32_t vid_bit = 0;
5171         int ret = 0;
5172
5173         PMD_INIT_FUNC_TRACE();
5174
5175         /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
5176         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5177         if (ret) {
5178                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5179                 return ret;
5180         }
5181         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5182         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5183
5184         /* Save what we set and restore it after device reset */
5185         if (on)
5186                 shadow_vfta->vfta[vid_idx] |= vid_bit;
5187         else
5188                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
5189
5190         return 0;
5191 }
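/*
 * Worked example (editor's addition): for vlan_id = 100 the shadow VFTA
 * bookkeeping above computes
 *   vid_idx = (100 >> 5) & 0x7F = 3    (32 VLAN IDs per 32-bit word)
 *   vid_bit = 1 << (100 & 0x1F) = 1 << 4 = 0x10
 * so bit 4 of shadow_vfta->vfta[3] tracks VLAN 100.
 */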
5192
5193 static void
5194 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5195 {
5196         struct ixgbe_hw *hw =
5197                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5198         uint32_t ctrl;
5199
5200         PMD_INIT_FUNC_TRACE();
5201
5202         if (queue >= hw->mac.max_rx_queues)
5203                 return;
5204
5205         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5206         if (on)
5207                 ctrl |= IXGBE_RXDCTL_VME;
5208         else
5209                 ctrl &= ~IXGBE_RXDCTL_VME;
5210         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5211
5212         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5213 }
5214
5215 static void
5216 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5217 {
5218         struct ixgbe_hw *hw =
5219                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5220         uint16_t i;
5221         int on = 0;
5222
5223         /* The VF only supports the HW VLAN strip feature; others are not supported */
5224         if (mask & ETH_VLAN_STRIP_MASK) {
5225                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
5226
5227                 for (i = 0; i < hw->mac.max_rx_queues; i++)
5228                         ixgbevf_vlan_strip_queue_set(dev, i, on);
5229         }
5230 }
5231
5232 int
5233 ixgbe_vt_check(struct ixgbe_hw *hw)
5234 {
5235         uint32_t reg_val;
5236
5237         /* if Virtualization Technology is enabled */
5238         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
5239         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
5240                 PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
5241                 return -1;
5242         }
5243
5244         return 0;
5245 }
5246
5247 static uint32_t
5248 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
5249 {
5250         uint32_t vector = 0;
5251
5252         switch (hw->mac.mc_filter_type) {
5253         case 0:   /* use bits [47:36] of the address */
5254                 vector = ((uc_addr->addr_bytes[4] >> 4) |
5255                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
5256                 break;
5257         case 1:   /* use bits [46:35] of the address */
5258                 vector = ((uc_addr->addr_bytes[4] >> 3) |
5259                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
5260                 break;
5261         case 2:   /* use bits [45:34] of the address */
5262                 vector = ((uc_addr->addr_bytes[4] >> 2) |
5263                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
5264                 break;
5265         case 3:   /* use bits [43:32] of the address */
5266                 vector = ((uc_addr->addr_bytes[4]) |
5267                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
5268                 break;
5269         default:  /* Invalid mc_filter_type */
5270                 break;
5271         }
5272
5273         /* vector can only be 12 bits wide or the table boundary is exceeded */
5274         vector &= 0xFFF;
5275         return vector;
5276 }
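
/*
 * Worked example (editor's illustration, not part of the original file):
 * with mc_filter_type == 0, MAC 00:11:22:33:44:55 gives addr_bytes[4] =
 * 0x44 and addr_bytes[5] = 0x55, so
 *   vector = (0x44 >> 4) | (0x55 << 4) = 0x004 | 0x550 = 0x554,
 * which already fits within the 12-bit (0xFFF) boundary enforced above.
 */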
5277
5278 static int
5279 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5280                         uint8_t on)
5281 {
5282         uint32_t vector;
5283         uint32_t uta_idx;
5284         uint32_t reg_val;
5285         uint32_t uta_shift;
5286         uint32_t rc;
5287         const uint32_t ixgbe_uta_idx_mask = 0x7F;
5288         const uint32_t ixgbe_uta_bit_shift = 5;
5289         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
5290         const uint32_t bit1 = 0x1;
5291
5292         struct ixgbe_hw *hw =
5293                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5294         struct ixgbe_uta_info *uta_info =
5295                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5296
5297         /* The UTA table only exists on 82599 hardware and newer */
5298         if (hw->mac.type < ixgbe_mac_82599EB)
5299                 return -ENOTSUP;
5300
5301         vector = ixgbe_uta_vector(hw, mac_addr);
5302         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
5303         uta_shift = vector & ixgbe_uta_bit_mask;
5304
5305         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
5306         if (rc == on)
5307                 return 0;
5308
5309         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
5310         if (on) {
5311                 uta_info->uta_in_use++;
5312                 reg_val |= (bit1 << uta_shift);
5313                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
5314         } else {
5315                 uta_info->uta_in_use--;
5316                 reg_val &= ~(bit1 << uta_shift);
5317                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
5318         }
5319
5320         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
5321
5322         if (uta_info->uta_in_use > 0)
5323                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
5324                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
5325         else
5326                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
5327
5328         return 0;
5329 }
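
/*
 * Continuing the worked example above (illustration only): vector 0x554
 * decomposes into uta_idx = (0x554 >> 5) & 0x7F = 42 and uta_shift =
 * 0x554 & 0x1F = 20, i.e. bit 20 of IXGBE_UTA(42) and the same bit in
 * the driver's uta_shadow[42] software copy.
 */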
5330
5331 static int
5332 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
5333 {
5334         int i;
5335         struct ixgbe_hw *hw =
5336                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5337         struct ixgbe_uta_info *uta_info =
5338                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5339
5340         /* The UTA table only exists on 82599 hardware and newer */
5341         if (hw->mac.type < ixgbe_mac_82599EB)
5342                 return -ENOTSUP;
5343
5344         if (on) {
5345                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5346                         uta_info->uta_shadow[i] = ~0;
5347                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
5348                 }
5349         } else {
5350                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5351                         uta_info->uta_shadow[i] = 0;
5352                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
5353                 }
5354         }
5355         return 0;
5356
5357 }
5358
5359 uint32_t
5360 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
5361 {
5362         uint32_t new_val = orig_val;
5363
5364         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
5365                 new_val |= IXGBE_VMOLR_AUPE;
5366         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
5367                 new_val |= IXGBE_VMOLR_ROMPE;
5368         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
5369                 new_val |= IXGBE_VMOLR_ROPE;
5370         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
5371                 new_val |= IXGBE_VMOLR_BAM;
5372         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
5373                 new_val |= IXGBE_VMOLR_MPE;
5374
5375         return new_val;
5376 }
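
/*
 * Illustration only: a caller passing rx_mask = ETH_VMDQ_ACCEPT_UNTAG |
 * ETH_VMDQ_ACCEPT_BROADCAST gets back orig_val with IXGBE_VMOLR_AUPE and
 * IXGBE_VMOLR_BAM set, leaving any other VMOLR bits in orig_val untouched.
 */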
5377
5378 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
5379 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
5380 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
5381 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
5382 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5383         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5384         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5385
5386 static int
5387 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5388                       struct rte_eth_mirror_conf *mirror_conf,
5389                       uint8_t rule_id, uint8_t on)
5390 {
5391         uint32_t mr_ctl, vlvf;
5392         uint32_t mp_lsb = 0;
5393         uint32_t mv_msb = 0;
5394         uint32_t mv_lsb = 0;
5395         uint32_t mp_msb = 0;
5396         uint8_t i = 0;
5397         int reg_index = 0;
5398         uint64_t vlan_mask = 0;
5399
5400         const uint8_t pool_mask_offset = 32;
5401         const uint8_t vlan_mask_offset = 32;
5402         const uint8_t dst_pool_offset = 8;
5403         const uint8_t rule_mr_offset  = 4;
5404         const uint8_t mirror_rule_mask = 0x0F;
5405
5406         struct ixgbe_mirror_info *mr_info =
5407                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5408         struct ixgbe_hw *hw =
5409                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5410         uint8_t mirror_type = 0;
5411
5412         if (ixgbe_vt_check(hw) < 0)
5413                 return -ENOTSUP;
5414
5415         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5416                 return -EINVAL;
5417
5418         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5419                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5420                             mirror_conf->rule_type);
5421                 return -EINVAL;
5422         }
5423
5424         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5425                 mirror_type |= IXGBE_MRCTL_VLME;
5426                 /* Check if vlan id is valid and find corresponding VLAN ID
5427                  * index in VLVF
5428                  */
5429                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5430                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5431                                 /* look up the pool vlan filter
5432                                  * index for this vlan id
5433                                  */
5434                                 reg_index = ixgbe_find_vlvf_slot(
5435                                                 hw,
5436                                                 mirror_conf->vlan.vlan_id[i],
5437                                                 false);
5438                                 if (reg_index < 0)
5439                                         return -EINVAL;
5440                                 vlvf = IXGBE_READ_REG(hw,
5441                                                       IXGBE_VLVF(reg_index));
5442                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
5443                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5444                                       mirror_conf->vlan.vlan_id[i]))
5445                                         vlan_mask |= (1ULL << reg_index);
5446                                 else
5447                                         return -EINVAL;
5448                         }
5449                 }
5450
5451                 if (on) {
5452                         mv_lsb = vlan_mask & 0xFFFFFFFF;
5453                         mv_msb = vlan_mask >> vlan_mask_offset;
5454
5455                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
5456                                                 mirror_conf->vlan.vlan_mask;
5457                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5458                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5459                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5460                                                 mirror_conf->vlan.vlan_id[i];
5461                         }
5462                 } else {
5463                         mv_lsb = 0;
5464                         mv_msb = 0;
5465                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5466                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5467                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5468                 }
5469         }
5470
5471         /**
5472          * If pool mirroring is enabled, write the related pool mask
5473          * register; if it is disabled, clear the PFMRVM register.
5474          */
5475         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5476                 mirror_type |= IXGBE_MRCTL_VPME;
5477                 if (on) {
5478                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5479                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5480                         mr_info->mr_conf[rule_id].pool_mask =
5481                                         mirror_conf->pool_mask;
5482
5483                 } else {
5484                         mp_lsb = 0;
5485                         mp_msb = 0;
5486                         mr_info->mr_conf[rule_id].pool_mask = 0;
5487                 }
5488         }
5489         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5490                 mirror_type |= IXGBE_MRCTL_UPME;
5491         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5492                 mirror_type |= IXGBE_MRCTL_DPME;
5493
5494         /* read mirror control register and recalculate it */
5495         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5496
5497         if (on) {
5498                 mr_ctl |= mirror_type;
5499                 mr_ctl &= mirror_rule_mask;
5500                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
5501         } else {
5502                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
5503         }
5504
5505         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5506         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5507
5508         /* write mirror control register */
5509         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5510
5511         /* write pool mirror control register */
5512         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5513                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5514                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5515                                 mp_msb);
5516         }
5517         /* write VLAN mirror control register */
5518         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5519                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5520                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5521                                 mv_msb);
5522         }
5523
5524         return 0;
5525 }
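
/*
 * Editor's sketch (not part of the original file): how an application
 * could reach ixgbe_mirror_rule_set() through the generic ethdev
 * mirroring API. The port_id argument, VLAN 100, destination pool 1 and
 * rule slot 0 are all hypothetical values chosen for illustration.
 */
static __attribute__((unused)) int
example_mirror_vlan_to_pool(uint16_t port_id)
{
        struct rte_eth_mirror_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.rule_type = ETH_MIRROR_VLAN;       /* mirror by VLAN ID */
        conf.dst_pool = 1;                      /* mirror target pool */
        conf.vlan.vlan_mask = 1ULL << 0;        /* one vlan_id[] entry used */
        conf.vlan.vlan_id[0] = 100;             /* VLAN to mirror */

        /* program rule slot 0 and enable it */
        return rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
}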
5526
5527 static int
5528 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5529 {
5530         int mr_ctl = 0;
5531         uint32_t lsb_val = 0;
5532         uint32_t msb_val = 0;
5533         const uint8_t rule_mr_offset = 4;
5534
5535         struct ixgbe_hw *hw =
5536                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5537         struct ixgbe_mirror_info *mr_info =
5538                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5539
5540         if (ixgbe_vt_check(hw) < 0)
5541                 return -ENOTSUP;
5542
5543         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5544                 return -EINVAL;
5545
5546         memset(&mr_info->mr_conf[rule_id], 0,
5547                sizeof(struct rte_eth_mirror_conf));
5548
5549         /* clear PFVMCTL register */
5550         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5551
5552         /* clear pool mask register */
5553         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5554         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5555
5556         /* clear vlan mask register */
5557         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5558         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
5559
5560         return 0;
5561 }
5562
5563 static int
5564 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5565 {
5566         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5567         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5568         uint32_t mask;
5569         struct ixgbe_hw *hw =
5570                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5571         uint32_t vec = IXGBE_MISC_VEC_ID;
5572
5573         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5574         if (rte_intr_allow_others(intr_handle))
5575                 vec = IXGBE_RX_VEC_START;
5576         mask |= (1 << vec);
5577         RTE_SET_USED(queue_id);
5578         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5579
5580         rte_intr_enable(intr_handle);
5581
5582         return 0;
5583 }
5584
5585 static int
5586 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5587 {
5588         uint32_t mask;
5589         struct ixgbe_hw *hw =
5590                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5591         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5592         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5593         uint32_t vec = IXGBE_MISC_VEC_ID;
5594
5595         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5596         if (rte_intr_allow_others(intr_handle))
5597                 vec = IXGBE_RX_VEC_START;
5598         mask &= ~(1 << vec);
5599         RTE_SET_USED(queue_id);
5600         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5601
5602         return 0;
5603 }
5604
5605 static int
5606 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5607 {
5608         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5609         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5610         uint32_t mask;
5611         struct ixgbe_hw *hw =
5612                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5613         struct ixgbe_interrupt *intr =
5614                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5615
5616         if (queue_id < 16) {
5617                 ixgbe_disable_intr(hw);
5618                 intr->mask |= (1 << queue_id);
5619                 ixgbe_enable_intr(dev);
5620         } else if (queue_id < 32) {
5621                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5622                 mask |= (1 << queue_id);
5623                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5624         } else if (queue_id < 64) {
5625                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5626                 mask |= (1 << (queue_id - 32));
5627                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5628         }
5629         rte_intr_enable(intr_handle);
5630
5631         return 0;
5632 }
5633
5634 static int
5635 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5636 {
5637         uint32_t mask;
5638         struct ixgbe_hw *hw =
5639                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5640         struct ixgbe_interrupt *intr =
5641                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5642
5643         if (queue_id < 16) {
5644                 ixgbe_disable_intr(hw);
5645                 intr->mask &= ~(1 << queue_id);
5646                 ixgbe_enable_intr(dev);
5647         } else if (queue_id < 32) {
5648                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5649                 mask &= ~(1 << queue_id);
5650                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5651         } else if (queue_id < 64) {
5652                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5653                 mask &= ~(1 << (queue_id - 32));
5654                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5655         }
5656
5657         return 0;
5658 }
5659
5660 static void
5661 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5662                      uint8_t queue, uint8_t msix_vector)
5663 {
5664         uint32_t tmp, idx;
5665
5666         if (direction == -1) {
5667                 /* other causes */
5668                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5669                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
5670                 tmp &= ~0xFF;
5671                 tmp |= msix_vector;
5672                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
5673         } else {
5674                 /* rx or tx cause */
5675                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5676                 idx = ((16 * (queue & 1)) + (8 * direction));
5677                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
5678                 tmp &= ~(0xFF << idx);
5679                 tmp |= (msix_vector << idx);
5680                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
5681         }
5682 }
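
/*
 * Illustration only: for an Rx cause (direction == 0) on queue 3, the
 * code above computes idx = (16 * (3 & 1)) + (8 * 0) = 16, so the 8-bit
 * vector field is written to bits [23:16] of VTIVAR(3 >> 1) = VTIVAR(1).
 */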
5683
5684 /**
5685  * set the IVAR registers, mapping interrupt causes to vectors
5686  * @param hw
5687  *  pointer to ixgbe_hw struct
5688  * @param direction
5689  *  0 for Rx, 1 for Tx, -1 for other causes
5690  * @param queue
5691  *  queue to map the corresponding interrupt to
5692  * @param msix_vector
5693  *  the vector to map to the corresponding queue
5694  */
5695 static void
5696 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5697                    uint8_t queue, uint8_t msix_vector)
5698 {
5699         uint32_t tmp, idx;
5700
5701         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5702         if (hw->mac.type == ixgbe_mac_82598EB) {
5703                 if (direction == -1)
5704                         direction = 0;
5705                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
5706                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
5707                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
5708                 tmp |= (msix_vector << (8 * (queue & 0x3)));
5709                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
5710         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
5711                         (hw->mac.type == ixgbe_mac_X540) ||
5712                         (hw->mac.type == ixgbe_mac_X550)) {
5713                 if (direction == -1) {
5714                         /* other causes */
5715                         idx = ((queue & 1) * 8);
5716                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5717                         tmp &= ~(0xFF << idx);
5718                         tmp |= (msix_vector << idx);
5719                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
5720                 } else {
5721                         /* rx or tx causes */
5722                         idx = ((16 * (queue & 1)) + (8 * direction));
5723                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
5724                         tmp &= ~(0xFF << idx);
5725                         tmp |= (msix_vector << idx);
5726                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
5727                 }
5728         }
5729 }
5730
5731 static void
5732 ixgbevf_configure_msix(struct rte_eth_dev *dev)
5733 {
5734         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5735         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5736         struct ixgbe_hw *hw =
5737                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5738         uint32_t q_idx;
5739         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
5740         uint32_t base = IXGBE_MISC_VEC_ID;
5741
5742         /* Configure VF other cause ivar */
5743         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
5744
5745         /* don't configure the msix register if no mapping has been done
5746          * between intr vectors and event fds.
5747          */
5748         if (!rte_intr_dp_is_en(intr_handle))
5749                 return;
5750
5751         if (rte_intr_allow_others(intr_handle)) {
5752                 base = IXGBE_RX_VEC_START;
5753                 vector_idx = IXGBE_RX_VEC_START;
5754         }
5755
5756         /* Configure all RX queues of VF */
5757         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
5758                 /* Force all queues to use vector 0,
5759                  * as IXGBE_VF_MAXMSIVECOTR = 1
5760                  */
5761                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
5762                 intr_handle->intr_vec[q_idx] = vector_idx;
5763                 if (vector_idx < base + intr_handle->nb_efd - 1)
5764                         vector_idx++;
5765         }
5766 }
5767
5768 /**
5769  * Sets up the hardware to properly generate MSI-X interrupts
5770  * @param dev
5771  *  pointer to the rte_eth_dev structure
5772  */
5773 static void
5774 ixgbe_configure_msix(struct rte_eth_dev *dev)
5775 {
5776         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5777         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5778         struct ixgbe_hw *hw =
5779                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5780         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
5781         uint32_t vec = IXGBE_MISC_VEC_ID;
5782         uint32_t mask;
5783         uint32_t gpie;
5784
5785         /* don't configure the msix register if no mapping has been done
5786          * between intr vectors and event fds
5787          */
5788         if (!rte_intr_dp_is_en(intr_handle))
5789                 return;
5790
5791         if (rte_intr_allow_others(intr_handle))
5792                 vec = base = IXGBE_RX_VEC_START;
5793
5794         /* setup GPIE for MSI-x mode */
5795         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5796         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5797                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
5798         /* auto clearing and auto setting corresponding bits in EIMS
5799          * when MSI-X interrupt is triggered
5800          */
5801         if (hw->mac.type == ixgbe_mac_82598EB) {
5802                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5803         } else {
5804                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5805                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5806         }
5807         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5808
5809         /* Populate the IVAR table and set the ITR values to the
5810          * corresponding register.
5811          */
5812         for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
5813              queue_id++) {
5814                 /* by default, 1:1 mapping */
5815                 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
5816                 intr_handle->intr_vec[queue_id] = vec;
5817                 if (vec < base + intr_handle->nb_efd - 1)
5818                         vec++;
5819         }
5820
5821         switch (hw->mac.type) {
5822         case ixgbe_mac_82598EB:
5823                 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
5824                                    IXGBE_MISC_VEC_ID);
5825                 break;
5826         case ixgbe_mac_82599EB:
5827         case ixgbe_mac_X540:
5828         case ixgbe_mac_X550:
5829                 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
5830                 break;
5831         default:
5832                 break;
5833         }
5834         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
5835                         IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
5836
5837         /* set up to autoclear timer, and the vectors */
5838         mask = IXGBE_EIMS_ENABLE_MASK;
5839         mask &= ~(IXGBE_EIMS_OTHER |
5840                   IXGBE_EIMS_MAILBOX |
5841                   IXGBE_EIMS_LSC);
5842
5843         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5844 }
5845
5846 int
5847 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
5848                            uint16_t queue_idx, uint16_t tx_rate)
5849 {
5850         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5851         uint32_t rf_dec, rf_int;
5852         uint32_t bcnrc_val;
5853         uint16_t link_speed = dev->data->dev_link.link_speed;
5854
5855         if (queue_idx >= hw->mac.max_tx_queues)
5856                 return -EINVAL;
5857
5858         if (tx_rate != 0) {
5859                 /* Calculate the rate factor values to set */
5860                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
5861                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
5862                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
5863
5864                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
5865                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
5866                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
5867                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
5868         } else {
5869                 bcnrc_val = 0;
5870         }
5871
5872         /*
5873          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
5874          * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
5875          * set as 0x4.
5876          */
5877         if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
5878                 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
5879                                 IXGBE_MAX_JUMBO_FRAME_SIZE))
5880                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5881                         IXGBE_MMW_SIZE_JUMBO_FRAME);
5882         else
5883                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5884                         IXGBE_MMW_SIZE_DEFAULT);
5885
5886         /* Set RTTBCNRC of queue X */
5887         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
5888         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
5889         IXGBE_WRITE_FLUSH(hw);
5890
5891         return 0;
5892 }
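
/*
 * Worked example (editor's illustration, not part of the original file),
 * assuming link_speed and tx_rate are both in Mbps: on a 10G link with
 * tx_rate = 300,
 *   rf_int = 10000 / 300 = 33
 *   rf_dec = ((10000 % 300) << IXGBE_RTTBCNRC_RF_INT_SHIFT) / 300
 *          = (100 << 14) / 300 = 5461
 * so the programmed factor is 33 + 5461/16384 ~= 33.33 = 10000/300, and
 * the queue is shaped to link_speed / 33.33 ~= 300 Mbps.
 */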
5893
5894 static int
5895 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5896                      __attribute__((unused)) uint32_t index,
5897                      __attribute__((unused)) uint32_t pool)
5898 {
5899         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5900         int diag;
5901
5902         /*
5903          * On an 82599 VF, adding the same MAC address again is not an
5904          * idempotent operation. Trap this case to avoid exhausting the [very
5905          * limited] set of PF resources used to store VF MAC addresses.
5906          */
5907         if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5908                 return -1;
5909         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5910         if (diag != 0)
5911                 PMD_DRV_LOG(ERR, "Unable to add MAC address "
5912                             "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
5913                             mac_addr->addr_bytes[0],
5914                             mac_addr->addr_bytes[1],
5915                             mac_addr->addr_bytes[2],
5916                             mac_addr->addr_bytes[3],
5917                             mac_addr->addr_bytes[4],
5918                             mac_addr->addr_bytes[5],
5919                             diag);
5920         return diag;
5921 }
5922
5923 static void
5924 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
5925 {
5926         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5927         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
5928         struct ether_addr *mac_addr;
5929         uint32_t i;
5930         int diag;
5931
5932         /*
5933          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
5934          * not support the deletion of a given MAC address.
5935          * Instead, it requires deleting all MAC addresses, then adding back
5936          * all MAC addresses except the one to be deleted.
5937          */
5938         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
5939
5940         /*
5941          * Add again all MAC addresses, with the exception of the deleted one
5942          * and of the permanent MAC address.
5943          */
5944         for (i = 0, mac_addr = dev->data->mac_addrs;
5945              i < hw->mac.num_rar_entries; i++, mac_addr++) {
5946                 /* Skip the deleted MAC address */
5947                 if (i == index)
5948                         continue;
5949                 /* Skip NULL MAC addresses */
5950                 if (is_zero_ether_addr(mac_addr))
5951                         continue;
5952                 /* Skip the permanent MAC address */
5953                 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5954                         continue;
5955                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5956                 if (diag != 0)
5957                         PMD_DRV_LOG(ERR,
5958                                     "Failed to re-add MAC address "
5959                                     "%02x:%02x:%02x:%02x:%02x:%02x - "
5960                                     "diag=%d",
5961                                     mac_addr->addr_bytes[0],
5962                                     mac_addr->addr_bytes[1],
5963                                     mac_addr->addr_bytes[2],
5964                                     mac_addr->addr_bytes[3],
5965                                     mac_addr->addr_bytes[4],
5966                                     mac_addr->addr_bytes[5],
5967                                     diag);
5968         }
5969 }
5970
5971 static void
5972 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
5973 {
5974         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5975
5976         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
5977 }
5978
5979 int
5980 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
5981                         struct rte_eth_syn_filter *filter,
5982                         bool add)
5983 {
5984         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5985         struct ixgbe_filter_info *filter_info =
5986                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5987         uint32_t syn_info;
5988         uint32_t synqf;
5989
5990         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
5991                 return -EINVAL;
5992
5993         syn_info = filter_info->syn_info;
5994
5995         if (add) {
5996                 if (syn_info & IXGBE_SYN_FILTER_ENABLE)
5997                         return -EINVAL;
5998                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
5999                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
6000
6001                 if (filter->hig_pri)
6002                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
6003                 else
6004                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
6005         } else {
6006                 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6007                 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
6008                         return -ENOENT;
6009                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
6010         }
6011
6012         filter_info->syn_info = synqf;
6013         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
6014         IXGBE_WRITE_FLUSH(hw);
6015         return 0;
6016 }
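
/*
 * Editor's sketch (not part of the original file): adding a TCP SYN
 * filter through the generic filter API; ixgbe_syn_filter_handle() below
 * routes RTE_ETH_FILTER_ADD here. port_id and queue 4 are hypothetical.
 */
static __attribute__((unused)) int
example_add_syn_filter(uint16_t port_id)
{
        struct rte_eth_syn_filter filter = {
                .hig_pri = 1,   /* give SYN matches high priority */
                .queue = 4,     /* steer TCP SYN packets to queue 4 */
        };

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
                                       RTE_ETH_FILTER_ADD, &filter);
}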
6017
6018 static int
6019 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
6020                         struct rte_eth_syn_filter *filter)
6021 {
6022         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6023         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6024
6025         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
6026                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
6027                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
6028                 return 0;
6029         }
6030         return -ENOENT;
6031 }
6032
6033 static int
6034 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
6035                         enum rte_filter_op filter_op,
6036                         void *arg)
6037 {
6038         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6039         int ret;
6040
6041         MAC_TYPE_FILTER_SUP(hw->mac.type);
6042
6043         if (filter_op == RTE_ETH_FILTER_NOP)
6044                 return 0;
6045
6046         if (arg == NULL) {
6047                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
6048                             filter_op);
6049                 return -EINVAL;
6050         }
6051
6052         switch (filter_op) {
6053         case RTE_ETH_FILTER_ADD:
6054                 ret = ixgbe_syn_filter_set(dev,
6055                                 (struct rte_eth_syn_filter *)arg,
6056                                 TRUE);
6057                 break;
6058         case RTE_ETH_FILTER_DELETE:
6059                 ret = ixgbe_syn_filter_set(dev,
6060                                 (struct rte_eth_syn_filter *)arg,
6061                                 FALSE);
6062                 break;
6063         case RTE_ETH_FILTER_GET:
6064                 ret = ixgbe_syn_filter_get(dev,
6065                                 (struct rte_eth_syn_filter *)arg);
6066                 break;
6067         default:
6068                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
6069                 ret = -EINVAL;
6070                 break;
6071         }
6072
6073         return ret;
6074 }
6075
6076
6077 static inline enum ixgbe_5tuple_protocol
6078 convert_protocol_type(uint8_t protocol_value)
6079 {
6080         if (protocol_value == IPPROTO_TCP)
6081                 return IXGBE_FILTER_PROTOCOL_TCP;
6082         else if (protocol_value == IPPROTO_UDP)
6083                 return IXGBE_FILTER_PROTOCOL_UDP;
6084         else if (protocol_value == IPPROTO_SCTP)
6085                 return IXGBE_FILTER_PROTOCOL_SCTP;
6086         else
6087                 return IXGBE_FILTER_PROTOCOL_NONE;
6088 }
6089
6090 /* inject a 5-tuple filter into HW */
6091 static inline void
6092 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
6093                            struct ixgbe_5tuple_filter *filter)
6094 {
6095         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6096         int i;
6097         uint32_t ftqf, sdpqf;
6098         uint32_t l34timir = 0;
6099         uint8_t mask = 0xff;
6100
6101         i = filter->index;
6102
6103         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
6104                                 IXGBE_SDPQF_DSTPORT_SHIFT);
6105         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
6106
6107         ftqf = (uint32_t)(filter->filter_info.proto &
6108                 IXGBE_FTQF_PROTOCOL_MASK);
6109         ftqf |= (uint32_t)((filter->filter_info.priority &
6110                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
6111         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
6112                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
6113         if (filter->filter_info.dst_ip_mask == 0)
6114                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
6115         if (filter->filter_info.src_port_mask == 0)
6116                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
6117         if (filter->filter_info.dst_port_mask == 0)
6118                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
6119         if (filter->filter_info.proto_mask == 0)
6120                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
6121         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
6122         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
6123         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
6124
6125         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
6126         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
6127         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
6128         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
6129
6130         l34timir |= IXGBE_L34T_IMIR_RESERVE;
6131         l34timir |= (uint32_t)(filter->queue <<
6132                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
6133         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
6134 }
6135
6136 /*
6137  * add a 5tuple filter
6138  *
6139  * @param
6140  * dev: Pointer to struct rte_eth_dev.
6141  * filter: pointer to the filter that will be added; the function
6142  * allocates an unused index, stores it in filter->index, and the
6143  * hardware steers matching packets to filter->queue.
6144  *
6145  * @return
6146  *    - On success, zero.
6147  *    - On failure, a negative value.
6148  */
6149 static int
6150 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
6151                         struct ixgbe_5tuple_filter *filter)
6152 {
6153         struct ixgbe_filter_info *filter_info =
6154                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6155         int i, idx, shift;
6156
6157         /*
6158          * look for an unused 5tuple filter index,
6159          * and insert the filter into the list.
6160          */
6161         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
6162                 idx = i / (sizeof(uint32_t) * NBBY);
6163                 shift = i % (sizeof(uint32_t) * NBBY);
6164                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
6165                         filter_info->fivetuple_mask[idx] |= 1 << shift;
6166                         filter->index = i;
6167                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
6168                                           filter,
6169                                           entries);
6170                         break;
6171                 }
6172         }
6173         if (i >= IXGBE_MAX_FTQF_FILTERS) {
6174                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
6175                 return -ENOSYS;
6176         }
6177
6178         ixgbe_inject_5tuple_filter(dev, filter);
6179
6180         return 0;
6181 }
6182
6183 /*
6184  * remove a 5tuple filter
6185  *
6186  * @param
6187  * dev: Pointer to struct rte_eth_dev.
6188  * filter: pointer to the filter to be removed.
6189  */
6190 static void
6191 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
6192                         struct ixgbe_5tuple_filter *filter)
6193 {
6194         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6195         struct ixgbe_filter_info *filter_info =
6196                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6197         uint16_t index = filter->index;
6198
6199         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
6200                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
6201         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
6202         rte_free(filter);
6203
6204         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
6205         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
6206         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
6207         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
6208         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
6209 }
6210
6211 static int
6212 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
6213 {
6214         struct ixgbe_hw *hw;
6215         uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
6216         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
6217
6218         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6219
6220         if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
6221                 return -EINVAL;
6222
6223         /* Refuse an mtu that requires scattered-packet support when that
6224          * feature has not been enabled beforehand.
6225          */
6226         if (!rx_conf->enable_scatter &&
6227             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
6228              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
6229                 return -EINVAL;
6230
6231         /*
6232          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
6233          * request of the version 2.0 of the mailbox API.
6234          * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
6235          * of the mailbox API.
6236          * The IXGBE_VF_SET_LPE request does not work with ixgbe pf drivers
6237          * prior to 3.11.33, which contains the following change:
6238          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
6239          */
6240         ixgbevf_rlpml_set_vf(hw, max_frame);
6241
6242         /* update max frame size */
6243         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
6244         return 0;
6245 }
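
/*
 * Illustration only: for mtu = 9000 the limit programmed above is
 * max_frame = 9000 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 9018 bytes,
 * and the scatter check additionally budgets 2 * IXGBE_VLAN_TAG_SIZE for
 * a potentially double-tagged frame.
 */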
6246
6247 static inline struct ixgbe_5tuple_filter *
6248 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
6249                         struct ixgbe_5tuple_filter_info *key)
6250 {
6251         struct ixgbe_5tuple_filter *it;
6252
6253         TAILQ_FOREACH(it, filter_list, entries) {
6254                 if (memcmp(key, &it->filter_info,
6255                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
6256                         return it;
6257                 }
6258         }
6259         return NULL;
6260 }
6261
6262 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
6263 static inline int
6264 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
6265                         struct ixgbe_5tuple_filter_info *filter_info)
6266 {
6267         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
6268                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
6269                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
6270                 return -EINVAL;
6271
6272         switch (filter->dst_ip_mask) {
6273         case UINT32_MAX:
6274                 filter_info->dst_ip_mask = 0;
6275                 filter_info->dst_ip = filter->dst_ip;
6276                 break;
6277         case 0:
6278                 filter_info->dst_ip_mask = 1;
6279                 break;
6280         default:
6281                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
6282                 return -EINVAL;
6283         }
6284
6285         switch (filter->src_ip_mask) {
6286         case UINT32_MAX:
6287                 filter_info->src_ip_mask = 0;
6288                 filter_info->src_ip = filter->src_ip;
6289                 break;
6290         case 0:
6291                 filter_info->src_ip_mask = 1;
6292                 break;
6293         default:
6294                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
6295                 return -EINVAL;
6296         }
6297
6298         switch (filter->dst_port_mask) {
6299         case UINT16_MAX:
6300                 filter_info->dst_port_mask = 0;
6301                 filter_info->dst_port = filter->dst_port;
6302                 break;
6303         case 0:
6304                 filter_info->dst_port_mask = 1;
6305                 break;
6306         default:
6307                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
6308                 return -EINVAL;
6309         }
6310
6311         switch (filter->src_port_mask) {
6312         case UINT16_MAX:
6313                 filter_info->src_port_mask = 0;
6314                 filter_info->src_port = filter->src_port;
6315                 break;
6316         case 0:
6317                 filter_info->src_port_mask = 1;
6318                 break;
6319         default:
6320                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
6321                 return -EINVAL;
6322         }
6323
6324         switch (filter->proto_mask) {
6325         case UINT8_MAX:
6326                 filter_info->proto_mask = 0;
6327                 filter_info->proto =
6328                         convert_protocol_type(filter->proto);
6329                 break;
6330         case 0:
6331                 filter_info->proto_mask = 1;
6332                 break;
6333         default:
6334                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
6335                 return -EINVAL;
6336         }
6337
6338         filter_info->priority = (uint8_t)filter->priority;
6339         return 0;
6340 }
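
/*
 * Editor's sketch (hypothetical values, not part of the original file):
 * note the inversion above -- an all-ones mask in the generic API means
 * "compare this field", while the *_mask bits written to filter_info use
 * the hardware convention where 0 means compare. A filter matching any
 * TCP packet to destination port 80 could be filled in as follows; the
 * generic API documents ports as big endian, hence rte_cpu_to_be_16().
 */
static __attribute__((unused)) void
example_fill_tcp_dport_80(struct rte_eth_ntuple_filter *f)
{
        memset(f, 0, sizeof(*f));
        f->flags = RTE_5TUPLE_FLAGS;            /* full 5-tuple mode */
        f->proto = IPPROTO_TCP;
        f->proto_mask = UINT8_MAX;              /* compare the protocol */
        f->dst_port = rte_cpu_to_be_16(80);     /* HTTP, network order */
        f->dst_port_mask = UINT16_MAX;          /* compare the dst port */
        f->priority = 1;                        /* within the 5-tuple range */
        f->queue = 2;                           /* hypothetical Rx queue */
}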
6341
6342 /*
6343  * add or delete a ntuple filter
6344  *
6345  * @param
6346  * dev: Pointer to struct rte_eth_dev.
6347  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6348  * add: if true, add filter, if false, remove filter
6349  *
6350  * @return
6351  *    - On success, zero.
6352  *    - On failure, a negative value.
6353  */
6354 int
6355 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
6356                         struct rte_eth_ntuple_filter *ntuple_filter,
6357                         bool add)
6358 {
6359         struct ixgbe_filter_info *filter_info =
6360                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6361         struct ixgbe_5tuple_filter_info filter_5tuple;
6362         struct ixgbe_5tuple_filter *filter;
6363         int ret;
6364
6365         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6366                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6367                 return -EINVAL;
6368         }
6369
6370         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6371         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6372         if (ret < 0)
6373                 return ret;
6374
6375         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6376                                          &filter_5tuple);
6377         if (filter != NULL && add) {
6378                 PMD_DRV_LOG(ERR, "filter exists.");
6379                 return -EEXIST;
6380         }
6381         if (filter == NULL && !add) {
6382                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6383                 return -ENOENT;
6384         }
6385
6386         if (add) {
6387                 filter = rte_zmalloc("ixgbe_5tuple_filter",
6388                                 sizeof(struct ixgbe_5tuple_filter), 0);
6389                 if (filter == NULL)
6390                         return -ENOMEM;
6391                 rte_memcpy(&filter->filter_info,
6392                                  &filter_5tuple,
6393                                  sizeof(struct ixgbe_5tuple_filter_info));
6394                 filter->queue = ntuple_filter->queue;
6395                 ret = ixgbe_add_5tuple_filter(dev, filter);
6396                 if (ret < 0) {
6397                         rte_free(filter);
6398                         return ret;
6399                 }
6400         } else
6401                 ixgbe_remove_5tuple_filter(dev, filter);
6402
6403         return 0;
6404 }
6405
6406 /*
6407  * get a ntuple filter
6408  *
6409  * @param
6410  * dev: Pointer to struct rte_eth_dev.
6411  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6412  *
6413  * @return
6414  *    - On success, zero.
6415  *    - On failure, a negative value.
6416  */
6417 static int
6418 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
6419                         struct rte_eth_ntuple_filter *ntuple_filter)
6420 {
6421         struct ixgbe_filter_info *filter_info =
6422                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6423         struct ixgbe_5tuple_filter_info filter_5tuple;
6424         struct ixgbe_5tuple_filter *filter;
6425         int ret;
6426
6427         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6428                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6429                 return -EINVAL;
6430         }
6431
6432         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6433         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6434         if (ret < 0)
6435                 return ret;
6436
6437         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6438                                          &filter_5tuple);
6439         if (filter == NULL) {
6440                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6441                 return -ENOENT;
6442         }
6443         ntuple_filter->queue = filter->queue;
6444         return 0;
6445 }
6446
6447 /*
6448  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
6449  * @dev: pointer to rte_eth_dev structure
6450  * @filter_op: operation to be taken.
6451  * @arg: a pointer to specific structure corresponding to the filter_op
6452  *
6453  * @return
6454  *    - On success, zero.
6455  *    - On failure, a negative value.
6456  */
6457 static int
6458 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
6459                                 enum rte_filter_op filter_op,
6460                                 void *arg)
6461 {
6462         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6463         int ret;
6464
6465         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
6466
6467         if (filter_op == RTE_ETH_FILTER_NOP)
6468                 return 0;
6469
6470         if (arg == NULL) {
6471                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6472                             filter_op);
6473                 return -EINVAL;
6474         }
6475
6476         switch (filter_op) {
6477         case RTE_ETH_FILTER_ADD:
6478                 ret = ixgbe_add_del_ntuple_filter(dev,
6479                         (struct rte_eth_ntuple_filter *)arg,
6480                         TRUE);
6481                 break;
6482         case RTE_ETH_FILTER_DELETE:
6483                 ret = ixgbe_add_del_ntuple_filter(dev,
6484                         (struct rte_eth_ntuple_filter *)arg,
6485                         FALSE);
6486                 break;
6487         case RTE_ETH_FILTER_GET:
6488                 ret = ixgbe_get_ntuple_filter(dev,
6489                         (struct rte_eth_ntuple_filter *)arg);
6490                 break;
6491         default:
6492                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6493                 ret = -EINVAL;
6494                 break;
6495         }
6496         return ret;
6497 }
6498
6499 int
6500 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6501                         struct rte_eth_ethertype_filter *filter,
6502                         bool add)
6503 {
6504         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6505         struct ixgbe_filter_info *filter_info =
6506                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6507         uint32_t etqf = 0;
6508         uint32_t etqs = 0;
6509         int ret;
6510         struct ixgbe_ethertype_filter ethertype_filter;
6511
6512         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6513                 return -EINVAL;
6514
6515         if (filter->ether_type == ETHER_TYPE_IPv4 ||
6516                 filter->ether_type == ETHER_TYPE_IPv6) {
6517                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
6518                         " ethertype filter.", filter->ether_type);
6519                 return -EINVAL;
6520         }
6521
6522         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6523                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6524                 return -EINVAL;
6525         }
6526         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6527                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
6528                 return -EINVAL;
6529         }
6530
6531         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6532         if (ret >= 0 && add) {
6533                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6534                             filter->ether_type);
6535                 return -EEXIST;
6536         }
6537         if (ret < 0 && !add) {
6538                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6539                             filter->ether_type);
6540                 return -ENOENT;
6541         }
6542
6543         if (add) {
6544                 etqf = IXGBE_ETQF_FILTER_EN;
6545                 etqf |= (uint32_t)filter->ether_type;
6546                 etqs |= (uint32_t)((filter->queue <<
6547                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
6548                                     IXGBE_ETQS_RX_QUEUE);
6549                 etqs |= IXGBE_ETQS_QUEUE_EN;
6550
6551                 ethertype_filter.ethertype = filter->ether_type;
6552                 ethertype_filter.etqf = etqf;
6553                 ethertype_filter.etqs = etqs;
6554                 ethertype_filter.conf = FALSE;
6555                 ret = ixgbe_ethertype_filter_insert(filter_info,
6556                                                     &ethertype_filter);
6557                 if (ret < 0) {
6558                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
6559                         return -ENOSPC;
6560                 }
6561         } else {
6562                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6563                 if (ret < 0)
6564                         return -ENOSYS;
6565         }
6566         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6567         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6568         IXGBE_WRITE_FLUSH(hw);
6569
6570         return 0;
6571 }
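
/*
 * Editor's sketch (hypothetical port and queue, not part of the original
 * file): steering a non-IP ethertype -- 0x88F7, PTPv2 over Ethernet -- to
 * a dedicated Rx queue via ixgbe_ethertype_filter_handle() below.
 */
static __attribute__((unused)) int
example_add_ethertype_filter(uint16_t port_id)
{
        struct rte_eth_ethertype_filter filter;

        memset(&filter, 0, sizeof(filter));
        filter.ether_type = 0x88F7;     /* PTPv2 over Ethernet */
        filter.flags = 0;               /* no MAC compare, no drop */
        filter.queue = 1;               /* hypothetical Rx queue */

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
                                       RTE_ETH_FILTER_ADD, &filter);
}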
6572
6573 static int
6574 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
6575                         struct rte_eth_ethertype_filter *filter)
6576 {
6577         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6578         struct ixgbe_filter_info *filter_info =
6579                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6580         uint32_t etqf, etqs;
6581         int ret;
6582
6583         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6584         if (ret < 0) {
6585                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6586                             filter->ether_type);
6587                 return -ENOENT;
6588         }
6589
6590         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
6591         if (etqf & IXGBE_ETQF_FILTER_EN) {
6592                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
6593                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
6594                 filter->flags = 0;
6595                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
6596                                IXGBE_ETQS_RX_QUEUE_SHIFT;
6597                 return 0;
6598         }
6599         return -ENOENT;
6600 }
6601
6602 /*
6603  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
6604  * @dev: pointer to rte_eth_dev structure
6605  * @filter_op: operation to be taken.
6606  * @arg: a pointer to specific structure corresponding to the filter_op
6607  */
6608 static int
6609 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
6610                                 enum rte_filter_op filter_op,
6611                                 void *arg)
6612 {
6613         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6614         int ret;
6615
6616         MAC_TYPE_FILTER_SUP(hw->mac.type);
6617
6618         if (filter_op == RTE_ETH_FILTER_NOP)
6619                 return 0;
6620
6621         if (arg == NULL) {
6622                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6623                             filter_op);
6624                 return -EINVAL;
6625         }
6626
6627         switch (filter_op) {
6628         case RTE_ETH_FILTER_ADD:
6629                 ret = ixgbe_add_del_ethertype_filter(dev,
6630                         (struct rte_eth_ethertype_filter *)arg,
6631                         TRUE);
6632                 break;
6633         case RTE_ETH_FILTER_DELETE:
6634                 ret = ixgbe_add_del_ethertype_filter(dev,
6635                         (struct rte_eth_ethertype_filter *)arg,
6636                         FALSE);
6637                 break;
6638         case RTE_ETH_FILTER_GET:
6639                 ret = ixgbe_get_ethertype_filter(dev,
6640                         (struct rte_eth_ethertype_filter *)arg);
6641                 break;
6642         default:
6643                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6644                 ret = -EINVAL;
6645                 break;
6646         }
6647         return ret;
6648 }
6649
6650 static int
6651 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
6652                      enum rte_filter_type filter_type,
6653                      enum rte_filter_op filter_op,
6654                      void *arg)
6655 {
6656         int ret = 0;
6657
6658         switch (filter_type) {
6659         case RTE_ETH_FILTER_NTUPLE:
6660                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
6661                 break;
6662         case RTE_ETH_FILTER_ETHERTYPE:
6663                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
6664                 break;
6665         case RTE_ETH_FILTER_SYN:
6666                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
6667                 break;
6668         case RTE_ETH_FILTER_FDIR:
6669                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
6670                 break;
6671         case RTE_ETH_FILTER_L2_TUNNEL:
6672                 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
6673                 break;
6674         case RTE_ETH_FILTER_GENERIC:
6675                 if (filter_op != RTE_ETH_FILTER_GET)
6676                         return -EINVAL;
6677                 *(const void **)arg = &ixgbe_flow_ops;
6678                 break;
6679         default:
6680                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
6681                                                         filter_type);
6682                 ret = -EINVAL;
6683                 break;
6684         }
6685
6686         return ret;
6687 }
6688
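/* Iterator callback for the shared-code multicast update: return the
 * current address and advance the caller's list pointer by one
 * ether_addr.  VMDq pool selection is not used here, so *vmdq is 0.
 */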
6689 static u8 *
6690 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
6691                         u8 **mc_addr_ptr, u32 *vmdq)
6692 {
6693         u8 *mc_addr;
6694
6695         *vmdq = 0;
6696         mc_addr = *mc_addr_ptr;
6697         *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
6698         return mc_addr;
6699 }
6700
6701 static int
6702 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
6703                           struct ether_addr *mc_addr_set,
6704                           uint32_t nb_mc_addr)
6705 {
6706         struct ixgbe_hw *hw;
6707         u8 *mc_addr_list;
6708
6709         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6710         mc_addr_list = (u8 *)mc_addr_set;
6711         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
6712                                          ixgbe_dev_addr_list_itr, TRUE);
6713 }
6714
6715 static uint64_t
6716 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
6717 {
6718         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6719         uint64_t systime_cycles;
6720
6721         switch (hw->mac.type) {
6722         case ixgbe_mac_X550:
6723         case ixgbe_mac_X550EM_x:
6724         case ixgbe_mac_X550EM_a:
6725                 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
6726                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6727                 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6728                                 * NSEC_PER_SEC;
6729                 break;
6730         default:
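                /* SYSTIML holds the low and SYSTIMH the high 32 bits of a
                 * free-running 64-bit cycle counter.
                 */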
6731                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6732                 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6733                                 << 32;
6734         }
6735
6736         return systime_cycles;
6737 }
6738
6739 static uint64_t
6740 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6741 {
6742         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6743         uint64_t rx_tstamp_cycles;
6744
6745         switch (hw->mac.type) {
6746         case ixgbe_mac_X550:
6747         case ixgbe_mac_X550EM_x:
6748         case ixgbe_mac_X550EM_a:
6749                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
6750                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6751                 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6752                                 * NSEC_PER_SEC;
6753                 break;
6754         default:
6755                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
6756                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6757                 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6758                                 << 32;
6759         }
6760
6761         return rx_tstamp_cycles;
6762 }
6763
6764 static uint64_t
6765 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6766 {
6767         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6768         uint64_t tx_tstamp_cycles;
6769
6770         switch (hw->mac.type) {
6771         case ixgbe_mac_X550:
6772         case ixgbe_mac_X550EM_x:
6773         case ixgbe_mac_X550EM_a:
6774                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
6775                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6776                 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6777                                 * NSEC_PER_SEC;
6778                 break;
6779         default:
6780                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
6781                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6782                 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6783                                 << 32;
6784         }
6785
6786         return tx_tstamp_cycles;
6787 }
6788
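/* Program TIMINCA for the current link speed and (re)initialize the
 * SYSTIME, Rx and Tx timestamp timecounters.
 */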
6789 static void
6790 ixgbe_start_timecounters(struct rte_eth_dev *dev)
6791 {
6792         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6793         struct ixgbe_adapter *adapter =
6794                 (struct ixgbe_adapter *)dev->data->dev_private;
6795         struct rte_eth_link link;
6796         uint32_t incval = 0;
6797         uint32_t shift = 0;
6798
6799         /* Get current link speed. */
6800         memset(&link, 0, sizeof(link));
6801         ixgbe_dev_link_update(dev, 1);
6802         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
6803
6804         switch (link.link_speed) {
6805         case ETH_SPEED_NUM_100M:
6806                 incval = IXGBE_INCVAL_100;
6807                 shift = IXGBE_INCVAL_SHIFT_100;
6808                 break;
6809         case ETH_SPEED_NUM_1G:
6810                 incval = IXGBE_INCVAL_1GB;
6811                 shift = IXGBE_INCVAL_SHIFT_1GB;
6812                 break;
6813         case ETH_SPEED_NUM_10G:
6814         default:
6815                 incval = IXGBE_INCVAL_10GB;
6816                 shift = IXGBE_INCVAL_SHIFT_10GB;
6817                 break;
6818         }
6819
6820         switch (hw->mac.type) {
6821         case ixgbe_mac_X550:
6822         case ixgbe_mac_X550EM_x:
6823         case ixgbe_mac_X550EM_a:
6824                 /* Independent of link speed. */
6825                 incval = 1;
6826                 /* Cycles read will be interpreted as ns. */
6827                 shift = 0;
6828                 /* Fall-through */
6829         case ixgbe_mac_X540:
6830                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
6831                 break;
6832         case ixgbe_mac_82599EB:
6833                 incval >>= IXGBE_INCVAL_SHIFT_82599;
6834                 shift -= IXGBE_INCVAL_SHIFT_82599;
6835                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
6836                                 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
6837                 break;
6838         default:
6839                 /* Not supported. */
6840                 return;
6841         }
6842
6843         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
6844         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6845         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6846
6847         adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6848         adapter->systime_tc.cc_shift = shift;
6849         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
6850
6851         adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6852         adapter->rx_tstamp_tc.cc_shift = shift;
6853         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6854
6855         adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6856         adapter->tx_tstamp_tc.cc_shift = shift;
6857         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6858 }
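
/*
 * A minimal sketch of how the fields set above are consumed when a raw
 * counter value is converted to nanoseconds (assuming the rte_time.h
 * timecounter semantics; illustrative only):
 *
 *     delta = (cycles - cycle_last) & cc_mask;
 *     nsec += delta >> cc_shift;        // whole nanoseconds
 *     frac  = delta & nsec_mask;        // fractional bits carried over
 *
 * On X550 incval = 1 and shift = 0, so the counter already ticks in
 * nanoseconds; on 82599/X540 TIMINCA scales the counter so that the
 * shift strips the speed-dependent fractional bits.
 */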
6859
6860 static int
6861 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
6862 {
6863         struct ixgbe_adapter *adapter =
6864                         (struct ixgbe_adapter *)dev->data->dev_private;
6865
6866         adapter->systime_tc.nsec += delta;
6867         adapter->rx_tstamp_tc.nsec += delta;
6868         adapter->tx_tstamp_tc.nsec += delta;
6869
6870         return 0;
6871 }
6872
6873 static int
6874 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
6875 {
6876         uint64_t ns;
6877         struct ixgbe_adapter *adapter =
6878                         (struct ixgbe_adapter *)dev->data->dev_private;
6879
6880         ns = rte_timespec_to_ns(ts);
6881         /* Set the timecounters to a new value. */
6882         adapter->systime_tc.nsec = ns;
6883         adapter->rx_tstamp_tc.nsec = ns;
6884         adapter->tx_tstamp_tc.nsec = ns;
6885
6886         return 0;
6887 }
6888
6889 static int
6890 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
6891 {
6892         uint64_t ns, systime_cycles;
6893         struct ixgbe_adapter *adapter =
6894                         (struct ixgbe_adapter *)dev->data->dev_private;
6895
6896         systime_cycles = ixgbe_read_systime_cyclecounter(dev);
6897         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
6898         *ts = rte_ns_to_timespec(ns);
6899
6900         return 0;
6901 }
6902
6903 static int
6904 ixgbe_timesync_enable(struct rte_eth_dev *dev)
6905 {
6906         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6907         uint32_t tsync_ctl;
6908         uint32_t tsauxc;
6909
6910         /* Stop the timesync system time. */
6911         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
6912         /* Reset the timesync system time value. */
6913         IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
6914         IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
6915
6916         /* Enable system time for platforms where it isn't on by default. */
6917         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
6918         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
6919         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
6920
6921         ixgbe_start_timecounters(dev);
6922
6923         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6924         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
6925                         (ETHER_TYPE_1588 |
6926                          IXGBE_ETQF_FILTER_EN |
6927                          IXGBE_ETQF_1588));
6928
6929         /* Enable timestamping of received PTP packets. */
6930         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6931         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
6932         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6933
6934         /* Enable timestamping of transmitted PTP packets. */
6935         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6936         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
6937         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6938
6939         IXGBE_WRITE_FLUSH(hw);
6940
6941         return 0;
6942 }
6943
6944 static int
6945 ixgbe_timesync_disable(struct rte_eth_dev *dev)
6946 {
6947         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6948         uint32_t tsync_ctl;
6949
6950         /* Disable timestamping of transmitted PTP packets. */
6951         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6952         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
6953         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6954
6955         /* Disable timestamping of received PTP packets. */
6956         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6957         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
6958         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6959
6960         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6961         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
6962
6963         /* Stop incrementing the System Time registers. */
6964         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
6965
6966         return 0;
6967 }
6968
6969 static int
6970 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
6971                                  struct timespec *timestamp,
6972                                  uint32_t flags __rte_unused)
6973 {
6974         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6975         struct ixgbe_adapter *adapter =
6976                 (struct ixgbe_adapter *)dev->data->dev_private;
6977         uint32_t tsync_rxctl;
6978         uint64_t rx_tstamp_cycles;
6979         uint64_t ns;
6980
6981         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6982         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
6983                 return -EINVAL;
6984
6985         rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
6986         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
6987         *timestamp = rte_ns_to_timespec(ns);
6988
6989         return  0;
6990 }
6991
6992 static int
6993 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
6994                                  struct timespec *timestamp)
6995 {
6996         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6997         struct ixgbe_adapter *adapter =
6998                 (struct ixgbe_adapter *)dev->data->dev_private;
6999         uint32_t tsync_txctl;
7000         uint64_t tx_tstamp_cycles;
7001         uint64_t ns;
7002
7003         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7004         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
7005                 return -EINVAL;
7006
7007         tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
7008         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
7009         *timestamp = rte_ns_to_timespec(ns);
7010
7011         return 0;
7012 }
7013
7014 static int
7015 ixgbe_get_reg_length(struct rte_eth_dev *dev)
7016 {
7017         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7018         int count = 0;
7019         int g_ind = 0;
7020         const struct reg_info *reg_group;
7021         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7022                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7023
7024         while ((reg_group = reg_set[g_ind++]))
7025                 count += ixgbe_regs_group_count(reg_group);
7026
7027         return count;
7028 }
7029
7030 static int
7031 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
7032 {
7033         int count = 0;
7034         int g_ind = 0;
7035         const struct reg_info *reg_group;
7036
7037         while ((reg_group = ixgbevf_regs[g_ind++]))
7038                 count += ixgbe_regs_group_count(reg_group);
7039
7040         return count;
7041 }
7042
7043 static int
7044 ixgbe_get_regs(struct rte_eth_dev *dev,
7045               struct rte_dev_reg_info *regs)
7046 {
7047         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7048         uint32_t *data = regs->data;
7049         int g_ind = 0;
7050         int count = 0;
7051         const struct reg_info *reg_group;
7052         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7053                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7054
7055         if (data == NULL) {
7056                 regs->length = ixgbe_get_reg_length(dev);
7057                 regs->width = sizeof(uint32_t);
7058                 return 0;
7059         }
7060
7061         /* Support only full register dump */
7062         if ((regs->length == 0) ||
7063             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
7064                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7065                         hw->device_id;
7066                 while ((reg_group = reg_set[g_ind++]))
7067                         count += ixgbe_read_regs_group(dev, &data[count],
7068                                 reg_group);
7069                 return 0;
7070         }
7071
7072         return -ENOTSUP;
7073 }
7074
7075 static int
7076 ixgbevf_get_regs(struct rte_eth_dev *dev,
7077                 struct rte_dev_reg_info *regs)
7078 {
7079         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7080         uint32_t *data = regs->data;
7081         int g_ind = 0;
7082         int count = 0;
7083         const struct reg_info *reg_group;
7084
7085         if (data == NULL) {
7086                 regs->length = ixgbevf_get_reg_length(dev);
7087                 regs->width = sizeof(uint32_t);
7088                 return 0;
7089         }
7090
7091         /* Support only full register dump */
7092         if ((regs->length == 0) ||
7093             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
7094                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7095                         hw->device_id;
7096                 while ((reg_group = ixgbevf_regs[g_ind++]))
7097                         count += ixgbe_read_regs_group(dev, &data[count],
7098                                                       reg_group);
7099                 return 0;
7100         }
7101
7102         return -ENOTSUP;
7103 }
7104
7105 static int
7106 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
7107 {
7108         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7109
7110         /* word_size counts 16-bit words; return the size in bytes. */
7111         return hw->eeprom.word_size * 2;
7112 }
7113
7114 static int
7115 ixgbe_get_eeprom(struct rte_eth_dev *dev,
7116                 struct rte_dev_eeprom_info *in_eeprom)
7117 {
7118         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7119         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7120         uint16_t *data = in_eeprom->data;
7121         int first, length;
7122
7123         first = in_eeprom->offset >> 1;
7124         length = in_eeprom->length >> 1;
7125         if ((first > hw->eeprom.word_size) ||
7126             ((first + length) > hw->eeprom.word_size))
7127                 return -EINVAL;
7128
7129         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7130
7131         return eeprom->ops.read_buffer(hw, first, length, data);
7132 }
7133
7134 static int
7135 ixgbe_set_eeprom(struct rte_eth_dev *dev,
7136                 struct rte_dev_eeprom_info *in_eeprom)
7137 {
7138         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7139         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7140         uint16_t *data = in_eeprom->data;
7141         int first, length;
7142
7143         first = in_eeprom->offset >> 1;
7144         length = in_eeprom->length >> 1;
7145         if ((first > hw->eeprom.word_size) ||
7146             ((first + length) > hw->eeprom.word_size))
7147                 return -EINVAL;
7148
7149         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7150
7151         return eeprom->ops.write_buffer(hw, first, length, data);
7152 }
7153
7154 uint16_t
7155 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type)
{
7156         switch (mac_type) {
7157         case ixgbe_mac_X550:
7158         case ixgbe_mac_X550EM_x:
7159         case ixgbe_mac_X550EM_a:
7160                 return ETH_RSS_RETA_SIZE_512;
7161         case ixgbe_mac_X550_vf:
7162         case ixgbe_mac_X550EM_x_vf:
7163         case ixgbe_mac_X550EM_a_vf:
7164                 return ETH_RSS_RETA_SIZE_64;
7165         default:
7166                 return ETH_RSS_RETA_SIZE_128;
7167         }
7168 }
7169
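/* Each 32-bit RETA register packs four 8-bit queue indexes, hence the
 * reta_idx >> 2 below.  On the X550 family entries 128..511 live in the
 * extended IXGBE_ERETA array.
 */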
7170 uint32_t
7171 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx)
{
7172         switch (mac_type) {
7173         case ixgbe_mac_X550:
7174         case ixgbe_mac_X550EM_x:
7175         case ixgbe_mac_X550EM_a:
7176                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
7177                         return IXGBE_RETA(reta_idx >> 2);
7178                 else
7179                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
7180         case ixgbe_mac_X550_vf:
7181         case ixgbe_mac_X550EM_x_vf:
7182         case ixgbe_mac_X550EM_a_vf:
7183                 return IXGBE_VFRETA(reta_idx >> 2);
7184         default:
7185                 return IXGBE_RETA(reta_idx >> 2);
7186         }
7187 }
7188
7189 uint32_t
7190 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type)
{
7191         switch (mac_type) {
7192         case ixgbe_mac_X550_vf:
7193         case ixgbe_mac_X550EM_x_vf:
7194         case ixgbe_mac_X550EM_a_vf:
7195                 return IXGBE_VFMRQC;
7196         default:
7197                 return IXGBE_MRQC;
7198         }
7199 }
7200
7201 uint32_t
7202 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i)
{
7203         switch (mac_type) {
7204         case ixgbe_mac_X550_vf:
7205         case ixgbe_mac_X550EM_x_vf:
7206         case ixgbe_mac_X550EM_a_vf:
7207                 return IXGBE_VFRSSRK(i);
7208         default:
7209                 return IXGBE_RSSRK(i);
7210         }
7211 }
7212
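/* Report whether the RSS configuration can be updated at run time;
 * 82599 and X540 VFs do not support it.
 */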
7213 bool
7214 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type)
{
7215         switch (mac_type) {
7216         case ixgbe_mac_82599_vf:
7217         case ixgbe_mac_X540_vf:
7218                 return false;
7219         default:
7220                 return true;
7221         }
7222 }
7223
7224 static int
7225 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
7226                         struct rte_eth_dcb_info *dcb_info)
7227 {
7228         struct ixgbe_dcb_config *dcb_config =
7229                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
7230         struct ixgbe_dcb_tc_config *tc;
7231         uint8_t i, j;
7232
7233         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
7234                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
7235         else
7236                 dcb_info->nb_tcs = 1;
7237
7238         if (dcb_config->vt_mode) { /* VT is enabled */
7239                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
7240                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
7241                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7242                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
7243                 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
7244                         for (j = 0; j < dcb_info->nb_tcs; j++) {
7245                                 dcb_info->tc_queue.tc_rxq[i][j].base =
7246                                                 i * dcb_info->nb_tcs + j;
7247                                 dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
7248                                 dcb_info->tc_queue.tc_txq[i][j].base =
7249                                                 i * dcb_info->nb_tcs + j;
7250                                 dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
7251                         }
7252                 }
7253         } else { /* VT is disabled */
7254                 struct rte_eth_dcb_rx_conf *rx_conf =
7255                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
7256                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7257                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
7258                 if (dcb_info->nb_tcs == ETH_4_TCS) {
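                        /* Fixed DCB-only layout: Rx queues are split evenly
                         * across TCs, while the Tx splits below (64/32/16/16
                         * and 32/32/16/16/8/8/8/8) mirror the hardware's
                         * packet-buffer allocation.
                         */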
7259                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7260                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
7261                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7262                         }
7263                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7264                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
7265                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
7266                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
7267                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
7268                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7269                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7270                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7271                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
7272                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7273                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
7274                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7275                         }
7276                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7277                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
7278                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
7279                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
7280                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
7281                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
7282                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
7283                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
7284                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
7285                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7286                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7287                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7288                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
7289                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
7290                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
7291                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
7292                 }
7293         }
7294         for (i = 0; i < dcb_info->nb_tcs; i++) {
7295                 tc = &dcb_config->tc_config[i];
7296                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
7297         }
7298         return 0;
7299 }
7300
7301 /* Update e-tag ether type */
7302 static int
7303 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
7304                             uint16_t ether_type)
7305 {
7306         uint32_t etag_etype;
7307
7308         if (hw->mac.type != ixgbe_mac_X550 &&
7309             hw->mac.type != ixgbe_mac_X550EM_x &&
7310             hw->mac.type != ixgbe_mac_X550EM_a) {
7311                 return -ENOTSUP;
7312         }
7313
7314         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7315         etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
7316         etag_etype |= ether_type;
7317         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7318         IXGBE_WRITE_FLUSH(hw);
7319
7320         return 0;
7321 }
7322
7323 /* Configure the l2 tunnel ether type */
7324 static int
7325 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
7326                                   struct rte_eth_l2_tunnel_conf *l2_tunnel)
7327 {
7328         int ret = 0;
7329         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7330         struct ixgbe_l2_tn_info *l2_tn_info =
7331                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7332
7333         if (l2_tunnel == NULL)
7334                 return -EINVAL;
7335
7336         switch (l2_tunnel->l2_tunnel_type) {
7337         case RTE_L2_TUNNEL_TYPE_E_TAG:
7338                 l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
7339                 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
7340                 break;
7341         default:
7342                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7343                 ret = -EINVAL;
7344                 break;
7345         }
7346
7347         return ret;
7348 }
7349
7350 /* Enable e-tag tunnel */
7351 static int
7352 ixgbe_e_tag_enable(struct ixgbe_hw *hw)
7353 {
7354         uint32_t etag_etype;
7355
7356         if (hw->mac.type != ixgbe_mac_X550 &&
7357             hw->mac.type != ixgbe_mac_X550EM_x &&
7358             hw->mac.type != ixgbe_mac_X550EM_a) {
7359                 return -ENOTSUP;
7360         }
7361
7362         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7363         etag_etype |= IXGBE_ETAG_ETYPE_VALID;
7364         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7365         IXGBE_WRITE_FLUSH(hw);
7366
7367         return 0;
7368 }
7369
7370 /* Enable l2 tunnel */
7371 static int
7372 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
7373                            enum rte_eth_tunnel_type l2_tunnel_type)
7374 {
7375         int ret = 0;
7376         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7377         struct ixgbe_l2_tn_info *l2_tn_info =
7378                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7379
7380         switch (l2_tunnel_type) {
7381         case RTE_L2_TUNNEL_TYPE_E_TAG:
7382                 l2_tn_info->e_tag_en = TRUE;
7383                 ret = ixgbe_e_tag_enable(hw);
7384                 break;
7385         default:
7386                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7387                 ret = -EINVAL;
7388                 break;
7389         }
7390
7391         return ret;
7392 }
7393
7394 /* Disable e-tag tunnel */
7395 static int
7396 ixgbe_e_tag_disable(struct ixgbe_hw *hw)
7397 {
7398         uint32_t etag_etype;
7399
7400         if (hw->mac.type != ixgbe_mac_X550 &&
7401             hw->mac.type != ixgbe_mac_X550EM_x &&
7402             hw->mac.type != ixgbe_mac_X550EM_a) {
7403                 return -ENOTSUP;
7404         }
7405
7406         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7407         etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
7408         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7409         IXGBE_WRITE_FLUSH(hw);
7410
7411         return 0;
7412 }
7413
7414 /* Disable l2 tunnel */
7415 static int
7416 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
7417                             enum rte_eth_tunnel_type l2_tunnel_type)
7418 {
7419         int ret = 0;
7420         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7421         struct ixgbe_l2_tn_info *l2_tn_info =
7422                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7423
7424         switch (l2_tunnel_type) {
7425         case RTE_L2_TUNNEL_TYPE_E_TAG:
7426                 l2_tn_info->e_tag_en = FALSE;
7427                 ret = ixgbe_e_tag_disable(hw);
7428                 break;
7429         default:
7430                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7431                 ret = -EINVAL;
7432                 break;
7433         }
7434
7435         return ret;
7436 }
7437
7438 static int
7439 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
7440                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7441 {
7442         int ret = 0;
7443         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7444         uint32_t i, rar_entries;
7445         uint32_t rar_low, rar_high;
7446
7447         if (hw->mac.type != ixgbe_mac_X550 &&
7448             hw->mac.type != ixgbe_mac_X550EM_x &&
7449             hw->mac.type != ixgbe_mac_X550EM_a) {
7450                 return -ENOTSUP;
7451         }
7452
7453         rar_entries = ixgbe_get_num_rx_addrs(hw);
7454
7455         for (i = 1; i < rar_entries; i++) {
7456                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7457                 rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
7458                 if ((rar_high & IXGBE_RAH_AV) &&
7459                     (rar_high & IXGBE_RAH_ADTYPE) &&
7460                     ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
7461                      l2_tunnel->tunnel_id)) {
7462                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
7463                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
7464
7465                         ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
7466
7467                         return ret;
7468                 }
7469         }
7470
7471         return ret;
7472 }
7473
7474 static int
7475 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
7476                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7477 {
7478         int ret = 0;
7479         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7480         uint32_t i, rar_entries;
7481         uint32_t rar_low, rar_high;
7482
7483         if (hw->mac.type != ixgbe_mac_X550 &&
7484             hw->mac.type != ixgbe_mac_X550EM_x &&
7485             hw->mac.type != ixgbe_mac_X550EM_a) {
7486                 return -ENOTSUP;
7487         }
7488
7489         /* One entry per tunnel. Remove any existing entry first. */
7490         ixgbe_e_tag_filter_del(dev, l2_tunnel);
7491
7492         rar_entries = ixgbe_get_num_rx_addrs(hw);
7493
7494         for (i = 1; i < rar_entries; i++) {
7495                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7496                 if (rar_high & IXGBE_RAH_AV) {
7497                         continue;
7498                 } else {
7499                         ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
7500                         rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
7501                         rar_low = l2_tunnel->tunnel_id;
7502
7503                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
7504                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
7505
7506                         return ret;
7507                 }
7508         }
7509
7510         PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rules is full."
7511                      " Please remove a rule before adding a new one.");
7512         return -EINVAL;
7513 }
7514
7515 static inline struct ixgbe_l2_tn_filter *
7516 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
7517                           struct ixgbe_l2_tn_key *key)
7518 {
7519         int ret;
7520
7521         ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
7522         if (ret < 0)
7523                 return NULL;
7524
7525         return l2_tn_info->hash_map[ret];
7526 }
7527
7528 static inline int
7529 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7530                           struct ixgbe_l2_tn_filter *l2_tn_filter)
7531 {
7532         int ret;
7533
7534         ret = rte_hash_add_key(l2_tn_info->hash_handle,
7535                                &l2_tn_filter->key);
7536
7537         if (ret < 0) {
7538                 PMD_DRV_LOG(ERR,
7539                             "Failed to insert L2 tunnel filter"
7540                             " into hash table: %d!",
7541                             ret);
7542                 return ret;
7543         }
7544
7545         l2_tn_info->hash_map[ret] = l2_tn_filter;
7546
7547         TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7548
7549         return 0;
7550 }
7551
7552 static inline int
7553 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7554                           struct ixgbe_l2_tn_key *key)
7555 {
7556         int ret;
7557         struct ixgbe_l2_tn_filter *l2_tn_filter;
7558
7559         ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
7560
7561         if (ret < 0) {
7562                 PMD_DRV_LOG(ERR,
7563                             "No such L2 tunnel filter to delete: %d!",
7564                             ret);
7565                 return ret;
7566         }
7567
7568         l2_tn_filter = l2_tn_info->hash_map[ret];
7569         l2_tn_info->hash_map[ret] = NULL;
7570
7571         TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7572         rte_free(l2_tn_filter);
7573
7574         return 0;
7575 }
7576
7577 /* Add l2 tunnel filter */
7578 int
7579 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
7580                                struct rte_eth_l2_tunnel_conf *l2_tunnel,
7581                                bool restore)
7582 {
7583         int ret;
7584         struct ixgbe_l2_tn_info *l2_tn_info =
7585                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7586         struct ixgbe_l2_tn_key key;
7587         struct ixgbe_l2_tn_filter *node;
7588
7589         if (!restore) {
7590                 key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7591                 key.tn_id = l2_tunnel->tunnel_id;
7592
7593                 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
7594
7595                 if (node) {
7596                         PMD_DRV_LOG(ERR,
7597                                     "The L2 tunnel filter already exists!");
7598                         return -EINVAL;
7599                 }
7600
7601                 node = rte_zmalloc("ixgbe_l2_tn",
7602                                    sizeof(struct ixgbe_l2_tn_filter),
7603                                    0);
7604                 if (!node)
7605                         return -ENOMEM;
7606
7607                 rte_memcpy(&node->key,
7608                                  &key,
7609                                  sizeof(struct ixgbe_l2_tn_key));
7610                 node->pool = l2_tunnel->pool;
7611                 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
7612                 if (ret < 0) {
7613                         rte_free(node);
7614                         return ret;
7615                 }
7616         }
7617
7618         switch (l2_tunnel->l2_tunnel_type) {
7619         case RTE_L2_TUNNEL_TYPE_E_TAG:
7620                 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
7621                 break;
7622         default:
7623                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7624                 ret = -EINVAL;
7625                 break;
7626         }
7627
7628         if ((!restore) && (ret < 0))
7629                 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7630
7631         return ret;
7632 }
7633
7634 /* Delete l2 tunnel filter */
7635 int
7636 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
7637                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
7638 {
7639         int ret;
7640         struct ixgbe_l2_tn_info *l2_tn_info =
7641                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7642         struct ixgbe_l2_tn_key key;
7643
7644         key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7645         key.tn_id = l2_tunnel->tunnel_id;
7646         ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7647         if (ret < 0)
7648                 return ret;
7649
7650         switch (l2_tunnel->l2_tunnel_type) {
7651         case RTE_L2_TUNNEL_TYPE_E_TAG:
7652                 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
7653                 break;
7654         default:
7655                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7656                 ret = -EINVAL;
7657                 break;
7658         }
7659
7660         return ret;
7661 }
7662
7663 /**
7664  * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
7665  * @dev: pointer to rte_eth_dev structure
7666  * @filter_op: operation to be taken.
7667  * @arg: pointer to the structure specific to filter_op.
7668  */
7669 static int
7670 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
7671                                   enum rte_filter_op filter_op,
7672                                   void *arg)
7673 {
7674         int ret;
7675
7676         if (filter_op == RTE_ETH_FILTER_NOP)
7677                 return 0;
7678
7679         if (arg == NULL) {
7680                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
7681                             filter_op);
7682                 return -EINVAL;
7683         }
7684
7685         switch (filter_op) {
7686         case RTE_ETH_FILTER_ADD:
7687                 ret = ixgbe_dev_l2_tunnel_filter_add
7688                         (dev,
7689                          (struct rte_eth_l2_tunnel_conf *)arg,
7690                          FALSE);
7691                 break;
7692         case RTE_ETH_FILTER_DELETE:
7693                 ret = ixgbe_dev_l2_tunnel_filter_del
7694                         (dev,
7695                          (struct rte_eth_l2_tunnel_conf *)arg);
7696                 break;
7697         default:
7698                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
7699                 ret = -EINVAL;
7700                 break;
7701         }
7702         return ret;
7703 }
7704
7705 static int
7706 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
7707 {
7708         int ret = 0;
7709         uint32_t ctrl;
7710         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7711
7712         if (hw->mac.type != ixgbe_mac_X550 &&
7713             hw->mac.type != ixgbe_mac_X550EM_x &&
7714             hw->mac.type != ixgbe_mac_X550EM_a) {
7715                 return -ENOTSUP;
7716         }
7717
7718         ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
7719         ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
7720         if (en)
7721                 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
7722         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
7723
7724         return ret;
7725 }
7726
7727 /* Enable l2 tunnel forwarding */
7728 static int
7729 ixgbe_dev_l2_tunnel_forwarding_enable
7730         (struct rte_eth_dev *dev,
7731          enum rte_eth_tunnel_type l2_tunnel_type)
7732 {
7733         struct ixgbe_l2_tn_info *l2_tn_info =
7734                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7735         int ret = 0;
7736
7737         switch (l2_tunnel_type) {
7738         case RTE_L2_TUNNEL_TYPE_E_TAG:
7739                 l2_tn_info->e_tag_fwd_en = TRUE;
7740                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
7741                 break;
7742         default:
7743                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7744                 ret = -EINVAL;
7745                 break;
7746         }
7747
7748         return ret;
7749 }
7750
7751 /* Disable l2 tunnel forwarding */
7752 static int
7753 ixgbe_dev_l2_tunnel_forwarding_disable
7754         (struct rte_eth_dev *dev,
7755          enum rte_eth_tunnel_type l2_tunnel_type)
7756 {
7757         struct ixgbe_l2_tn_info *l2_tn_info =
7758                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7759         int ret = 0;
7760
7761         switch (l2_tunnel_type) {
7762         case RTE_L2_TUNNEL_TYPE_E_TAG:
7763                 l2_tn_info->e_tag_fwd_en = FALSE;
7764                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
7765                 break;
7766         default:
7767                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7768                 ret = -EINVAL;
7769                 break;
7770         }
7771
7772         return ret;
7773 }
7774
7775 static int
7776 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
7777                              struct rte_eth_l2_tunnel_conf *l2_tunnel,
7778                              bool en)
7779 {
7780         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
7781         int ret = 0;
7782         uint32_t vmtir, vmvir;
7783         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7784
7785         if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
7786                 PMD_DRV_LOG(ERR,
7787                             "VF id %u should be less than %u",
7788                             l2_tunnel->vf_id,
7789                             pci_dev->max_vfs);
7790                 return -EINVAL;
7791         }
7792
7793         if (hw->mac.type != ixgbe_mac_X550 &&
7794             hw->mac.type != ixgbe_mac_X550EM_x &&
7795             hw->mac.type != ixgbe_mac_X550EM_a) {
7796                 return -ENOTSUP;
7797         }
7798
7799         if (en)
7800                 vmtir = l2_tunnel->tunnel_id;
7801         else
7802                 vmtir = 0;
7803
7804         IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
7805
7806         vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
7807         vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
7808         if (en)
7809                 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
7810         IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
7811
7812         return ret;
7813 }
7814
7815 /* Enable l2 tunnel tag insertion */
7816 static int
7817 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
7818                                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
7819 {
7820         int ret = 0;
7821
7822         switch (l2_tunnel->l2_tunnel_type) {
7823         case RTE_L2_TUNNEL_TYPE_E_TAG:
7824                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
7825                 break;
7826         default:
7827                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7828                 ret = -EINVAL;
7829                 break;
7830         }
7831
7832         return ret;
7833 }
7834
7835 /* Disable l2 tunnel tag insertion */
7836 static int
7837 ixgbe_dev_l2_tunnel_insertion_disable
7838         (struct rte_eth_dev *dev,
7839          struct rte_eth_l2_tunnel_conf *l2_tunnel)
7840 {
7841         int ret = 0;
7842
7843         switch (l2_tunnel->l2_tunnel_type) {
7844         case RTE_L2_TUNNEL_TYPE_E_TAG:
7845                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
7846                 break;
7847         default:
7848                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7849                 ret = -EINVAL;
7850                 break;
7851         }
7852
7853         return ret;
7854 }
7855
7856 static int
7857 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
7858                              bool en)
7859 {
7860         int ret = 0;
7861         uint32_t qde;
7862         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7863
7864         if (hw->mac.type != ixgbe_mac_X550 &&
7865             hw->mac.type != ixgbe_mac_X550EM_x &&
7866             hw->mac.type != ixgbe_mac_X550EM_a) {
7867                 return -ENOTSUP;
7868         }
7869
7870         qde = IXGBE_READ_REG(hw, IXGBE_QDE);
7871         if (en)
7872                 qde |= IXGBE_QDE_STRIP_TAG;
7873         else
7874                 qde &= ~IXGBE_QDE_STRIP_TAG;
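        /* QDE is written indirectly: clear the READ strobe and set the
         * WRITE strobe to commit the new value.
         */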
7875         qde &= ~IXGBE_QDE_READ;
7876         qde |= IXGBE_QDE_WRITE;
7877         IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
7878
7879         return ret;
7880 }
7881
7882 /* Enable l2 tunnel tag stripping */
7883 static int
7884 ixgbe_dev_l2_tunnel_stripping_enable
7885         (struct rte_eth_dev *dev,
7886          enum rte_eth_tunnel_type l2_tunnel_type)
7887 {
7888         int ret = 0;
7889
7890         switch (l2_tunnel_type) {
7891         case RTE_L2_TUNNEL_TYPE_E_TAG:
7892                 ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
7893                 break;
7894         default:
7895                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7896                 ret = -EINVAL;
7897                 break;
7898         }
7899
7900         return ret;
7901 }
7902
7903 /* Disable l2 tunnel tag stripping */
7904 static int
7905 ixgbe_dev_l2_tunnel_stripping_disable
7906         (struct rte_eth_dev *dev,
7907          enum rte_eth_tunnel_type l2_tunnel_type)
7908 {
7909         int ret = 0;
7910
7911         switch (l2_tunnel_type) {
7912         case RTE_L2_TUNNEL_TYPE_E_TAG:
7913                 ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
7914                 break;
7915         default:
7916                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7917                 ret = -EINVAL;
7918                 break;
7919         }
7920
7921         return ret;
7922 }
7923
7924 /* Enable/disable l2 tunnel offload functions */
7925 static int
7926 ixgbe_dev_l2_tunnel_offload_set
7927         (struct rte_eth_dev *dev,
7928          struct rte_eth_l2_tunnel_conf *l2_tunnel,
7929          uint32_t mask,
7930          uint8_t en)
7931 {
7932         int ret = 0;
7933
7934         if (l2_tunnel == NULL)
7935                 return -EINVAL;
7936
7937         ret = -EINVAL;
7938         if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
7939                 if (en)
7940                         ret = ixgbe_dev_l2_tunnel_enable(
7941                                 dev,
7942                                 l2_tunnel->l2_tunnel_type);
7943                 else
7944                         ret = ixgbe_dev_l2_tunnel_disable(
7945                                 dev,
7946                                 l2_tunnel->l2_tunnel_type);
7947         }
7948
7949         if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
7950                 if (en)
7951                         ret = ixgbe_dev_l2_tunnel_insertion_enable(
7952                                 dev,
7953                                 l2_tunnel);
7954                 else
7955                         ret = ixgbe_dev_l2_tunnel_insertion_disable(
7956                                 dev,
7957                                 l2_tunnel);
7958         }
7959
7960         if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
7961                 if (en)
7962                         ret = ixgbe_dev_l2_tunnel_stripping_enable(
7963                                 dev,
7964                                 l2_tunnel->l2_tunnel_type);
7965                 else
7966                         ret = ixgbe_dev_l2_tunnel_stripping_disable(
7967                                 dev,
7968                                 l2_tunnel->l2_tunnel_type);
7969         }
7970
7971         if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
7972                 if (en)
7973                         ret = ixgbe_dev_l2_tunnel_forwarding_enable(
7974                                 dev,
7975                                 l2_tunnel->l2_tunnel_type);
7976                 else
7977                         ret = ixgbe_dev_l2_tunnel_forwarding_disable(
7978                                 dev,
7979                                 l2_tunnel->l2_tunnel_type);
7980         }
7981
7982         return ret;
7983 }
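
/*
 * A minimal usage sketch (hypothetical values; this entry point is
 * normally reached through rte_eth_dev_l2_tunnel_offload_set()):
 *
 *     struct rte_eth_l2_tunnel_conf conf = {
 *             .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *     };
 *     uint32_t mask = ETH_L2_TUNNEL_ENABLE_MASK |
 *                     ETH_L2_TUNNEL_STRIPPING_MASK;
 *     ixgbe_dev_l2_tunnel_offload_set(dev, &conf, mask, 1);
 */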
7984
7985 static int
7986 ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
7987                         uint16_t port)
7988 {
7989         IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
7990         IXGBE_WRITE_FLUSH(hw);
7991
7992         return 0;
7993 }
7994
7995 /* There is only one register for the VxLAN UDP port, so multiple ports
7996  * cannot be added; setting a new port simply overwrites the current one.
7997  */
7998 static int
7999 ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
8000                      uint16_t port)
8001 {
8002         if (port == 0) {
8003                 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
8004                 return -EINVAL;
8005         }
8006
8007         return ixgbe_update_vxlan_port(hw, port);
8008 }
8009
8010 /* The VxLAN UDP port register always holds a value, so the port cannot
8011  * really be deleted; "deleting" resets the register to its default value
8012  * of 0, after checking that the given port is the configured one.
8013  */
8014 static int
8015 ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
8016                      uint16_t port)
8017 {
8018         uint16_t cur_port;
8019
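        /* Only the low 16 bits of VXLANCTRL carry the port; the cast keeps
         * just that field.
         */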
8020         cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
8021
8022         if (cur_port != port) {
8023                 PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
8024                 return -EINVAL;
8025         }
8026
8027         return ixgbe_update_vxlan_port(hw, 0);
8028 }
8029
8030 /* Add UDP tunneling port */
8031 static int
8032 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8033                               struct rte_eth_udp_tunnel *udp_tunnel)
8034 {
8035         int ret = 0;
8036         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8037
8038         if (hw->mac.type != ixgbe_mac_X550 &&
8039             hw->mac.type != ixgbe_mac_X550EM_x &&
8040             hw->mac.type != ixgbe_mac_X550EM_a) {
8041                 return -ENOTSUP;
8042         }
8043
8044         if (udp_tunnel == NULL)
8045                 return -EINVAL;
8046
8047         switch (udp_tunnel->prot_type) {
8048         case RTE_TUNNEL_TYPE_VXLAN:
8049                 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
8050                 break;
8051
8052         case RTE_TUNNEL_TYPE_GENEVE:
8053         case RTE_TUNNEL_TYPE_TEREDO:
8054                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8055                 ret = -EINVAL;
8056                 break;
8057
8058         default:
8059                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8060                 ret = -EINVAL;
8061                 break;
8062         }
8063
8064         return ret;
8065 }
8066
8067 /* Remove UDP tunneling port */
8068 static int
8069 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8070                               struct rte_eth_udp_tunnel *udp_tunnel)
8071 {
8072         int ret = 0;
8073         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8074
8075         if (hw->mac.type != ixgbe_mac_X550 &&
8076             hw->mac.type != ixgbe_mac_X550EM_x &&
8077             hw->mac.type != ixgbe_mac_X550EM_a) {
8078                 return -ENOTSUP;
8079         }
8080
8081         if (udp_tunnel == NULL)
8082                 return -EINVAL;
8083
8084         switch (udp_tunnel->prot_type) {
8085         case RTE_TUNNEL_TYPE_VXLAN:
8086                 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
8087                 break;
8088         case RTE_TUNNEL_TYPE_GENEVE:
8089         case RTE_TUNNEL_TYPE_TEREDO:
8090                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8091                 ret = -EINVAL;
8092                 break;
8093         default:
8094                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8095                 ret = -EINVAL;
8096                 break;
8097         }
8098
8099         return ret;
8100 }
8101
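/* Ask the PF, via the mailbox, to switch this VF between all-multicast
 * and plain multicast filtering.
 */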
8102 static void
8103 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
8104 {
8105         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8106
8107         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
8108 }
8109
8110 static void
8111 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
8112 {
8113         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8114
8115         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
8116 }
8117
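/* Drain one mailbox message from the PF and, if it is a control (reset)
 * message, forward a reset event to the application.
 */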
8118 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
8119 {
8120         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8121         u32 in_msg = 0;
8122
8123         if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
8124                 return;
8125
8126         /* PF reset VF event */
8127         if (in_msg == IXGBE_PF_CONTROL_MSG)
8128                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
8129                                               NULL, NULL);
8130 }
8131
8132 static int
8133 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
8134 {
8135         uint32_t eicr;
8136         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8137         struct ixgbe_interrupt *intr =
8138                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8139         ixgbevf_intr_disable(hw);
8140
8141         /* Read the clear-on-read NIC interrupt cause registers. */
8142         eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
8143         intr->flags = 0;
8144
8145         /* only one misc vector supported - mailbox */
8146         eicr &= IXGBE_VTEICR_MASK;
8147         if (eicr == IXGBE_MISC_VEC_ID)
8148                 intr->flags |= IXGBE_FLAG_MAILBOX;
8149
8150         return 0;
8151 }
8152
8153 static int
8154 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
8155 {
8156         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8157         struct ixgbe_interrupt *intr =
8158                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8159
8160         if (intr->flags & IXGBE_FLAG_MAILBOX) {
8161                 ixgbevf_mbx_process(dev);
8162                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
8163         }
8164
8165         ixgbevf_intr_enable(hw);
8166
8167         return 0;
8168 }
8169
8170 static void
8171 ixgbevf_dev_interrupt_handler(void *param)
8172 {
8173         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
8174
8175         ixgbevf_dev_interrupt_get_status(dev);
8176         ixgbevf_dev_interrupt_action(dev);
8177 }
8178
8179 /**
8180  *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
8181  *  @hw: pointer to hardware structure
8182  *
8183  *  Stops the transmit data path and waits for the HW to internally empty
8184  *  the Tx security block
8185  **/
8186 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
8187 {
8188 #define IXGBE_MAX_SECTX_POLL 40
8189
8190         int i;
8191         uint32_t sectxreg;
8192
8193         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8194         sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
8195         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8196         for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
8197                 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
8198                 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
8199                         break;
8200                 /* Busy-wait delay; safe to use in interrupt context */
8201                 usec_delay(1000);
8202         }
8203
8204         /* For informational purposes only */
8205         if (i >= IXGBE_MAX_SECTX_POLL)
8206                 PMD_DRV_LOG(DEBUG, "Tx security path was not fully "
8207                          "disabled before timeout. Continuing with init.");
8208
8209         return IXGBE_SUCCESS;
8210 }
8211
8212 /**
8213  *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
8214  *  @hw: pointer to hardware structure
8215  *
8216  *  Enables the transmit data path.
8217  **/
8218 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
8219 {
8220         uint32_t sectxreg;
8221
8222         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8223         sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
8224         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8225         IXGBE_WRITE_FLUSH(hw);
8226
8227         return IXGBE_SUCCESS;
8228 }
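
/*
 * Illustrative sketch (not part of the driver): the two helpers above are
 * used as a bracket around reconfiguration of the Tx security block, e.g.
 * in the MACsec offload paths of this PMD. The example function and the
 * IXGBE_EXAMPLE_SNIPPETS guard are hypothetical.
 */
#ifdef IXGBE_EXAMPLE_SNIPPETS
static void
example_reconfigure_sec_block(struct ixgbe_hw *hw)
{
        /* Quiesce the Tx data path and wait for the security block to drain */
        ixgbe_disable_sec_tx_path_generic(hw);

        /* ... program SECTX-related registers here ... */

        /* Re-open the Tx data path once programming is done */
        ixgbe_enable_sec_tx_path_generic(hw);
}
#endif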
8229
8230 /* restore n-tuple filter */
8231 static inline void
8232 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
8233 {
8234         struct ixgbe_filter_info *filter_info =
8235                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8236         struct ixgbe_5tuple_filter *node;
8237
8238         TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
8239                 ixgbe_inject_5tuple_filter(dev, node);
8240         }
8241 }
8242
8243 /* restore ethernet type filter */
8244 static inline void
8245 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
8246 {
8247         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8248         struct ixgbe_filter_info *filter_info =
8249                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8250         int i;
8251
8252         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8253                 if (filter_info->ethertype_mask & (1 << i)) {
8254                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
8255                                         filter_info->ethertype_filters[i].etqf);
8256                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
8257                                         filter_info->ethertype_filters[i].etqs);
8258                         IXGBE_WRITE_FLUSH(hw);
8259                 }
8260         }
8261 }
8262
8263 /* restore SYN filter */
8264 static inline void
8265 ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
8266 {
8267         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8268         struct ixgbe_filter_info *filter_info =
8269                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8270         uint32_t synqf;
8271
8272         synqf = filter_info->syn_info;
8273
8274         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
8275                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
8276                 IXGBE_WRITE_FLUSH(hw);
8277         }
8278 }
8279
8280 /* restore L2 tunnel filter */
8281 static inline void
8282 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
8283 {
8284         struct ixgbe_l2_tn_info *l2_tn_info =
8285                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8286         struct ixgbe_l2_tn_filter *node;
8287         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8288
8289         TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
8290                 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
8291                 l2_tn_conf.tunnel_id      = node->key.tn_id;
8292                 l2_tn_conf.pool           = node->pool;
8293                 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
8294         }
8295 }
8296
8297 static int
8298 ixgbe_filter_restore(struct rte_eth_dev *dev)
8299 {
8300         ixgbe_ntuple_filter_restore(dev);
8301         ixgbe_ethertype_filter_restore(dev);
8302         ixgbe_syn_filter_restore(dev);
8303         ixgbe_fdir_filter_restore(dev);
8304         ixgbe_l2_tn_filter_restore(dev);
8305
8306         return 0;
8307 }
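
/*
 * Illustrative sketch (not part of the driver): ixgbe_filter_restore() is
 * meant to run after the hardware has been reset and reconfigured (the PMD
 * calls it from the port start path), so the software-tracked filters are
 * replayed into the freshly cleared registers. Names are hypothetical.
 */
#ifdef IXGBE_EXAMPLE_SNIPPETS
static void
example_post_reset_reload(struct rte_eth_dev *dev)
{
        /* ... hardware reset and basic re-init would happen here ... */

        /* Replay all cached filter entries into the NIC */
        ixgbe_filter_restore(dev);
}
#endif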
8308
8309 static void
8310 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
8311 {
8312         struct ixgbe_l2_tn_info *l2_tn_info =
8313                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8314         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8315
8316         if (l2_tn_info->e_tag_en)
8317                 (void)ixgbe_e_tag_enable(hw);
8318
8319         if (l2_tn_info->e_tag_fwd_en)
8320                 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
8321
8322         (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
8323 }
8324
8325 /* remove all the n-tuple filters */
8326 void
8327 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
8328 {
8329         struct ixgbe_filter_info *filter_info =
8330                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8331         struct ixgbe_5tuple_filter *p_5tuple;
8332
8333         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
8334                 ixgbe_remove_5tuple_filter(dev, p_5tuple);
8335 }
8336
8337 /* remove all the ether type filters */
8338 void
8339 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
8340 {
8341         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8342         struct ixgbe_filter_info *filter_info =
8343                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8344         int i;
8345
8346         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8347                 if ((filter_info->ethertype_mask & (1 << i)) &&
8348                     !filter_info->ethertype_filters[i].conf) {
8349                         (void)ixgbe_ethertype_filter_remove(filter_info,
8350                                                             (uint8_t)i);
8351                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
8352                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
8353                         IXGBE_WRITE_FLUSH(hw);
8354                 }
8355         }
8356 }
8357
8358 /* remove the SYN filter */
8359 void
8360 ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
8361 {
8362         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8363         struct ixgbe_filter_info *filter_info =
8364                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8365
8366         if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
8367                 filter_info->syn_info = 0;
8368
8369                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
8370                 IXGBE_WRITE_FLUSH(hw);
8371         }
8372 }
8373
8374 /* remove all the L2 tunnel filters */
8375 int
8376 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
8377 {
8378         struct ixgbe_l2_tn_info *l2_tn_info =
8379                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8380         struct ixgbe_l2_tn_filter *l2_tn_filter;
8381         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8382         int ret = 0;
8383
8384         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
8385                 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
8386                 l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
8387                 l2_tn_conf.pool           = l2_tn_filter->pool;
8388                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
8389                 if (ret < 0)
8390                         return ret;
8391         }
8392
8393         return 0;
8394 }
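
/*
 * Illustrative sketch (not part of the driver): the clear helpers above are
 * the teardown counterpart of the restore path and are invoked together,
 * e.g. when all flow rules are flushed or the port is torn down. The
 * example function and the IXGBE_EXAMPLE_SNIPPETS guard are hypothetical.
 */
#ifdef IXGBE_EXAMPLE_SNIPPETS
static void
example_remove_all_filters(struct rte_eth_dev *dev)
{
        ixgbe_clear_all_ntuple_filter(dev);
        ixgbe_clear_all_ethertype_filter(dev);
        ixgbe_clear_syn_filter(dev);

        /* Only the L2 tunnel variant can fail, so check its return value */
        if (ixgbe_clear_all_l2_tn_filter(dev) < 0)
                PMD_DRV_LOG(ERR, "Failed to clear L2 tunnel filters");
}
#endif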
8395
8396 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
8397 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
8398 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
8399 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
8400 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
8401 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");