[dpdk.git] / drivers / net / ixgbe / ixgbe_ethdev.c (commit 77c6d387f67c5ccc2a17b85c2dd0894017111e8f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <netinet/in.h>
14 #include <rte_string_fns.h>
15 #include <rte_byteorder.h>
16 #include <rte_common.h>
17 #include <rte_cycles.h>
18
19 #include <rte_interrupts.h>
20 #include <rte_log.h>
21 #include <rte_debug.h>
22 #include <rte_pci.h>
23 #include <rte_bus_pci.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_memory.h>
26 #include <rte_kvargs.h>
27 #include <rte_eal.h>
28 #include <rte_alarm.h>
29 #include <rte_ether.h>
30 #include <rte_ethdev_driver.h>
31 #include <rte_ethdev_pci.h>
32 #include <rte_malloc.h>
33 #include <rte_random.h>
34 #include <rte_dev.h>
35 #include <rte_hash_crc.h>
36 #ifdef RTE_LIBRTE_SECURITY
37 #include <rte_security_driver.h>
38 #endif
39
40 #include "ixgbe_logs.h"
41 #include "base/ixgbe_api.h"
42 #include "base/ixgbe_vf.h"
43 #include "base/ixgbe_common.h"
44 #include "ixgbe_ethdev.h"
45 #include "ixgbe_bypass.h"
46 #include "ixgbe_rxtx.h"
47 #include "base/ixgbe_type.h"
48 #include "base/ixgbe_phy.h"
49 #include "ixgbe_regs.h"
50
51 /*
52  * High threshold controlling when to start sending XOFF frames. Must be at
53  * least 8 bytes less than receive packet buffer size. This value is in units
54  * of 1024 bytes.
55  */
56 #define IXGBE_FC_HI    0x80
57
58 /*
59  * Low threshold controlling when to start sending XON frames. This value is
60  * in units of 1024 bytes.
61  */
62 #define IXGBE_FC_LO    0x40
63
64 /* Timer value included in XOFF frames. */
65 #define IXGBE_FC_PAUSE 0x680
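/*
 * With the defaults above, XOFF is requested once the Rx packet buffer
 * reaches 0x80 * 1KB = 128KB of used space and XON once it drains back to
 * 0x40 * 1KB = 64KB; the advertised pause time is 0x680 quanta.
 */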
66
67 /* Default value of Max Rx Queue */
68 #define IXGBE_MAX_RX_QUEUE_NUM 128
69
70 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
71 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
72 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
73
74 #define IXGBE_MMW_SIZE_DEFAULT        0x4
75 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
76 #define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
77
78 /*
79  *  Default values for RX/TX configuration
80  */
81 #define IXGBE_DEFAULT_RX_FREE_THRESH  32
82 #define IXGBE_DEFAULT_RX_PTHRESH      8
83 #define IXGBE_DEFAULT_RX_HTHRESH      8
84 #define IXGBE_DEFAULT_RX_WTHRESH      0
85
86 #define IXGBE_DEFAULT_TX_FREE_THRESH  32
87 #define IXGBE_DEFAULT_TX_PTHRESH      32
88 #define IXGBE_DEFAULT_TX_HTHRESH      0
89 #define IXGBE_DEFAULT_TX_WTHRESH      0
90 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
91
92 /* Bit shift and mask */
93 #define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
94 #define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
95 #define IXGBE_8_BIT_WIDTH  CHAR_BIT
96 #define IXGBE_8_BIT_MASK   UINT8_MAX
97
98 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
99
100 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
101
102 /* Additional timesync values. */
103 #define NSEC_PER_SEC             1000000000L
104 #define IXGBE_INCVAL_10GB        0x66666666
105 #define IXGBE_INCVAL_1GB         0x40000000
106 #define IXGBE_INCVAL_100         0x50000000
107 #define IXGBE_INCVAL_SHIFT_10GB  28
108 #define IXGBE_INCVAL_SHIFT_1GB   24
109 #define IXGBE_INCVAL_SHIFT_100   21
110 #define IXGBE_INCVAL_SHIFT_82599 7
111 #define IXGBE_INCPER_SHIFT_82599 24
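/*
 * The INCVAL/INCVAL_SHIFT pairs above are written to the SYSTIME increment
 * registers when timesync is enabled so that the IEEE 1588 clock advances
 * at the same rate whether the link runs at 10G, 1G or 100M.
 */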
112
113 #define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
114
115 #define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
116 #define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
117 #define IXGBE_ETAG_ETYPE                       0x00005084
118 #define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
119 #define IXGBE_ETAG_ETYPE_VALID                 0x80000000
120 #define IXGBE_RAH_ADTYPE                       0x40000000
121 #define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
122 #define IXGBE_VMVIR_TAGA_MASK                  0x18000000
123 #define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
124 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
125 #define IXGBE_QDE_STRIP_TAG                    0x00000004
126 #define IXGBE_VTEICR_MASK                      0x07
127
128 #define IXGBE_EXVET_VET_EXT_SHIFT              16
129 #define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
130
131 #define IXGBEVF_DEVARG_PFLINK_FULLCHK           "pflink_fullchk"
132
133 static const char * const ixgbevf_valid_arguments[] = {
134         IXGBEVF_DEVARG_PFLINK_FULLCHK,
135         NULL
136 };
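/*
 * When "pflink_fullchk" is set to 1 as a device argument (for example
 * "-w <BDF>,pflink_fullchk=1" on the EAL command line), the VF link update
 * also checks the PF's physical link and mailbox status instead of relying
 * solely on the VF's cached view.
 */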
137
138 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
139 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
140 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
141 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
142 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
143 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
144 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
145 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
146 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
147 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
148 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
149 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
150 static void ixgbe_dev_close(struct rte_eth_dev *dev);
151 static int  ixgbe_dev_reset(struct rte_eth_dev *dev);
152 static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
153 static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
154 static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
155 static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
156 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
157                                 int wait_to_complete);
158 static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
159                                 struct rte_eth_stats *stats);
160 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
161                                 struct rte_eth_xstat *xstats, unsigned n);
162 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
163                                   struct rte_eth_xstat *xstats, unsigned n);
164 static int
165 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
166                 uint64_t *values, unsigned int n);
167 static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
168 static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
169 static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
170         struct rte_eth_xstat_name *xstats_names,
171         unsigned int size);
172 static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
173         struct rte_eth_xstat_name *xstats_names, unsigned limit);
174 static int ixgbe_dev_xstats_get_names_by_id(
175         struct rte_eth_dev *dev,
176         struct rte_eth_xstat_name *xstats_names,
177         const uint64_t *ids,
178         unsigned int limit);
179 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
180                                              uint16_t queue_id,
181                                              uint8_t stat_idx,
182                                              uint8_t is_rx);
183 static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
184                                  size_t fw_size);
185 static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
186                               struct rte_eth_dev_info *dev_info);
187 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
188 static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
189                                 struct rte_eth_dev_info *dev_info);
190 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
191
192 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
193                 uint16_t vlan_id, int on);
194 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
195                                enum rte_vlan_type vlan_type,
196                                uint16_t tpid_id);
197 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
198                 uint16_t queue, bool on);
199 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
200                 int on);
201 static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
202                                                   int mask);
203 static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
204 static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
205 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
206 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
207 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
208 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
209
210 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
211 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
212 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
213                                struct rte_eth_fc_conf *fc_conf);
214 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
215                                struct rte_eth_fc_conf *fc_conf);
216 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
217                 struct rte_eth_pfc_conf *pfc_conf);
218 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
219                         struct rte_eth_rss_reta_entry64 *reta_conf,
220                         uint16_t reta_size);
221 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
222                         struct rte_eth_rss_reta_entry64 *reta_conf,
223                         uint16_t reta_size);
224 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
225 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
226 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
227 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
228 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
229 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
230 static void ixgbe_dev_interrupt_handler(void *param);
231 static void ixgbe_dev_interrupt_delayed_handler(void *param);
232 static void ixgbe_dev_setup_link_alarm_handler(void *param);
233
234 static int ixgbe_add_rar(struct rte_eth_dev *dev,
235                         struct rte_ether_addr *mac_addr,
236                         uint32_t index, uint32_t pool);
237 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
238 static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
239                                            struct rte_ether_addr *mac_addr);
240 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
241 static bool is_device_supported(struct rte_eth_dev *dev,
242                                 struct rte_pci_driver *drv);
243
244 /* For Virtual Function support */
245 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
246 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
247 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
248 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
249 static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
250                                    int wait_to_complete);
251 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
252 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
253 static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
254 static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
255 static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
256 static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
257                 struct rte_eth_stats *stats);
258 static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
259 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
260                 uint16_t vlan_id, int on);
261 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
262                 uint16_t queue, int on);
263 static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
264 static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
265 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
266 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
267                                             uint16_t queue_id);
268 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
269                                              uint16_t queue_id);
270 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
271                                  uint8_t queue, uint8_t msix_vector);
272 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
273 static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
274 static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
275 static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
276 static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
277
278 /* For Eth VMDQ APIs support */
279 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
280                 rte_ether_addr * mac_addr, uint8_t on);
281 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
282 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
283                 struct rte_eth_mirror_conf *mirror_conf,
284                 uint8_t rule_id, uint8_t on);
285 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
286                 uint8_t rule_id);
287 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
288                                           uint16_t queue_id);
289 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
290                                            uint16_t queue_id);
291 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
292                                uint8_t queue, uint8_t msix_vector);
293 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
294
295 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
296                                 struct rte_ether_addr *mac_addr,
297                                 uint32_t index, uint32_t pool);
298 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
299 static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
300                                              struct rte_ether_addr *mac_addr);
301 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
302                         struct rte_eth_syn_filter *filter);
303 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
304                         enum rte_filter_op filter_op,
305                         void *arg);
306 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
307                         struct ixgbe_5tuple_filter *filter);
308 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
309                         struct ixgbe_5tuple_filter *filter);
310 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
311                                 enum rte_filter_op filter_op,
312                                 void *arg);
313 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
314                         struct rte_eth_ntuple_filter *filter);
315 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
316                                 enum rte_filter_op filter_op,
317                                 void *arg);
318 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
319                         struct rte_eth_ethertype_filter *filter);
320 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
321                      enum rte_filter_type filter_type,
322                      enum rte_filter_op filter_op,
323                      void *arg);
324 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
325
326 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
327                                       struct rte_ether_addr *mc_addr_set,
328                                       uint32_t nb_mc_addr);
329 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
330                                    struct rte_eth_dcb_info *dcb_info);
331
332 static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
333 static int ixgbe_get_regs(struct rte_eth_dev *dev,
334                             struct rte_dev_reg_info *regs);
335 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
336 static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
337                                 struct rte_dev_eeprom_info *eeprom);
338 static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
339                                 struct rte_dev_eeprom_info *eeprom);
340
341 static int ixgbe_get_module_info(struct rte_eth_dev *dev,
342                                  struct rte_eth_dev_module_info *modinfo);
343 static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
344                                    struct rte_dev_eeprom_info *info);
345
346 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
347 static int ixgbevf_get_regs(struct rte_eth_dev *dev,
348                                 struct rte_dev_reg_info *regs);
349
350 static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
351 static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
352 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
353                                             struct timespec *timestamp,
354                                             uint32_t flags);
355 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
356                                             struct timespec *timestamp);
357 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
358 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
359                                    struct timespec *timestamp);
360 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
361                                    const struct timespec *timestamp);
362 static void ixgbevf_dev_interrupt_handler(void *param);
363
364 static int ixgbe_dev_l2_tunnel_eth_type_conf
365         (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
366 static int ixgbe_dev_l2_tunnel_offload_set
367         (struct rte_eth_dev *dev,
368          struct rte_eth_l2_tunnel_conf *l2_tunnel,
369          uint32_t mask,
370          uint8_t en);
371 static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
372                                              enum rte_filter_op filter_op,
373                                              void *arg);
374
375 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
376                                          struct rte_eth_udp_tunnel *udp_tunnel);
377 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
378                                          struct rte_eth_udp_tunnel *udp_tunnel);
379 static int ixgbe_filter_restore(struct rte_eth_dev *dev);
380 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
381
382 /*
383  * Define VF stats macros for registers that are not "clear on read"
384  */
385 #define UPDATE_VF_STAT(reg, last, cur)                          \
386 {                                                               \
387         uint32_t latest = IXGBE_READ_REG(hw, reg);              \
388         cur += (latest - last) & UINT_MAX;                      \
389         last = latest;                                          \
390 }
391
392 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
393 {                                                                \
394         u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
395         u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
396         u64 latest = ((new_msb << 32) | new_lsb);                \
397         cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
398         last = latest;                                           \
399 }
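/*
 * Both macros above accumulate deltas rather than raw register values:
 * UPDATE_VF_STAT tracks a 32-bit counter, while UPDATE_VF_STAT_36BIT
 * stitches a 36-bit counter from its LSB/MSB registers and adds 2^36
 * before masking, so a wrap between reads still yields the right delta.
 */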
400
401 #define IXGBE_SET_HWSTRIP(h, q) do {\
402                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
403                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
404                 (h)->bitmap[idx] |= 1 << bit;\
405         } while (0)
406
407 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\
408                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
409                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
410                 (h)->bitmap[idx] &= ~(1 << bit);\
411         } while (0)
412
413 #define IXGBE_GET_HWSTRIP(h, q, r) do {\
414                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
415                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
416                 (r) = (h)->bitmap[idx] >> bit & 1;\
417         } while (0)
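/*
 * The HWSTRIP macros above keep one bit of VLAN-strip state per queue:
 * queue q maps to bit (q % bits-per-word) of word (q / bits-per-word)
 * in the hwstrip bitmap, where a word holds sizeof(bitmap[0]) * NBBY bits.
 */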
418
419 int ixgbe_logtype_init;
420 int ixgbe_logtype_driver;
421
422 /*
423  * The set of PCI devices this driver supports
424  */
425 static const struct rte_pci_id pci_id_ixgbe_map[] = {
426         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
427         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
428         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
429         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
430         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
431         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
432         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
433         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
434         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
435         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
436         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
437         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
438         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
439         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
440         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
441         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
442         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
443         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
444         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
445         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
446         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
447         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
448         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
449         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
450         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
451         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
452         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
453         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
454         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
455         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
456         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
457         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
458         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
459         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
460         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
461         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
462         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
463         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
464         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
465         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
466         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
467         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
468         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
469         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
470         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
471         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
472         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
473         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) },
474 #ifdef RTE_LIBRTE_IXGBE_BYPASS
475         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
476 #endif
477         { .vendor_id = 0, /* sentinel */ },
478 };
479
480 /*
481  * The set of PCI devices this driver supports (for 82599 VF)
482  */
483 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
484         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
485         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
486         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
487         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
488         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
489         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
490         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
491         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
492         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
493         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
494         { .vendor_id = 0, /* sentinel */ },
495 };
496
497 static const struct rte_eth_desc_lim rx_desc_lim = {
498         .nb_max = IXGBE_MAX_RING_DESC,
499         .nb_min = IXGBE_MIN_RING_DESC,
500         .nb_align = IXGBE_RXD_ALIGN,
501 };
502
503 static const struct rte_eth_desc_lim tx_desc_lim = {
504         .nb_max = IXGBE_MAX_RING_DESC,
505         .nb_min = IXGBE_MIN_RING_DESC,
506         .nb_align = IXGBE_TXD_ALIGN,
507         .nb_seg_max = IXGBE_TX_MAX_SEG,
508         .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
509 };
510
511 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
512         .dev_configure        = ixgbe_dev_configure,
513         .dev_start            = ixgbe_dev_start,
514         .dev_stop             = ixgbe_dev_stop,
515         .dev_set_link_up    = ixgbe_dev_set_link_up,
516         .dev_set_link_down  = ixgbe_dev_set_link_down,
517         .dev_close            = ixgbe_dev_close,
518         .dev_reset            = ixgbe_dev_reset,
519         .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
520         .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
521         .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
522         .allmulticast_disable = ixgbe_dev_allmulticast_disable,
523         .link_update          = ixgbe_dev_link_update,
524         .stats_get            = ixgbe_dev_stats_get,
525         .xstats_get           = ixgbe_dev_xstats_get,
526         .xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
527         .stats_reset          = ixgbe_dev_stats_reset,
528         .xstats_reset         = ixgbe_dev_xstats_reset,
529         .xstats_get_names     = ixgbe_dev_xstats_get_names,
530         .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
531         .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
532         .fw_version_get       = ixgbe_fw_version_get,
533         .dev_infos_get        = ixgbe_dev_info_get,
534         .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
535         .mtu_set              = ixgbe_dev_mtu_set,
536         .vlan_filter_set      = ixgbe_vlan_filter_set,
537         .vlan_tpid_set        = ixgbe_vlan_tpid_set,
538         .vlan_offload_set     = ixgbe_vlan_offload_set,
539         .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
540         .rx_queue_start       = ixgbe_dev_rx_queue_start,
541         .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
542         .tx_queue_start       = ixgbe_dev_tx_queue_start,
543         .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
544         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
545         .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
546         .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
547         .rx_queue_release     = ixgbe_dev_rx_queue_release,
548         .rx_queue_count       = ixgbe_dev_rx_queue_count,
549         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
550         .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
551         .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
552         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
553         .tx_queue_release     = ixgbe_dev_tx_queue_release,
554         .dev_led_on           = ixgbe_dev_led_on,
555         .dev_led_off          = ixgbe_dev_led_off,
556         .flow_ctrl_get        = ixgbe_flow_ctrl_get,
557         .flow_ctrl_set        = ixgbe_flow_ctrl_set,
558         .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
559         .mac_addr_add         = ixgbe_add_rar,
560         .mac_addr_remove      = ixgbe_remove_rar,
561         .mac_addr_set         = ixgbe_set_default_mac_addr,
562         .uc_hash_table_set    = ixgbe_uc_hash_table_set,
563         .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
564         .mirror_rule_set      = ixgbe_mirror_rule_set,
565         .mirror_rule_reset    = ixgbe_mirror_rule_reset,
566         .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
567         .reta_update          = ixgbe_dev_rss_reta_update,
568         .reta_query           = ixgbe_dev_rss_reta_query,
569         .rss_hash_update      = ixgbe_dev_rss_hash_update,
570         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
571         .filter_ctrl          = ixgbe_dev_filter_ctrl,
572         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
573         .rxq_info_get         = ixgbe_rxq_info_get,
574         .txq_info_get         = ixgbe_txq_info_get,
575         .timesync_enable      = ixgbe_timesync_enable,
576         .timesync_disable     = ixgbe_timesync_disable,
577         .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
578         .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
579         .get_reg              = ixgbe_get_regs,
580         .get_eeprom_length    = ixgbe_get_eeprom_length,
581         .get_eeprom           = ixgbe_get_eeprom,
582         .set_eeprom           = ixgbe_set_eeprom,
583         .get_module_info      = ixgbe_get_module_info,
584         .get_module_eeprom    = ixgbe_get_module_eeprom,
585         .get_dcb_info         = ixgbe_dev_get_dcb_info,
586         .timesync_adjust_time = ixgbe_timesync_adjust_time,
587         .timesync_read_time   = ixgbe_timesync_read_time,
588         .timesync_write_time  = ixgbe_timesync_write_time,
589         .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
590         .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
591         .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
592         .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
593         .tm_ops_get           = ixgbe_tm_ops_get,
594 };
595
596 /*
597  * dev_ops for virtual function; only the bare necessities for basic VF
598  * operation have been implemented
599  */
600 static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
601         .dev_configure        = ixgbevf_dev_configure,
602         .dev_start            = ixgbevf_dev_start,
603         .dev_stop             = ixgbevf_dev_stop,
604         .link_update          = ixgbevf_dev_link_update,
605         .stats_get            = ixgbevf_dev_stats_get,
606         .xstats_get           = ixgbevf_dev_xstats_get,
607         .stats_reset          = ixgbevf_dev_stats_reset,
608         .xstats_reset         = ixgbevf_dev_stats_reset,
609         .xstats_get_names     = ixgbevf_dev_xstats_get_names,
610         .dev_close            = ixgbevf_dev_close,
611         .dev_reset            = ixgbevf_dev_reset,
612         .promiscuous_enable   = ixgbevf_dev_promiscuous_enable,
613         .promiscuous_disable  = ixgbevf_dev_promiscuous_disable,
614         .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
615         .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
616         .dev_infos_get        = ixgbevf_dev_info_get,
617         .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
618         .mtu_set              = ixgbevf_dev_set_mtu,
619         .vlan_filter_set      = ixgbevf_vlan_filter_set,
620         .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
621         .vlan_offload_set     = ixgbevf_vlan_offload_set,
622         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
623         .rx_queue_release     = ixgbe_dev_rx_queue_release,
624         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
625         .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
626         .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
627         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
628         .tx_queue_release     = ixgbe_dev_tx_queue_release,
629         .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
630         .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
631         .mac_addr_add         = ixgbevf_add_mac_addr,
632         .mac_addr_remove      = ixgbevf_remove_mac_addr,
633         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
634         .rxq_info_get         = ixgbe_rxq_info_get,
635         .txq_info_get         = ixgbe_txq_info_get,
636         .mac_addr_set         = ixgbevf_set_default_mac_addr,
637         .get_reg              = ixgbevf_get_regs,
638         .reta_update          = ixgbe_dev_rss_reta_update,
639         .reta_query           = ixgbe_dev_rss_reta_query,
640         .rss_hash_update      = ixgbe_dev_rss_hash_update,
641         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
642 };
643
644 /* store statistics names and their offsets in the stats structure */
645 struct rte_ixgbe_xstats_name_off {
646         char name[RTE_ETH_XSTATS_NAME_SIZE];
647         unsigned offset;
648 };
649
650 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
651         {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
652         {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
653         {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
654         {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
655         {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
656         {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
657         {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
658         {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
659         {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
660         {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
661         {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
662         {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
663         {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
664         {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
665         {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
666                 prc1023)},
667         {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
668                 prc1522)},
669         {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
670         {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
671         {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
672         {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
673         {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
674         {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
675         {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
676         {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
677         {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
678         {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
679         {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
680         {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
681         {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
682         {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
683         {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
684         {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
685         {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
686                 ptc1023)},
687         {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
688                 ptc1522)},
689         {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
690         {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
691         {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
692         {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
693
694         {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
695                 fdirustat_add)},
696         {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
697                 fdirustat_remove)},
698         {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
699                 fdirfstat_fadd)},
700         {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
701                 fdirfstat_fremove)},
702         {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
703                 fdirmatch)},
704         {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
705                 fdirmiss)},
706
707         {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
708         {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
709         {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
710                 fclast)},
711         {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
712         {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
713         {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
714         {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
715         {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
716                 fcoe_noddp)},
717         {"rx_fcoe_no_direct_data_placement_ext_buff",
718                 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
719
720         {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
721                 lxontxc)},
722         {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
723                 lxonrxc)},
724         {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
725                 lxofftxc)},
726         {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
727                 lxoffrxc)},
728         {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
729 };
730
731 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
732                            sizeof(rte_ixgbe_stats_strings[0]))
733
734 /* MACsec statistics */
735 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
736         {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
737                 out_pkts_untagged)},
738         {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
739                 out_pkts_encrypted)},
740         {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
741                 out_pkts_protected)},
742         {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
743                 out_octets_encrypted)},
744         {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
745                 out_octets_protected)},
746         {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
747                 in_pkts_untagged)},
748         {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
749                 in_pkts_badtag)},
750         {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
751                 in_pkts_nosci)},
752         {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
753                 in_pkts_unknownsci)},
754         {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
755                 in_octets_decrypted)},
756         {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
757                 in_octets_validated)},
758         {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
759                 in_pkts_unchecked)},
760         {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
761                 in_pkts_delayed)},
762         {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
763                 in_pkts_late)},
764         {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
765                 in_pkts_ok)},
766         {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
767                 in_pkts_invalid)},
768         {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
769                 in_pkts_notvalid)},
770         {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
771                 in_pkts_unusedsa)},
772         {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
773                 in_pkts_notusingsa)},
774 };
775
776 #define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
777                            sizeof(rte_ixgbe_macsec_strings[0]))
778
779 /* Per-queue statistics */
780 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
781         {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
782         {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
783         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
784         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
785 };
786
787 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
788                            sizeof(rte_ixgbe_rxq_strings[0]))
789 #define IXGBE_NB_RXQ_PRIO_VALUES 8
790
791 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
792         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
793         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
794         {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
795                 pxon2offc)},
796 };
797
798 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
799                            sizeof(rte_ixgbe_txq_strings[0]))
800 #define IXGBE_NB_TXQ_PRIO_VALUES 8
801
802 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
803         {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
804 };
805
806 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
807                 sizeof(rte_ixgbevf_stats_strings[0]))
808
809 /*
810  * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
811  */
812 static inline int
813 ixgbe_is_sfp(struct ixgbe_hw *hw)
814 {
815         switch (hw->phy.type) {
816         case ixgbe_phy_sfp_avago:
817         case ixgbe_phy_sfp_ftl:
818         case ixgbe_phy_sfp_intel:
819         case ixgbe_phy_sfp_unknown:
820         case ixgbe_phy_sfp_passive_tyco:
821         case ixgbe_phy_sfp_passive_unknown:
822                 return 1;
823         default:
824                 return 0;
825         }
826 }
827
828 static inline int32_t
829 ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
830 {
831         uint32_t ctrl_ext;
832         int32_t status;
833
834         status = ixgbe_reset_hw(hw);
835
836         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
837         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
838         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
839         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
840         IXGBE_WRITE_FLUSH(hw);
841
842         if (status == IXGBE_ERR_SFP_NOT_PRESENT)
843                 status = IXGBE_SUCCESS;
844         return status;
845 }
846
847 static inline void
848 ixgbe_enable_intr(struct rte_eth_dev *dev)
849 {
850         struct ixgbe_interrupt *intr =
851                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
852         struct ixgbe_hw *hw =
853                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
854
855         IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
856         IXGBE_WRITE_FLUSH(hw);
857 }
858
859 /*
860  * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
861  */
862 static void
863 ixgbe_disable_intr(struct ixgbe_hw *hw)
864 {
865         PMD_INIT_FUNC_TRACE();
866
867         if (hw->mac.type == ixgbe_mac_82598EB) {
868                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
869         } else {
870                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
871                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
872                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
873         }
874         IXGBE_WRITE_FLUSH(hw);
875 }
876
877 /*
878  * This function resets queue statistics mapping registers.
879  * From Niantic datasheet, Initialization of Statistics section:
880  * "...if software requires the queue counters, the RQSMR and TQSM registers
881  * must be re-programmed following a device reset.
882  */
883 static void
884 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
885 {
886         uint32_t i;
887
888         for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
889                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
890                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
891         }
892 }
893
894
895 static int
896 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
897                                   uint16_t queue_id,
898                                   uint8_t stat_idx,
899                                   uint8_t is_rx)
900 {
901 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
902 #define NB_QMAP_FIELDS_PER_QSM_REG 4
903 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
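        /*
         * Each 32-bit RQSMR/TQSM register packs four 8-bit queue-to-counter
         * fields, so queue_id / 4 selects the register and queue_id % 4 the
         * byte within it; the old field is cleared below before the new
         * 4-bit stat index is OR-ed in.
         */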
904
905         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
906         struct ixgbe_stat_mapping_registers *stat_mappings =
907                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
908         uint32_t qsmr_mask = 0;
909         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
910         uint32_t q_map;
911         uint8_t n, offset;
912
913         if ((hw->mac.type != ixgbe_mac_82599EB) &&
914                 (hw->mac.type != ixgbe_mac_X540) &&
915                 (hw->mac.type != ixgbe_mac_X550) &&
916                 (hw->mac.type != ixgbe_mac_X550EM_x) &&
917                 (hw->mac.type != ixgbe_mac_X550EM_a))
918                 return -ENOSYS;
919
920         PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
921                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
922                      queue_id, stat_idx);
923
924         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
925         if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
926                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
927                 return -EIO;
928         }
929         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
930
931         /* Now clear any previous stat_idx set */
932         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
933         if (!is_rx)
934                 stat_mappings->tqsm[n] &= ~clearing_mask;
935         else
936                 stat_mappings->rqsmr[n] &= ~clearing_mask;
937
938         q_map = (uint32_t)stat_idx;
939         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
940         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
941         if (!is_rx)
942                 stat_mappings->tqsm[n] |= qsmr_mask;
943         else
944                 stat_mappings->rqsmr[n] |= qsmr_mask;
945
946         PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
947                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
948                      queue_id, stat_idx);
949         PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
950                      is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
951
952         /* Now write the mapping in the appropriate register */
953         if (is_rx) {
954                 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
955                              stat_mappings->rqsmr[n], n);
956                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
957         } else {
958                 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
959                              stat_mappings->tqsm[n], n);
960                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
961         }
962         return 0;
963 }
964
965 static void
966 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
967 {
968         struct ixgbe_stat_mapping_registers *stat_mappings =
969                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
970         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
971         int i;
972
973         /* write whatever was in stat mapping table to the NIC */
974         for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
975                 /* rx */
976                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
977
978                 /* tx */
979                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
980         }
981 }
982
983 static void
984 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
985 {
986         uint8_t i;
987         struct ixgbe_dcb_tc_config *tc;
988         uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
989
990         dcb_config->num_tcs.pg_tcs = dcb_max_tc;
991         dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
992         for (i = 0; i < dcb_max_tc; i++) {
993                 tc = &dcb_config->tc_config[i];
994                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
995                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
996                                  (uint8_t)(100/dcb_max_tc + (i & 1));
997                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
998                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
999                                  (uint8_t)(100/dcb_max_tc + (i & 1));
1000                 tc->pfc = ixgbe_dcb_pfc_disabled;
1001         }
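        /*
         * 100 / 8 == 12, so the "+ (i & 1)" above alternates 12% and 13%
         * shares and the eight traffic classes sum to exactly 100%.
         */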
1002
1003         /* Initialize default user to priority mapping, UPx->TC0 */
1004         tc = &dcb_config->tc_config[0];
1005         tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
1006         tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
1007         for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
1008                 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
1009                 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
1010         }
1011         dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
1012         dcb_config->pfc_mode_enable = false;
1013         dcb_config->vt_mode = true;
1014         dcb_config->round_robin_enable = false;
1015         /* support all DCB capabilities in 82599 */
1016         dcb_config->support.capabilities = 0xFF;
1017
1018         /* we only support 4 TCs for X540, X550 */
1019         if (hw->mac.type == ixgbe_mac_X540 ||
1020                 hw->mac.type == ixgbe_mac_X550 ||
1021                 hw->mac.type == ixgbe_mac_X550EM_x ||
1022                 hw->mac.type == ixgbe_mac_X550EM_a) {
1023                 dcb_config->num_tcs.pg_tcs = 4;
1024                 dcb_config->num_tcs.pfc_tcs = 4;
1025         }
1026 }
1027
1028 /*
1029  * Ensure that all locks are released before first NVM or PHY access
1030  */
1031 static void
1032 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
1033 {
1034         uint16_t mask;
1035
1036         /*
1037          * The PHY lock should not fail at this early stage. If it does, it is
1038          * because the application exited improperly, so force the release of
1039          * the faulty lock. Release of the common lock is done automatically
1040          * by the swfw_sync function.
1041          */
1042         mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
1043         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1044                 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
1045         }
1046         ixgbe_release_swfw_semaphore(hw, mask);
1047
1048         /*
1049          * These locks are trickier since they are common to all ports; but
1050          * swfw_sync retries for long enough (1s) that if the lock cannot be
1051          * taken, it is almost certainly because the semaphore was left
1052          * improperly locked.
1053          */
1054         mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
1055         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1056                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
1057         }
1058         ixgbe_release_swfw_semaphore(hw, mask);
1059 }
1060
1061 /*
1062  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
1063  * It returns 0 on success.
1064  */
1065 static int
1066 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
1067 {
1068         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1069         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1070         struct ixgbe_hw *hw =
1071                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1072         struct ixgbe_vfta *shadow_vfta =
1073                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1074         struct ixgbe_hwstrip *hwstrip =
1075                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1076         struct ixgbe_dcb_config *dcb_config =
1077                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
1078         struct ixgbe_filter_info *filter_info =
1079                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1080         struct ixgbe_bw_conf *bw_conf =
1081                 IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
1082         uint32_t ctrl_ext;
1083         uint16_t csum;
1084         int diag, i;
1085
1086         PMD_INIT_FUNC_TRACE();
1087
1088         eth_dev->dev_ops = &ixgbe_eth_dev_ops;
1089         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1090         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1091         eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
1092
1093         /*
1094          * For secondary processes, we don't initialise any further as primary
1095          * has already done this work. Only check we don't need a different
1096          * RX and TX function.
1097          */
1098         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1099                 struct ixgbe_tx_queue *txq;
1100                 /* TX queue function in primary, set by last queue initialized;
1101                  * Tx queue may not have been initialized by primary process
1102                  */
1103                 if (eth_dev->data->tx_queues) {
1104                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1105                         ixgbe_set_tx_function(eth_dev, txq);
1106                 } else {
1107                         /* Use default TX function if we get here */
1108                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1109                                      "Using default TX function.");
1110                 }
1111
1112                 ixgbe_set_rx_function(eth_dev);
1113
1114                 return 0;
1115         }
1116
1117         rte_eth_copy_pci_info(eth_dev, pci_dev);
1118
1119         /* Vendor and Device ID need to be set before init of shared code */
1120         hw->device_id = pci_dev->id.device_id;
1121         hw->vendor_id = pci_dev->id.vendor_id;
1122         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1123         hw->allow_unsupported_sfp = 1;
1124
1125         /* Initialize the shared code (base driver) */
1126 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1127         diag = ixgbe_bypass_init_shared_code(hw);
1128 #else
1129         diag = ixgbe_init_shared_code(hw);
1130 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1131
1132         if (diag != IXGBE_SUCCESS) {
1133                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1134                 return -EIO;
1135         }
1136
1137         if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
1138                 PMD_INIT_LOG(ERR, "\nERROR: "
1139                         "Firmware recovery mode detected. Limiting functionality.\n"
1140                         "Refer to the Intel(R) Ethernet Adapters and Devices "
1141                         "User Guide for details on firmware recovery mode.");
1142                 return -EIO;
1143         }
1144
1145         /* pick up the PCI bus settings for reporting later */
1146         ixgbe_get_bus_info(hw);
1147
1148         /* Unlock any pending hardware semaphore */
1149         ixgbe_swfw_lock_reset(hw);
1150
1151 #ifdef RTE_LIBRTE_SECURITY
1152         /* Initialize security_ctx only for primary process*/
1153         if (ixgbe_ipsec_ctx_create(eth_dev))
1154                 return -ENOMEM;
1155 #endif
1156
1157         /* Initialize DCB configuration*/
1158         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1159         ixgbe_dcb_init(hw, dcb_config);
1160         /* Set the default Hardware Flow Control configuration */
1161         hw->fc.requested_mode = ixgbe_fc_full;
1162         hw->fc.current_mode = ixgbe_fc_full;
1163         hw->fc.pause_time = IXGBE_FC_PAUSE;
1164         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1165                 hw->fc.low_water[i] = IXGBE_FC_LO;
1166                 hw->fc.high_water[i] = IXGBE_FC_HI;
1167         }
1168         hw->fc.send_xon = 1;
1169
1170         /* Make sure we have a good EEPROM before we read from it */
1171         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1172         if (diag != IXGBE_SUCCESS) {
1173                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1174                 return -EIO;
1175         }
1176
1177 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1178         diag = ixgbe_bypass_init_hw(hw);
1179 #else
1180         diag = ixgbe_init_hw(hw);
1181 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1182
1183         /*
1184          * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1185          * is called too soon after the kernel driver unbinding/binding occurs.
1186          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1187          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1188          * also called. See ixgbe_identify_phy_82599(). The reason for the
1189          * failure is not known, and it only occurs when virtualisation features
1190          * are disabled in the BIOS. A delay of 100ms was found to be enough by
1191          * trial-and-error, and is doubled to be safe.
1192          */
1193         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1194                 rte_delay_ms(200);
1195                 diag = ixgbe_init_hw(hw);
1196         }
1197
1198         if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
1199                 diag = IXGBE_SUCCESS;
1200
1201         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1202                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1203                              "LOM.  Please be aware there may be issues associated "
1204                              "with your hardware.");
1205                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1206                              "please contact your Intel or hardware representative "
1207                              "who provided you with this hardware.");
1208         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1209                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1210         if (diag) {
1211                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1212                 return -EIO;
1213         }
1214
1215         /* Reset the hw statistics */
1216         ixgbe_dev_stats_reset(eth_dev);
1217
1218         /* disable interrupt */
1219         ixgbe_disable_intr(hw);
1220
1221         /* reset mappings for queue statistics hw counters*/
1222         ixgbe_reset_qstat_mappings(hw);
1223
1224         /* Allocate memory for storing MAC addresses */
1225         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
1226                                                hw->mac.num_rar_entries, 0);
1227         if (eth_dev->data->mac_addrs == NULL) {
1228                 PMD_INIT_LOG(ERR,
1229                              "Failed to allocate %u bytes needed to store "
1230                              "MAC addresses",
1231                              RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1232                 return -ENOMEM;
1233         }
1234         /* Copy the permanent MAC address */
1235         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1236                         &eth_dev->data->mac_addrs[0]);
1237
1238         /* Allocate memory for storing hash filter MAC addresses */
1239         eth_dev->data->hash_mac_addrs = rte_zmalloc(
1240                 "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
1241         if (eth_dev->data->hash_mac_addrs == NULL) {
1242                 PMD_INIT_LOG(ERR,
1243                              "Failed to allocate %d bytes needed to store MAC addresses",
1244                              RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1245                 return -ENOMEM;
1246         }
1247
1248         /* Inform rte_eth_dev_close() that it should also
1249          * release the private port resources.
1250          */
1251         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1252
1253         /* initialize the vfta */
1254         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1255
1256         /* initialize the hw strip bitmap*/
1257         memset(hwstrip, 0, sizeof(*hwstrip));
1258
1259         /* initialize PF if max_vfs not zero */
1260         ixgbe_pf_host_init(eth_dev);
1261
1262         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1263         /* let hardware know driver is loaded */
1264         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1265         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1266         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1267         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1268         IXGBE_WRITE_FLUSH(hw);
1269
1270         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1271                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1272                              (int) hw->mac.type, (int) hw->phy.type,
1273                              (int) hw->phy.sfp_type);
1274         else
1275                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1276                              (int) hw->mac.type, (int) hw->phy.type);
1277
1278         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1279                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1280                      pci_dev->id.device_id);
1281
1282         rte_intr_callback_register(intr_handle,
1283                                    ixgbe_dev_interrupt_handler, eth_dev);
1284
1285         /* enable uio/vfio intr/eventfd mapping */
1286         rte_intr_enable(intr_handle);
1287
1288         /* enable support intr */
1289         ixgbe_enable_intr(eth_dev);
1290
1291         /* initialize filter info */
1292         memset(filter_info, 0,
1293                sizeof(struct ixgbe_filter_info));
1294
1295         /* initialize 5tuple filter list */
1296         TAILQ_INIT(&filter_info->fivetuple_list);
1297
1298         /* initialize flow director filter list & hash */
1299         ixgbe_fdir_filter_init(eth_dev);
1300
1301         /* initialize l2 tunnel filter list & hash */
1302         ixgbe_l2_tn_filter_init(eth_dev);
1303
1304         /* initialize flow filter lists */
1305         ixgbe_filterlist_init();
1306
1307         /* initialize bandwidth configuration info */
1308         memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
1309
1310         /* initialize Traffic Manager configuration */
1311         ixgbe_tm_conf_init(eth_dev);
1312
1313         return 0;
1314 }
1315
1316 static int
1317 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1318 {
1319         PMD_INIT_FUNC_TRACE();
1320
1321         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1322                 return 0;
1323
1324         ixgbe_dev_close(eth_dev);
1325
1326         return 0;
1327 }
1328
1329 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
1330 {
1331         struct ixgbe_filter_info *filter_info =
1332                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1333         struct ixgbe_5tuple_filter *p_5tuple;
1334
1335         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
1336                 TAILQ_REMOVE(&filter_info->fivetuple_list,
1337                              p_5tuple,
1338                              entries);
1339                 rte_free(p_5tuple);
1340         }
1341         memset(filter_info->fivetuple_mask, 0,
1342                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1343
1344         return 0;
1345 }
1346
1347 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
1348 {
1349         struct ixgbe_hw_fdir_info *fdir_info =
1350                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1351         struct ixgbe_fdir_filter *fdir_filter;
1352
1353         if (fdir_info->hash_map)
1354                 rte_free(fdir_info->hash_map);
1355         if (fdir_info->hash_handle)
1356                 rte_hash_free(fdir_info->hash_handle);
1357
1358         while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1359                 TAILQ_REMOVE(&fdir_info->fdir_list,
1360                              fdir_filter,
1361                              entries);
1362                 rte_free(fdir_filter);
1363         }
1364
1365         return 0;
1366 }
1367
1368 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
1369 {
1370         struct ixgbe_l2_tn_info *l2_tn_info =
1371                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1372         struct ixgbe_l2_tn_filter *l2_tn_filter;
1373
1374         if (l2_tn_info->hash_map)
1375                 rte_free(l2_tn_info->hash_map);
1376         if (l2_tn_info->hash_handle)
1377                 rte_hash_free(l2_tn_info->hash_handle);
1378
1379         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
1380                 TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
1381                              l2_tn_filter,
1382                              entries);
1383                 rte_free(l2_tn_filter);
1384         }
1385
1386         return 0;
1387 }
1388
1389 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
1390 {
1391         struct ixgbe_hw_fdir_info *fdir_info =
1392                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1393         char fdir_hash_name[RTE_HASH_NAMESIZE];
1394         struct rte_hash_parameters fdir_hash_params = {
1395                 .name = fdir_hash_name,
1396                 .entries = IXGBE_MAX_FDIR_FILTER_NUM,
1397                 .key_len = sizeof(union ixgbe_atr_input),
1398                 .hash_func = rte_hash_crc,
1399                 .hash_func_init_val = 0,
1400                 .socket_id = rte_socket_id(),
1401         };
1402
1403         TAILQ_INIT(&fdir_info->fdir_list);
1404         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1405                  "fdir_%s", eth_dev->device->name);
1406         fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
1407         if (!fdir_info->hash_handle) {
1408                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1409                 return -EINVAL;
1410         }
1411         fdir_info->hash_map = rte_zmalloc("ixgbe",
1412                                           sizeof(struct ixgbe_fdir_filter *) *
1413                                           IXGBE_MAX_FDIR_FILTER_NUM,
1414                                           0);
1415         if (!fdir_info->hash_map) {
1416                 PMD_INIT_LOG(ERR,
1417                              "Failed to allocate memory for fdir hash map!");
1418                 return -ENOMEM;
1419         }
1420         fdir_info->mask_added = FALSE;
1421
1422         return 0;
1423 }
1424
1425 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
1426 {
1427         struct ixgbe_l2_tn_info *l2_tn_info =
1428                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1429         char l2_tn_hash_name[RTE_HASH_NAMESIZE];
1430         struct rte_hash_parameters l2_tn_hash_params = {
1431                 .name = l2_tn_hash_name,
1432                 .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
1433                 .key_len = sizeof(struct ixgbe_l2_tn_key),
1434                 .hash_func = rte_hash_crc,
1435                 .hash_func_init_val = 0,
1436                 .socket_id = rte_socket_id(),
1437         };
1438
1439         TAILQ_INIT(&l2_tn_info->l2_tn_list);
1440         snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
1441                  "l2_tn_%s", eth_dev->device->name);
1442         l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
1443         if (!l2_tn_info->hash_handle) {
1444                 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
1445                 return -EINVAL;
1446         }
1447         l2_tn_info->hash_map = rte_zmalloc("ixgbe",
1448                                    sizeof(struct ixgbe_l2_tn_filter *) *
1449                                    IXGBE_MAX_L2_TN_FILTER_NUM,
1450                                    0);
1451         if (!l2_tn_info->hash_map) {
1452                 PMD_INIT_LOG(ERR,
1453                         "Failed to allocate memory for L2 TN hash map!");
1454                 return -ENOMEM;
1455         }
1456         l2_tn_info->e_tag_en = FALSE;
1457         l2_tn_info->e_tag_fwd_en = FALSE;
1458         l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
1459
1460         return 0;
1461 }
1462 /*
1463  * Negotiate mailbox API version with the PF.
1464  * After a reset the API version is always set to the basic one (ixgbe_mbox_api_10).
1465  * Then we try to negotiate, starting with the most recent one.
1466  * If all negotiation attempts fail, then we will proceed with
1467  * the default one (ixgbe_mbox_api_10).
1468  */
1469 static void
1470 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1471 {
1472         int32_t i;
1473
1474         /* start with highest supported, proceed down */
1475         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1476                 ixgbe_mbox_api_13,
1477                 ixgbe_mbox_api_12,
1478                 ixgbe_mbox_api_11,
1479                 ixgbe_mbox_api_10,
1480         };
1481
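        /* Walk the list in order and stop at the first version the PF
         * accepts; if none is accepted, the mailbox stays at the basic
         * ixgbe_mbox_api_10 set by the reset.
         */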
1482         for (i = 0;
1483                         i != RTE_DIM(sup_ver) &&
1484                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1485                         i++)
1486                 ;
1487 }
1488
1489 static void
1490 generate_random_mac_addr(struct rte_ether_addr *mac_addr)
1491 {
1492         uint64_t random;
1493
1494         /* Set Organizationally Unique Identifier (OUI) prefix. */
1495         mac_addr->addr_bytes[0] = 0x00;
1496         mac_addr->addr_bytes[1] = 0x09;
1497         mac_addr->addr_bytes[2] = 0xC0;
1498         /* Force indication of locally assigned MAC address. */
1499         mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
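        /*
         * With the OUI above and the locally administered bit (0x02) set,
         * the generated address has the form 02:09:C0:xx:xx:xx.
         */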
1500         /* Generate the last 3 bytes of the MAC address with a random number. */
1501         random = rte_rand();
1502         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1503 }
1504
1505 static int
1506 devarg_handle_int(__rte_unused const char *key, const char *value,
1507                   void *extra_args)
1508 {
1509         uint16_t *n = extra_args;
1510
1511         if (value == NULL || extra_args == NULL)
1512                 return -EINVAL;
1513
1514         *n = (uint16_t)strtoul(value, NULL, 0);
1515         if (*n == USHRT_MAX && errno == ERANGE)
1516                 return -1;
1517
1518         return 0;
1519 }
1520
1521 static void
1522 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter,
1523                       struct rte_devargs *devargs)
1524 {
1525         struct rte_kvargs *kvlist;
1526         uint16_t pflink_fullchk;
1527
1528         if (devargs == NULL)
1529                 return;
1530
1531         kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments);
1532         if (kvlist == NULL)
1533                 return;
1534
1535         if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 &&
1536             rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK,
1537                                devarg_handle_int, &pflink_fullchk) == 0 &&
1538             pflink_fullchk == 1)
1539                 adapter->pflink_fullchk = 1;
1540
1541         rte_kvargs_free(kvlist);
1542 }
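/*
 * Usage sketch (assuming the key string behind IXGBEVF_DEVARG_PFLINK_FULLCHK
 * is "pflink_fullchk"): the option is passed as a device argument together
 * with the VF's PCI address on the EAL command line, for example
 *   -w 0000:02:00.1,pflink_fullchk=1
 * which requests a fuller check of the PF link status during link updates.
 */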
1543
1544 /*
1545  * Virtual Function device init
1546  */
1547 static int
1548 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1549 {
1550         int diag;
1551         uint32_t tc, tcs;
1552         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1553         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1554         struct ixgbe_hw *hw =
1555                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1556         struct ixgbe_vfta *shadow_vfta =
1557                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1558         struct ixgbe_hwstrip *hwstrip =
1559                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1560         struct rte_ether_addr *perm_addr =
1561                 (struct rte_ether_addr *)hw->mac.perm_addr;
1562
1563         PMD_INIT_FUNC_TRACE();
1564
1565         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1566         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1567         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1568
1569         /* for secondary processes, we don't initialise any further as primary
1570          * has already done this work. Only check we don't need a different
1571          * RX function
1572          */
1573         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1574                 struct ixgbe_tx_queue *txq;
1575                 /* TX queue function in primary, set by the last queue initialized.
1576                  * Tx queue may not have been initialized by the primary process.
1577                  */
1578                 if (eth_dev->data->tx_queues) {
1579                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1580                         ixgbe_set_tx_function(eth_dev, txq);
1581                 } else {
1582                         /* Use default TX function if we get here */
1583                         PMD_INIT_LOG(NOTICE,
1584                                      "No TX queues configured yet. Using default TX function.");
1585                 }
1586
1587                 ixgbe_set_rx_function(eth_dev);
1588
1589                 return 0;
1590         }
1591
1592         ixgbevf_parse_devargs(eth_dev->data->dev_private,
1593                               pci_dev->device.devargs);
1594
1595         rte_eth_copy_pci_info(eth_dev, pci_dev);
1596
1597         hw->device_id = pci_dev->id.device_id;
1598         hw->vendor_id = pci_dev->id.vendor_id;
1599         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1600
1601         /* initialize the vfta */
1602         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1603
1604         /* initialize the hw strip bitmap*/
1605         memset(hwstrip, 0, sizeof(*hwstrip));
1606
1607         /* Initialize the shared code (base driver) */
1608         diag = ixgbe_init_shared_code(hw);
1609         if (diag != IXGBE_SUCCESS) {
1610                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1611                 return -EIO;
1612         }
1613
1614         /* init_mailbox_params */
1615         hw->mbx.ops.init_params(hw);
1616
1617         /* Reset the hw statistics */
1618         ixgbevf_dev_stats_reset(eth_dev);
1619
1620         /* Disable the interrupts for VF */
1621         ixgbevf_intr_disable(eth_dev);
1622
1623         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1624         diag = hw->mac.ops.reset_hw(hw);
1625
1626         /*
1627          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1628          * the underlying PF driver has not assigned a MAC address to the VF.
1629          * In this case, assign a random MAC address.
1630          */
1631         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1632                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1633                 /*
1634                  * This error code will be propagated to the app by
1635                  * rte_eth_dev_reset, so use a public error code rather than
1636                  * the internal-only IXGBE_ERR_RESET_FAILED
1637                  */
1638                 return -EAGAIN;
1639         }
1640
1641         /* negotiate mailbox API version to use with the PF. */
1642         ixgbevf_negotiate_api(hw);
1643
1644         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1645         ixgbevf_get_queues(hw, &tcs, &tc);
1646
1647         /* Allocate memory for storing MAC addresses */
1648         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN *
1649                                                hw->mac.num_rar_entries, 0);
1650         if (eth_dev->data->mac_addrs == NULL) {
1651                 PMD_INIT_LOG(ERR,
1652                              "Failed to allocate %u bytes needed to store "
1653                              "MAC addresses",
1654                              RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1655                 return -ENOMEM;
1656         }
1657
1658         /* Inform rte_eth_dev_close() that it should also
1659          * release the private port resources.
1660          */
1661         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1662
1663         /* Generate a random MAC address, if none was assigned by PF. */
1664         if (rte_is_zero_ether_addr(perm_addr)) {
1665                 generate_random_mac_addr(perm_addr);
1666                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1667                 if (diag) {
1668                         rte_free(eth_dev->data->mac_addrs);
1669                         eth_dev->data->mac_addrs = NULL;
1670                         return diag;
1671                 }
1672                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1673                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1674                              "%02x:%02x:%02x:%02x:%02x:%02x",
1675                              perm_addr->addr_bytes[0],
1676                              perm_addr->addr_bytes[1],
1677                              perm_addr->addr_bytes[2],
1678                              perm_addr->addr_bytes[3],
1679                              perm_addr->addr_bytes[4],
1680                              perm_addr->addr_bytes[5]);
1681         }
1682
1683         /* Copy the permanent MAC address */
1684         rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1685
1686         /* reset the hardware with the new settings */
1687         diag = hw->mac.ops.start_hw(hw);
1688         switch (diag) {
1689         case  0:
1690                 break;
1691
1692         default:
1693                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1694                 return -EIO;
1695         }
1696
1697         rte_intr_callback_register(intr_handle,
1698                                    ixgbevf_dev_interrupt_handler, eth_dev);
1699         rte_intr_enable(intr_handle);
1700         ixgbevf_intr_enable(eth_dev);
1701
1702         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1703                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1704                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1705
1706         return 0;
1707 }
1708
1709 /* Virtual Function device uninit */
1710
1711 static int
1712 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1713 {
1714         PMD_INIT_FUNC_TRACE();
1715
1716         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1717                 return 0;
1718
1719         ixgbevf_dev_close(eth_dev);
1720
1721         return 0;
1722 }
1723
1724 static int
1725 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1726                 struct rte_pci_device *pci_dev)
1727 {
1728         char name[RTE_ETH_NAME_MAX_LEN];
1729         struct rte_eth_dev *pf_ethdev;
1730         struct rte_eth_devargs eth_da;
1731         int i, retval;
1732
1733         if (pci_dev->device.devargs) {
1734                 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
1735                                 &eth_da);
1736                 if (retval)
1737                         return retval;
1738         } else
1739                 memset(&eth_da, 0, sizeof(eth_da));
1740
1741         retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
1742                 sizeof(struct ixgbe_adapter),
1743                 eth_dev_pci_specific_init, pci_dev,
1744                 eth_ixgbe_dev_init, NULL);
1745
1746         if (retval || eth_da.nb_representor_ports < 1)
1747                 return retval;
1748
1749         pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1750         if (pf_ethdev == NULL)
1751                 return -ENODEV;
1752
1753         /* probe VF representor ports */
1754         for (i = 0; i < eth_da.nb_representor_ports; i++) {
1755                 struct ixgbe_vf_info *vfinfo;
1756                 struct ixgbe_vf_representor representor;
1757
1758                 vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
1759                         pf_ethdev->data->dev_private);
1760                 if (vfinfo == NULL) {
1761                         PMD_DRV_LOG(ERR,
1762                                 "no virtual functions supported by PF");
1763                         break;
1764                 }
1765
1766                 representor.vf_id = eth_da.representor_ports[i];
1767                 representor.switch_domain_id = vfinfo->switch_domain_id;
1768                 representor.pf_ethdev = pf_ethdev;
1769
1770                 /* representor port name: net_<pci bdf>_representor_<vf id> */
1771                 snprintf(name, sizeof(name), "net_%s_representor_%d",
1772                         pci_dev->device.name,
1773                         eth_da.representor_ports[i]);
1774
1775                 retval = rte_eth_dev_create(&pci_dev->device, name,
1776                         sizeof(struct ixgbe_vf_representor), NULL, NULL,
1777                         ixgbe_vf_representor_init, &representor);
1778
1779                 if (retval)
1780                         PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
1781                                 "representor %s.", name);
1782         }
1783
1784         return 0;
1785 }
1786
1787 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
1788 {
1789         struct rte_eth_dev *ethdev;
1790
1791         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1792         if (!ethdev)
1793                 return -ENODEV;
1794
1795         if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
1796                 return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit);
1797         else
1798                 return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit);
1799 }
1800
1801 static struct rte_pci_driver rte_ixgbe_pmd = {
1802         .id_table = pci_id_ixgbe_map,
1803         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1804         .probe = eth_ixgbe_pci_probe,
1805         .remove = eth_ixgbe_pci_remove,
1806 };
1807
1808 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1809         struct rte_pci_device *pci_dev)
1810 {
1811         return rte_eth_dev_pci_generic_probe(pci_dev,
1812                 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
1813 }
1814
1815 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
1816 {
1817         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
1818 }
1819
1820 /*
1821  * virtual function driver struct
1822  */
1823 static struct rte_pci_driver rte_ixgbevf_pmd = {
1824         .id_table = pci_id_ixgbevf_map,
1825         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1826         .probe = eth_ixgbevf_pci_probe,
1827         .remove = eth_ixgbevf_pci_remove,
1828 };
1829
1830 static int
1831 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1832 {
1833         struct ixgbe_hw *hw =
1834                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1835         struct ixgbe_vfta *shadow_vfta =
1836                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1837         uint32_t vfta;
1838         uint32_t vid_idx;
1839         uint32_t vid_bit;
1840
1841         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1842         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
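        /*
         * The 4096-entry VLAN filter table is spread over 128 32-bit VFTA
         * registers: e.g. VLAN ID 100 maps to register index 3 (100 >> 5)
         * and bit 4 (100 & 0x1F).
         */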
1843         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1844         if (on)
1845                 vfta |= vid_bit;
1846         else
1847                 vfta &= ~vid_bit;
1848         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1849
1850         /* update local VFTA copy */
1851         shadow_vfta->vfta[vid_idx] = vfta;
1852
1853         return 0;
1854 }
1855
1856 static void
1857 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1858 {
1859         if (on)
1860                 ixgbe_vlan_hw_strip_enable(dev, queue);
1861         else
1862                 ixgbe_vlan_hw_strip_disable(dev, queue);
1863 }
1864
1865 static int
1866 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1867                     enum rte_vlan_type vlan_type,
1868                     uint16_t tpid)
1869 {
1870         struct ixgbe_hw *hw =
1871                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1872         int ret = 0;
1873         uint32_t reg;
1874         uint32_t qinq;
1875
1876         qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1877         qinq &= IXGBE_DMATXCTL_GDV;
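        /* qinq is non-zero when double VLAN (QinQ) has been enabled via the
         * GDV bit of DMATXCTL, see ixgbe_vlan_hw_extend_enable().
         */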
1878
1879         switch (vlan_type) {
1880         case ETH_VLAN_TYPE_INNER:
1881                 if (qinq) {
1882                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1883                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1884                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1885                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1886                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1887                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1888                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1889                 } else {
1890                         ret = -ENOTSUP;
1891                         PMD_DRV_LOG(ERR, "Inner type is not supported"
1892                                     " by single VLAN");
1893                 }
1894                 break;
1895         case ETH_VLAN_TYPE_OUTER:
1896                 if (qinq) {
1897                         /* Only the high 16 bits are valid */
1898                         IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1899                                         IXGBE_EXVET_VET_EXT_SHIFT);
1900                 } else {
1901                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1902                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1903                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1904                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1905                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1906                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1907                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1908                 }
1909
1910                 break;
1911         default:
1912                 ret = -EINVAL;
1913                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1914                 break;
1915         }
1916
1917         return ret;
1918 }
1919
1920 void
1921 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1922 {
1923         struct ixgbe_hw *hw =
1924                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1925         uint32_t vlnctrl;
1926
1927         PMD_INIT_FUNC_TRACE();
1928
1929         /* Filter Table Disable */
1930         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1931         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1932
1933         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1934 }
1935
1936 void
1937 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1938 {
1939         struct ixgbe_hw *hw =
1940                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1941         struct ixgbe_vfta *shadow_vfta =
1942                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1943         uint32_t vlnctrl;
1944         uint16_t i;
1945
1946         PMD_INIT_FUNC_TRACE();
1947
1948         /* Filter Table Enable */
1949         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1950         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1951         vlnctrl |= IXGBE_VLNCTRL_VFE;
1952
1953         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1954
1955         /* write whatever is in local vfta copy */
1956         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1957                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1958 }
1959
1960 static void
1961 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1962 {
1963         struct ixgbe_hwstrip *hwstrip =
1964                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1965         struct ixgbe_rx_queue *rxq;
1966
1967         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1968                 return;
1969
1970         if (on)
1971                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1972         else
1973                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1974
1975         if (queue >= dev->data->nb_rx_queues)
1976                 return;
1977
1978         rxq = dev->data->rx_queues[queue];
1979
1980         if (on) {
1981                 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1982                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1983         } else {
1984                 rxq->vlan_flags = PKT_RX_VLAN;
1985                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1986         }
1987 }
1988
1989 static void
1990 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1991 {
1992         struct ixgbe_hw *hw =
1993                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1994         uint32_t ctrl;
1995
1996         PMD_INIT_FUNC_TRACE();
1997
1998         if (hw->mac.type == ixgbe_mac_82598EB) {
1999                 /* No queue-level support */
2000                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
2001                 return;
2002         }
2003
2004         /* On other 10G NICs, VLAN stripping can be set up per queue in RXDCTL */
2005         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2006         ctrl &= ~IXGBE_RXDCTL_VME;
2007         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2008
2009         /* record this setting for per-queue HW strip */
2010         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
2011 }
2012
2013 static void
2014 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
2015 {
2016         struct ixgbe_hw *hw =
2017                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2018         uint32_t ctrl;
2019
2020         PMD_INIT_FUNC_TRACE();
2021
2022         if (hw->mac.type == ixgbe_mac_82598EB) {
2023                 /* No queue-level support */
2024                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
2025                 return;
2026         }
2027
2028         /* On other 10G NICs, VLAN stripping can be set up per queue in RXDCTL */
2029         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2030         ctrl |= IXGBE_RXDCTL_VME;
2031         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2032
2033         /* record this setting for per-queue HW strip */
2034         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2035 }
2036
2037 static void
2038 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2039 {
2040         struct ixgbe_hw *hw =
2041                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2042         uint32_t ctrl;
2043
2044         PMD_INIT_FUNC_TRACE();
2045
2046         /* DMATXCTL: Generic Double VLAN Disable */
2047         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2048         ctrl &= ~IXGBE_DMATXCTL_GDV;
2049         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2050
2051         /* CTRL_EXT: Global Double VLAN Disable */
2052         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2053         ctrl &= ~IXGBE_EXTENDED_VLAN;
2054         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2055
2056 }
2057
2058 static void
2059 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2060 {
2061         struct ixgbe_hw *hw =
2062                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2063         uint32_t ctrl;
2064
2065         PMD_INIT_FUNC_TRACE();
2066
2067         /* DMATXCTL: Generic Double VLAN Enable */
2068         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2069         ctrl |= IXGBE_DMATXCTL_GDV;
2070         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2071
2072         /* CTRL_EXT: Global Double VLAN Enable */
2073         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2074         ctrl |= IXGBE_EXTENDED_VLAN;
2075         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2076
2077         /* Clear pooling mode of PFVTCTL. It's required by X550. */
2078         if (hw->mac.type == ixgbe_mac_X550 ||
2079             hw->mac.type == ixgbe_mac_X550EM_x ||
2080             hw->mac.type == ixgbe_mac_X550EM_a) {
2081                 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2082                 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2083                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2084         }
2085
2086         /*
2087          * The VET EXT field in the EXVET register is 0x8100 by default, so no
2088          * need to change it. The same applies to the VT field of DMATXCTL.
2089          */
2090 }
2091
2092 void
2093 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
2094 {
2095         struct ixgbe_hw *hw =
2096                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2097         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2098         uint32_t ctrl;
2099         uint16_t i;
2100         struct ixgbe_rx_queue *rxq;
2101         bool on;
2102
2103         PMD_INIT_FUNC_TRACE();
2104
2105         if (hw->mac.type == ixgbe_mac_82598EB) {
2106                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
2107                         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2108                         ctrl |= IXGBE_VLNCTRL_VME;
2109                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2110                 } else {
2111                         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2112                         ctrl &= ~IXGBE_VLNCTRL_VME;
2113                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2114                 }
2115         } else {
2116                 /*
2117                  * On other 10G NICs, VLAN stripping can be set up
2118                  * per queue in RXDCTL
2119                  */
2120                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2121                         rxq = dev->data->rx_queues[i];
2122                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2123                         if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
2124                                 ctrl |= IXGBE_RXDCTL_VME;
2125                                 on = TRUE;
2126                         } else {
2127                                 ctrl &= ~IXGBE_RXDCTL_VME;
2128                                 on = FALSE;
2129                         }
2130                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2131
2132                         /* record this setting for per-queue HW strip */
2133                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
2134                 }
2135         }
2136 }
2137
2138 static void
2139 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
2140 {
2141         uint16_t i;
2142         struct rte_eth_rxmode *rxmode;
2143         struct ixgbe_rx_queue *rxq;
2144
2145         if (mask & ETH_VLAN_STRIP_MASK) {
2146                 rxmode = &dev->data->dev_conf.rxmode;
2147                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2148                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2149                                 rxq = dev->data->rx_queues[i];
2150                                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2151                         }
2152                 else
2153                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2154                                 rxq = dev->data->rx_queues[i];
2155                                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2156                         }
2157         }
2158 }
2159
2160 static int
2161 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
2162 {
2163         struct rte_eth_rxmode *rxmode;
2164         rxmode = &dev->data->dev_conf.rxmode;
2165
2166         if (mask & ETH_VLAN_STRIP_MASK) {
2167                 ixgbe_vlan_hw_strip_config(dev);
2168         }
2169
2170         if (mask & ETH_VLAN_FILTER_MASK) {
2171                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2172                         ixgbe_vlan_hw_filter_enable(dev);
2173                 else
2174                         ixgbe_vlan_hw_filter_disable(dev);
2175         }
2176
2177         if (mask & ETH_VLAN_EXTEND_MASK) {
2178                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2179                         ixgbe_vlan_hw_extend_enable(dev);
2180                 else
2181                         ixgbe_vlan_hw_extend_disable(dev);
2182         }
2183
2184         return 0;
2185 }
2186
2187 static int
2188 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2189 {
2190         ixgbe_config_vlan_strip_on_all_queues(dev, mask);
2191
2192         ixgbe_vlan_offload_config(dev, mask);
2193
2194         return 0;
2195 }
2196
2197 static void
2198 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2199 {
2200         struct ixgbe_hw *hw =
2201                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2202         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2203         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2204
2205         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2206         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2207 }
2208
2209 static int
2210 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2211 {
2212         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2213
2214         switch (nb_rx_q) {
2215         case 1:
2216         case 2:
2217                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2218                 break;
2219         case 4:
2220                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2221                 break;
2222         default:
2223                 return -EINVAL;
2224         }
2225
2226         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
2227                 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2228         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
2229                 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
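        /*
         * Worked example: with nb_rx_q == 4 the device runs in 32-pool mode,
         * so nb_q_per_pool = 128 / 32 = 4; with e.g. 16 VFs the PF's default
         * pool then starts at queue index 16 * 4 = 64.
         */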
2230         return 0;
2231 }
2232
2233 static int
2234 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2235 {
2236         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2237         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2238         uint16_t nb_rx_q = dev->data->nb_rx_queues;
2239         uint16_t nb_tx_q = dev->data->nb_tx_queues;
2240
2241         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2242                 /* check multi-queue mode */
2243                 switch (dev_conf->rxmode.mq_mode) {
2244                 case ETH_MQ_RX_VMDQ_DCB:
2245                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2246                         break;
2247                 case ETH_MQ_RX_VMDQ_DCB_RSS:
2248                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
2249                         PMD_INIT_LOG(ERR, "SRIOV active,"
2250                                         " unsupported mq_mode rx %d.",
2251                                         dev_conf->rxmode.mq_mode);
2252                         return -EINVAL;
2253                 case ETH_MQ_RX_RSS:
2254                 case ETH_MQ_RX_VMDQ_RSS:
2255                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2256                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2257                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2258                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2259                                                 " invalid queue number"
2260                                                 " for VMDQ RSS, allowed"
2261                                                 " values are 1, 2 or 4.");
2262                                         return -EINVAL;
2263                                 }
2264                         break;
2265                 case ETH_MQ_RX_VMDQ_ONLY:
2266                 case ETH_MQ_RX_NONE:
2267                         /* if no mq mode is configured, use the default scheme */
2268                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2269                         break;
2270                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
2271                         /* SRIOV only works in VMDq enable mode */
2272                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2273                                         " wrong mq_mode rx %d.",
2274                                         dev_conf->rxmode.mq_mode);
2275                         return -EINVAL;
2276                 }
2277
2278                 switch (dev_conf->txmode.mq_mode) {
2279                 case ETH_MQ_TX_VMDQ_DCB:
2280                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2281                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2282                         break;
2283                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2284                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2285                         break;
2286                 }
2287
2288                 /* check valid queue number */
2289                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2290                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2291                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2292                                         " nb_rx_q=%d nb_tx_q=%d queue number"
2293                                         " must be less than or equal to %d.",
2294                                         nb_rx_q, nb_tx_q,
2295                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2296                         return -EINVAL;
2297                 }
2298         } else {
2299                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2300                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2301                                           " not supported.");
2302                         return -EINVAL;
2303                 }
2304                 /* check configuration for vmdq+dcb mode */
2305                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2306                         const struct rte_eth_vmdq_dcb_conf *conf;
2307
2308                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2309                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2310                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
2311                                 return -EINVAL;
2312                         }
2313                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2314                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2315                                conf->nb_queue_pools == ETH_32_POOLS)) {
2316                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2317                                                 " nb_queue_pools must be %d or %d.",
2318                                                 ETH_16_POOLS, ETH_32_POOLS);
2319                                 return -EINVAL;
2320                         }
2321                 }
2322                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2323                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
2324
2325                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2326                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2327                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
2328                                 return -EINVAL;
2329                         }
2330                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2331                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2332                                conf->nb_queue_pools == ETH_32_POOLS)) {
2333                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2334                                                 " nb_queue_pools != %d and"
2335                                                 " nb_queue_pools != %d.",
2336                                                 ETH_16_POOLS, ETH_32_POOLS);
2337                                 return -EINVAL;
2338                         }
2339                 }
2340
2341                 /* For DCB mode check our configuration before we go further */
2342                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2343                         const struct rte_eth_dcb_rx_conf *conf;
2344
2345                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2346                         if (!(conf->nb_tcs == ETH_4_TCS ||
2347                                conf->nb_tcs == ETH_8_TCS)) {
2348                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2349                                                 " and nb_tcs != %d.",
2350                                                 ETH_4_TCS, ETH_8_TCS);
2351                                 return -EINVAL;
2352                         }
2353                 }
2354
2355                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2356                         const struct rte_eth_dcb_tx_conf *conf;
2357
2358                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2359                         if (!(conf->nb_tcs == ETH_4_TCS ||
2360                                conf->nb_tcs == ETH_8_TCS)) {
2361                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2362                                                 " and nb_tcs != %d.",
2363                                                 ETH_4_TCS, ETH_8_TCS);
2364                                 return -EINVAL;
2365                         }
2366                 }
2367
2368                 /*
2369                  * When DCB/VT is off, maximum number of queues changes,
2370                  * except for 82598EB, which remains constant.
2371                  */
2372                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2373                                 hw->mac.type != ixgbe_mac_82598EB) {
2374                         if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2375                                 PMD_INIT_LOG(ERR,
2376                                              "Neither VT nor DCB are enabled, "
2377                                              "nb_tx_q > %d.",
2378                                              IXGBE_NONE_MODE_TX_NB_QUEUES);
2379                                 return -EINVAL;
2380                         }
2381                 }
2382         }
2383         return 0;
2384 }
2385
2386 static int
2387 ixgbe_dev_configure(struct rte_eth_dev *dev)
2388 {
2389         struct ixgbe_interrupt *intr =
2390                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2391         struct ixgbe_adapter *adapter = dev->data->dev_private;
2392         int ret;
2393
2394         PMD_INIT_FUNC_TRACE();
2395         /* multiple queue mode checking */
2396         ret  = ixgbe_check_mq_mode(dev);
2397         if (ret != 0) {
2398                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2399                             ret);
2400                 return ret;
2401         }
2402
2403         /* set flag to update link status after init */
2404         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2405
2406         /*
2407          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
2408          * allocation or vector Rx preconditions, we will reset it.
2409          */
2410         adapter->rx_bulk_alloc_allowed = true;
2411         adapter->rx_vec_allowed = true;
2412
2413         return 0;
2414 }
2415
2416 static void
2417 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2418 {
2419         struct ixgbe_hw *hw =
2420                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2421         struct ixgbe_interrupt *intr =
2422                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2423         uint32_t gpie;
2424
2425         /* only set it up on X550EM_X */
2426         if (hw->mac.type == ixgbe_mac_X550EM_x) {
2427                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2428                 gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2429                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2430                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2431                         intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2432         }
2433 }
2434
2435 int
2436 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
2437                         uint16_t tx_rate, uint64_t q_msk)
2438 {
2439         struct ixgbe_hw *hw;
2440         struct ixgbe_vf_info *vfinfo;
2441         struct rte_eth_link link;
2442         uint8_t  nb_q_per_pool;
2443         uint32_t queue_stride;
2444         uint32_t queue_idx, idx = 0, vf_idx;
2445         uint32_t queue_end;
2446         uint16_t total_rate = 0;
2447         struct rte_pci_device *pci_dev;
2448         int ret;
2449
2450         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2451         ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
2452         if (ret < 0)
2453                 return ret;
2454
2455         if (vf >= pci_dev->max_vfs)
2456                 return -EINVAL;
2457
2458         if (tx_rate > link.link_speed)
2459                 return -EINVAL;
2460
2461         if (q_msk == 0)
2462                 return 0;
2463
2464         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2465         vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
2466         nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2467         queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2468         queue_idx = vf * queue_stride;
2469         queue_end = queue_idx + nb_q_per_pool - 1;
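        /*
         * Example: with 32 pools active the stride is 128 / 32 = 4, so VF 2
         * owns queues 8..11 (assuming nb_q_per_pool is also 4); the rate
         * limit below is applied to each of those queues selected by q_msk.
         */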
2470         if (queue_end >= hw->mac.max_tx_queues)
2471                 return -EINVAL;
2472
2473         if (vfinfo) {
2474                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
2475                         if (vf_idx == vf)
2476                                 continue;
2477                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
2478                                 idx++)
2479                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
2480                 }
2481         } else {
2482                 return -EINVAL;
2483         }
2484
2485         /* Store tx_rate for this vf. */
2486         for (idx = 0; idx < nb_q_per_pool; idx++) {
2487                 if (((uint64_t)0x1 << idx) & q_msk) {
2488                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
2489                                 vfinfo[vf].tx_rate[idx] = tx_rate;
2490                         total_rate += tx_rate;
2491                 }
2492         }
2493
2494         if (total_rate > dev->data->dev_link.link_speed) {
2495         /* Reset the stored TX rate of the VF if it causes the link
2496          * speed to be exceeded.
2497          */
2498                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
2499                 return -EINVAL;
2500         }
2501
2502         /* Set RTTBCNRC of each queue/pool for vf X  */
2503         for (; queue_idx <= queue_end; queue_idx++) {
2504                 if (0x1 & q_msk)
2505                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
2506                 q_msk = q_msk >> 1;
2507         }
2508
2509         return 0;
2510 }
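/*
 * Worked example for the queue-range math above (hypothetical numbers, not
 * taken from any specific setup): with RTE_ETH_DEV_SRIOV(dev).active == 32
 * pools, queue_stride = IXGBE_MAX_RX_QUEUE_NUM / 32 = 4. For vf == 3 this
 * gives queue_idx = 12 and, with nb_q_per_pool == 4, queue_end = 15, so bit i
 * of q_msk selects queue 12 + i when programming RTTBCNRC.
 */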
2511
2512 /*
2513  * Configure device link speed and setup link.
2514  * It returns 0 on success.
2515  */
2516 static int
2517 ixgbe_dev_start(struct rte_eth_dev *dev)
2518 {
2519         struct ixgbe_hw *hw =
2520                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2521         struct ixgbe_vf_info *vfinfo =
2522                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2523         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2524         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2525         uint32_t intr_vector = 0;
2526         int err, link_up = 0, negotiate = 0;
2527         uint32_t speed = 0;
2528         uint32_t allowed_speeds = 0;
2529         int mask = 0;
2530         int status;
2531         uint16_t vf, idx;
2532         uint32_t *link_speeds;
2533         struct ixgbe_tm_conf *tm_conf =
2534                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2535
2536         PMD_INIT_FUNC_TRACE();
2537
2538         /* IXGBE devices don't support:
2539          *    - half duplex (checked afterwards for valid speeds)
2540          *    - fixed speed: TODO implement
2541          */
2542         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2543                 PMD_INIT_LOG(ERR,
2544                 "Invalid link_speeds for port %u, fixed speed not supported",
2545                                 dev->data->port_id);
2546                 return -EINVAL;
2547         }
2548
2549         /* Stop the link setup handler before resetting the HW. */
2550         rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
2551
2552         /* disable uio/vfio intr/eventfd mapping */
2553         rte_intr_disable(intr_handle);
2554
2555         /* stop adapter */
2556         hw->adapter_stopped = 0;
2557         ixgbe_stop_adapter(hw);
2558
2559         /* reinitialize adapter
2560          * this calls reset and start
2561          */
2562         status = ixgbe_pf_reset_hw(hw);
2563         if (status != 0)
2564                 return -1;
2565         hw->mac.ops.start_hw(hw);
2566         hw->mac.get_link_status = true;
2567
2568         /* configure PF module if SRIOV enabled */
2569         ixgbe_pf_host_configure(dev);
2570
2571         ixgbe_dev_phy_intr_setup(dev);
2572
2573         /* check and configure queue intr-vector mapping */
2574         if ((rte_intr_cap_multiple(intr_handle) ||
2575              !RTE_ETH_DEV_SRIOV(dev).active) &&
2576             dev->data->dev_conf.intr_conf.rxq != 0) {
2577                 intr_vector = dev->data->nb_rx_queues;
2578                 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2579                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2580                                         IXGBE_MAX_INTR_QUEUE_NUM);
2581                         return -ENOTSUP;
2582                 }
2583                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2584                         return -1;
2585         }
2586
2587         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2588                 intr_handle->intr_vec =
2589                         rte_zmalloc("intr_vec",
2590                                     dev->data->nb_rx_queues * sizeof(int), 0);
2591                 if (intr_handle->intr_vec == NULL) {
2592                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2593                                      " intr_vec", dev->data->nb_rx_queues);
2594                         return -ENOMEM;
2595                 }
2596         }
2597
2598         /* configure MSI-X for sleep until rx interrupt */
2599         ixgbe_configure_msix(dev);
2600
2601         /* initialize transmission unit */
2602         ixgbe_dev_tx_init(dev);
2603
2604         /* This can fail when allocating mbufs for descriptor rings */
2605         err = ixgbe_dev_rx_init(dev);
2606         if (err) {
2607                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2608                 goto error;
2609         }
2610
2611         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2612                 ETH_VLAN_EXTEND_MASK;
2613         err = ixgbe_vlan_offload_config(dev, mask);
2614         if (err) {
2615                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
2616                 goto error;
2617         }
2618
2619         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2620                 /* Enable vlan filtering for VMDq */
2621                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2622         }
2623
2624         /* Configure DCB hw */
2625         ixgbe_configure_dcb(dev);
2626
2627         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2628                 err = ixgbe_fdir_configure(dev);
2629                 if (err)
2630                         goto error;
2631         }
2632
2633         /* Restore vf rate limit */
2634         if (vfinfo != NULL) {
2635                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
2636                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2637                                 if (vfinfo[vf].tx_rate[idx] != 0)
2638                                         ixgbe_set_vf_rate_limit(
2639                                                 dev, vf,
2640                                                 vfinfo[vf].tx_rate[idx],
2641                                                 1 << idx);
2642         }
2643
2644         ixgbe_restore_statistics_mapping(dev);
2645
2646         err = ixgbe_dev_rxtx_start(dev);
2647         if (err < 0) {
2648                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2649                 goto error;
2650         }
2651
2652         /* Skip link setup if loopback mode is enabled. */
2653         if (dev->data->dev_conf.lpbk_mode != 0) {
2654                 err = ixgbe_check_supported_loopback_mode(dev);
2655                 if (err < 0) {
2656                         PMD_INIT_LOG(ERR, "Unsupported loopback mode");
2657                         goto error;
2658                 } else {
2659                         goto skip_link_setup;
2660                 }
2661         }
2662
2663         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2664                 err = hw->mac.ops.setup_sfp(hw);
2665                 if (err)
2666                         goto error;
2667         }
2668
2669         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2670                 /* Turn on the copper */
2671                 ixgbe_set_phy_power(hw, true);
2672         } else {
2673                 /* Turn on the laser */
2674                 ixgbe_enable_tx_laser(hw);
2675         }
2676
2677         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2678         if (err)
2679                 goto error;
2680         dev->data->dev_link.link_status = link_up;
2681
2682         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2683         if (err)
2684                 goto error;
2685
2686         switch (hw->mac.type) {
2687         case ixgbe_mac_X550:
2688         case ixgbe_mac_X550EM_x:
2689         case ixgbe_mac_X550EM_a:
2690                 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2691                         ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
2692                         ETH_LINK_SPEED_10G;
2693                 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2694                                 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
2695                         allowed_speeds = ETH_LINK_SPEED_10M |
2696                                 ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
2697                 break;
2698         default:
2699                 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2700                         ETH_LINK_SPEED_10G;
2701         }
2702
2703         link_speeds = &dev->data->dev_conf.link_speeds;
2704         if (*link_speeds & ~allowed_speeds) {
2705                 PMD_INIT_LOG(ERR, "Invalid link setting");
2706                 goto error;
2707         }
2708
2709         speed = 0x0;
2710         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2711                 switch (hw->mac.type) {
2712                 case ixgbe_mac_82598EB:
2713                         speed = IXGBE_LINK_SPEED_82598_AUTONEG;
2714                         break;
2715                 case ixgbe_mac_82599EB:
2716                 case ixgbe_mac_X540:
2717                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2718                         break;
2719                 case ixgbe_mac_X550:
2720                 case ixgbe_mac_X550EM_x:
2721                 case ixgbe_mac_X550EM_a:
2722                         speed = IXGBE_LINK_SPEED_X550_AUTONEG;
2723                         break;
2724                 default:
2725                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2726                 }
2727         } else {
2728                 if (*link_speeds & ETH_LINK_SPEED_10G)
2729                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2730                 if (*link_speeds & ETH_LINK_SPEED_5G)
2731                         speed |= IXGBE_LINK_SPEED_5GB_FULL;
2732                 if (*link_speeds & ETH_LINK_SPEED_2_5G)
2733                         speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2734                 if (*link_speeds & ETH_LINK_SPEED_1G)
2735                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2736                 if (*link_speeds & ETH_LINK_SPEED_100M)
2737                         speed |= IXGBE_LINK_SPEED_100_FULL;
2738                 if (*link_speeds & ETH_LINK_SPEED_10M)
2739                         speed |= IXGBE_LINK_SPEED_10_FULL;
2740         }
2741
2742         err = ixgbe_setup_link(hw, speed, link_up);
2743         if (err)
2744                 goto error;
2745
2746 skip_link_setup:
2747
2748         if (rte_intr_allow_others(intr_handle)) {
2749                 /* check if lsc interrupt is enabled */
2750                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2751                         ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
2752                 else
2753                         ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
2754                 ixgbe_dev_macsec_interrupt_setup(dev);
2755         } else {
2756                 rte_intr_callback_unregister(intr_handle,
2757                                              ixgbe_dev_interrupt_handler, dev);
2758                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2759                         PMD_INIT_LOG(INFO, "lsc won't be enabled because"
2760                                      " there is no intr multiplex");
2761         }
2762
2763         /* check if rxq interrupt is enabled */
2764         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2765             rte_intr_dp_is_en(intr_handle))
2766                 ixgbe_dev_rxq_interrupt_setup(dev);
2767
2768         /* enable uio/vfio intr/eventfd mapping */
2769         rte_intr_enable(intr_handle);
2770
2771         /* resume enabled intr since hw reset */
2772         ixgbe_enable_intr(dev);
2773         ixgbe_l2_tunnel_conf(dev);
2774         ixgbe_filter_restore(dev);
2775
2776         if (tm_conf->root && !tm_conf->committed)
2777                 PMD_DRV_LOG(WARNING,
2778                             "please call hierarchy_commit() "
2779                             "before starting the port");
2780
2781         /*
2782          * Update link status right before return, because it may
2783          * start link configuration process in a separate thread.
2784          */
2785         ixgbe_dev_link_update(dev, 0);
2786
2787         return 0;
2788
2789 error:
2790         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2791         ixgbe_dev_clear_queues(dev);
2792         return -EIO;
2793 }
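/*
 * Illustrative call order (a sketch of the usual ethdev sequence, not a
 * definitive recipe): rte_eth_dev_configure() -> rte_eth_rx_queue_setup() /
 * rte_eth_tx_queue_setup() for each queue -> rte_eth_dev_start(), which ends
 * up in this function for ixgbe PF ports. Speeds requested through
 * dev_conf.link_speeds must be a subset of the allowed_speeds computed above,
 * otherwise the start fails with "Invalid link setting".
 */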
2794
2795 /*
2796  * Stop device: disable rx and tx functions to allow for reconfiguring.
2797  */
2798 static void
2799 ixgbe_dev_stop(struct rte_eth_dev *dev)
2800 {
2801         struct rte_eth_link link;
2802         struct ixgbe_adapter *adapter = dev->data->dev_private;
2803         struct ixgbe_hw *hw =
2804                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2805         struct ixgbe_vf_info *vfinfo =
2806                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2807         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2808         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2809         int vf;
2810         struct ixgbe_tm_conf *tm_conf =
2811                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2812
2813         if (hw->adapter_stopped)
2814                 return;
2815
2816         PMD_INIT_FUNC_TRACE();
2817
2818         rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
2819
2820         /* disable interrupts */
2821         ixgbe_disable_intr(hw);
2822
2823         /* reset the NIC */
2824         ixgbe_pf_reset_hw(hw);
2825         hw->adapter_stopped = 0;
2826
2827         /* stop adapter */
2828         ixgbe_stop_adapter(hw);
2829
2830         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2831                 vfinfo[vf].clear_to_send = false;
2832
2833         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2834                 /* Turn off the copper */
2835                 ixgbe_set_phy_power(hw, false);
2836         } else {
2837                 /* Turn off the laser */
2838                 ixgbe_disable_tx_laser(hw);
2839         }
2840
2841         ixgbe_dev_clear_queues(dev);
2842
2843         /* Clear stored conf */
2844         dev->data->scattered_rx = 0;
2845         dev->data->lro = 0;
2846
2847         /* Clear recorded link status */
2848         memset(&link, 0, sizeof(link));
2849         rte_eth_linkstatus_set(dev, &link);
2850
2851         if (!rte_intr_allow_others(intr_handle))
2852                 /* resume the default handler */
2853                 rte_intr_callback_register(intr_handle,
2854                                            ixgbe_dev_interrupt_handler,
2855                                            (void *)dev);
2856
2857         /* Clean datapath event and queue/vec mapping */
2858         rte_intr_efd_disable(intr_handle);
2859         if (intr_handle->intr_vec != NULL) {
2860                 rte_free(intr_handle->intr_vec);
2861                 intr_handle->intr_vec = NULL;
2862         }
2863
2864         /* reset hierarchy commit */
2865         tm_conf->committed = false;
2866
2867         adapter->rss_reta_updated = 0;
2868
2869         hw->adapter_stopped = true;
2870 }
2871
2872 /*
2873  * Set device link up: enable tx.
2874  */
2875 static int
2876 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2877 {
2878         struct ixgbe_hw *hw =
2879                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2880         if (hw->mac.type == ixgbe_mac_82599EB) {
2881 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2882                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2883                         /* Not supported in bypass mode */
2884                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2885                                      "by device id 0x%x", hw->device_id);
2886                         return -ENOTSUP;
2887                 }
2888 #endif
2889         }
2890
2891         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2892                 /* Turn on the copper */
2893                 ixgbe_set_phy_power(hw, true);
2894         } else {
2895                 /* Turn on the laser */
2896                 ixgbe_enable_tx_laser(hw);
2897         }
2898
2899         return 0;
2900 }
2901
2902 /*
2903  * Set device link down: disable tx.
2904  */
2905 static int
2906 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2907 {
2908         struct ixgbe_hw *hw =
2909                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2910         if (hw->mac.type == ixgbe_mac_82599EB) {
2911 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2912                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2913                         /* Not supported in bypass mode */
2914                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2915                                      "by device id 0x%x", hw->device_id);
2916                         return -ENOTSUP;
2917                 }
2918 #endif
2919         }
2920
2921         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2922                 /* Turn off the copper */
2923                 ixgbe_set_phy_power(hw, false);
2924         } else {
2925                 /* Turn off the laser */
2926                 ixgbe_disable_tx_laser(hw);
2927         }
2928
2929         return 0;
2930 }
2931
2932 /*
2933  * Reset and stop device.
2934  */
2935 static void
2936 ixgbe_dev_close(struct rte_eth_dev *dev)
2937 {
2938         struct ixgbe_hw *hw =
2939                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2940         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2941         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2942         int retries = 0;
2943         int ret;
2944
2945         PMD_INIT_FUNC_TRACE();
2946
2947         ixgbe_pf_reset_hw(hw);
2948
2949         ixgbe_dev_stop(dev);
2950
2951         ixgbe_dev_free_queues(dev);
2952
2953         ixgbe_disable_pcie_master(hw);
2954
2955         /* reprogram the RAR[0] in case user changed it. */
2956         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2957
2958         dev->dev_ops = NULL;
2959         dev->rx_pkt_burst = NULL;
2960         dev->tx_pkt_burst = NULL;
2961
2962         /* Unlock any pending hardware semaphore */
2963         ixgbe_swfw_lock_reset(hw);
2964
2965         /* disable uio intr before callback unregister */
2966         rte_intr_disable(intr_handle);
2967
2968         do {
2969                 ret = rte_intr_callback_unregister(intr_handle,
2970                                 ixgbe_dev_interrupt_handler, dev);
2971                 if (ret >= 0) {
2972                         break;
2973                 } else if (ret != -EAGAIN) {
2974                         PMD_INIT_LOG(ERR,
2975                                 "intr callback unregister failed: %d",
2976                                 ret);
2977                 }
2978                 rte_delay_ms(100);
2979         } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
2980
2981         /* cancel the delay handler before removing the dev */
2982         rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev);
2983
2984         /* uninitialize PF if max_vfs not zero */
2985         ixgbe_pf_host_uninit(dev);
2986
2987         /* remove all the fdir filters & hash */
2988         ixgbe_fdir_filter_uninit(dev);
2989
2990         /* remove all the L2 tunnel filters & hash */
2991         ixgbe_l2_tn_filter_uninit(dev);
2992
2993         /* Remove all ntuple filters of the device */
2994         ixgbe_ntuple_filter_uninit(dev);
2995
2996         /* clear all the filters list */
2997         ixgbe_filterlist_flush();
2998
2999         /* Remove all Traffic Manager configuration */
3000         ixgbe_tm_conf_uninit(dev);
3001
3002 #ifdef RTE_LIBRTE_SECURITY
3003         rte_free(dev->security_ctx);
3004 #endif
3005
3006 }
3007
3008 /*
3009  * Reset PF device.
3010  */
3011 static int
3012 ixgbe_dev_reset(struct rte_eth_dev *dev)
3013 {
3014         int ret;
3015
3016         /* When a DPDK PMD PF begins to reset the PF port, it should notify
3017          * all its VFs to make them align with it. The detailed notification
3018          * mechanism is PMD specific. For the ixgbe PF it is rather complex.
3019          * To avoid unexpected behavior in the VFs, reset of a PF with
3020          * SR-IOV activation is currently not supported. It might be supported later.
3021          */
3022         if (dev->data->sriov.active)
3023                 return -ENOTSUP;
3024
3025         ret = eth_ixgbe_dev_uninit(dev);
3026         if (ret)
3027                 return ret;
3028
3029         ret = eth_ixgbe_dev_init(dev, NULL);
3030
3031         return ret;
3032 }
3033
3034 static void
3035 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
3036                            struct ixgbe_hw_stats *hw_stats,
3037                            struct ixgbe_macsec_stats *macsec_stats,
3038                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
3039                            uint64_t *total_qprc, uint64_t *total_qprdc)
3040 {
3041         uint32_t bprc, lxon, lxoff, total;
3042         uint32_t delta_gprc = 0;
3043         unsigned i;
3044         /* Workaround for RX byte count not including CRC bytes when CRC
3045          * strip is enabled. CRC bytes are removed from counters when crc_strip
3046          * is disabled.
3047          */
3048         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
3049                         IXGBE_HLREG0_RXCRCSTRP);
3050
3051         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3052         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3053         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3054         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3055
3056         for (i = 0; i < 8; i++) {
3057                 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3058
3059                 /* global total per queue */
3060                 hw_stats->mpc[i] += mp;
3061                 /* Running comprehensive total for stats display */
3062                 *total_missed_rx += hw_stats->mpc[i];
3063                 if (hw->mac.type == ixgbe_mac_82598EB) {
3064                         hw_stats->rnbc[i] +=
3065                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3066                         hw_stats->pxonrxc[i] +=
3067                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
3068                         hw_stats->pxoffrxc[i] +=
3069                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
3070                 } else {
3071                         hw_stats->pxonrxc[i] +=
3072                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
3073                         hw_stats->pxoffrxc[i] +=
3074                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
3075                         hw_stats->pxon2offc[i] +=
3076                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
3077                 }
3078                 hw_stats->pxontxc[i] +=
3079                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
3080                 hw_stats->pxofftxc[i] +=
3081                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
3082         }
3083         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3084                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3085                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3086                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3087
3088                 delta_gprc += delta_qprc;
3089
3090                 hw_stats->qprc[i] += delta_qprc;
3091                 hw_stats->qptc[i] += delta_qptc;
3092
3093                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
3094                 hw_stats->qbrc[i] +=
3095                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
3096                 if (crc_strip == 0)
3097                         hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN;
3098
3099                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
3100                 hw_stats->qbtc[i] +=
3101                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
3102
3103                 hw_stats->qprdc[i] += delta_qprdc;
3104                 *total_qprdc += hw_stats->qprdc[i];
3105
3106                 *total_qprc += hw_stats->qprc[i];
3107                 *total_qbrc += hw_stats->qbrc[i];
3108         }
3109         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3110         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3111         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3112
3113         /*
3114          * An erratum states that gprc actually counts good + missed packets;
3115          * as a workaround, set gprc to the sum of the per-queue receive counters.
3116          */
3117         hw_stats->gprc = *total_qprc;
3118
3119         if (hw->mac.type != ixgbe_mac_82598EB) {
3120                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
3121                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3122                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
3123                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3124                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
3125                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3126                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3127                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3128         } else {
3129                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3130                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3131                 /* 82598 only has a counter in the high register */
3132                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3133                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3134                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3135         }
3136         uint64_t old_tpr = hw_stats->tpr;
3137
3138         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3139         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3140
3141         if (crc_strip == 0)
3142                 hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN;
3143
3144         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
3145         hw_stats->gptc += delta_gptc;
3146         hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN;
3147         hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
3148
3149         /*
3150          * Workaround: mprc hardware is incorrectly counting
3151          * broadcasts, so for now we subtract those.
3152          */
3153         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3154         hw_stats->bprc += bprc;
3155         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3156         if (hw->mac.type == ixgbe_mac_82598EB)
3157                 hw_stats->mprc -= bprc;
3158
3159         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3160         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3161         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3162         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3163         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3164         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3165
3166         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3167         hw_stats->lxontxc += lxon;
3168         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3169         hw_stats->lxofftxc += lxoff;
3170         total = lxon + lxoff;
3171
3172         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3173         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3174         hw_stats->gptc -= total;
3175         hw_stats->mptc -= total;
3176         hw_stats->ptc64 -= total;
3177         hw_stats->gotc -= total * RTE_ETHER_MIN_LEN;
3178
3179         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3180         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3181         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3182         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3183         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3184         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3185         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3186         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3187         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3188         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3189         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3190         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3191         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3192         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3193         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3194         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3195         /* Only read FCOE on 82599 */
3196         if (hw->mac.type != ixgbe_mac_82598EB) {
3197                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3198                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3199                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3200                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3201                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3202         }
3203
3204         /* Flow Director Stats registers */
3205         if (hw->mac.type != ixgbe_mac_82598EB) {
3206                 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3207                 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3208                 hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
3209                                         IXGBE_FDIRUSTAT) & 0xFFFF;
3210                 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
3211                                         IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
3212                 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
3213                                         IXGBE_FDIRFSTAT) & 0xFFFF;
3214                 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
3215                                         IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
3216         }
3217         /* MACsec Stats registers */
3218         macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
3219         macsec_stats->out_pkts_encrypted +=
3220                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
3221         macsec_stats->out_pkts_protected +=
3222                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
3223         macsec_stats->out_octets_encrypted +=
3224                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
3225         macsec_stats->out_octets_protected +=
3226                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
3227         macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
3228         macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
3229         macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
3230         macsec_stats->in_pkts_unknownsci +=
3231                 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
3232         macsec_stats->in_octets_decrypted +=
3233                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
3234         macsec_stats->in_octets_validated +=
3235                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
3236         macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
3237         macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
3238         macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
3239         for (i = 0; i < 2; i++) {
3240                 macsec_stats->in_pkts_ok +=
3241                         IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
3242                 macsec_stats->in_pkts_invalid +=
3243                         IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
3244                 macsec_stats->in_pkts_notvalid +=
3245                         IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
3246         }
3247         macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
3248         macsec_stats->in_pkts_notusingsa +=
3249                 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3250 }
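/*
 * Example of the 64-bit counter assembly used above (hypothetical register
 * values): if IXGBE_QBRC_L(i) reads 0x00001000 and IXGBE_QBRC_H(i) reads
 * 0x00000002, the accumulated byte count grows by
 * 0x00001000 + ((uint64_t)0x00000002 << 32) = 0x0000000200001000.
 * The same low/high pattern is used for QBTC, GORC/GOTC and TOR.
 */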
3251
3252 /*
3253  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3254  */
3255 static int
3256 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3257 {
3258         struct ixgbe_hw *hw =
3259                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3260         struct ixgbe_hw_stats *hw_stats =
3261                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3262         struct ixgbe_macsec_stats *macsec_stats =
3263                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3264                                 dev->data->dev_private);
3265         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3266         unsigned i;
3267
3268         total_missed_rx = 0;
3269         total_qbrc = 0;
3270         total_qprc = 0;
3271         total_qprdc = 0;
3272
3273         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3274                         &total_qbrc, &total_qprc, &total_qprdc);
3275
3276         if (stats == NULL)
3277                 return -EINVAL;
3278
3279         /* Fill out the rte_eth_stats statistics structure */
3280         stats->ipackets = total_qprc;
3281         stats->ibytes = total_qbrc;
3282         stats->opackets = hw_stats->gptc;
3283         stats->obytes = hw_stats->gotc;
3284
3285         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3286                 stats->q_ipackets[i] = hw_stats->qprc[i];
3287                 stats->q_opackets[i] = hw_stats->qptc[i];
3288                 stats->q_ibytes[i] = hw_stats->qbrc[i];
3289                 stats->q_obytes[i] = hw_stats->qbtc[i];
3290                 stats->q_errors[i] = hw_stats->qprdc[i];
3291         }
3292
3293         /* Rx Errors */
3294         stats->imissed  = total_missed_rx;
3295         stats->ierrors  = hw_stats->crcerrs +
3296                           hw_stats->mspdc +
3297                           hw_stats->rlec +
3298                           hw_stats->ruc +
3299                           hw_stats->roc +
3300                           hw_stats->illerrc +
3301                           hw_stats->errbc +
3302                           hw_stats->rfc +
3303                           hw_stats->fccrc +
3304                           hw_stats->fclast;
3305
3306         /* Tx Errors */
3307         stats->oerrors  = 0;
3308         return 0;
3309 }
3310
3311 static int
3312 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3313 {
3314         struct ixgbe_hw_stats *stats =
3315                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3316
3317         /* HW registers are cleared on read */
3318         ixgbe_dev_stats_get(dev, NULL);
3319
3320         /* Reset software totals */
3321         memset(stats, 0, sizeof(*stats));
3322
3323         return 0;
3324 }
3325
3326 /* This function calculates the number of xstats based on the current config */
3327 static unsigned
3328 ixgbe_xstats_calc_num(void) {
3329         return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3330                 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3331                 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3332 }
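/*
 * Purely illustrative arithmetic (the constants below are placeholders, not
 * the real values): if IXGBE_NB_HW_STATS were 50, IXGBE_NB_MACSEC_STATS 30,
 * and each priority table had 8 values with 2 stats per direction, the total
 * would be 50 + 30 + (2 * 8) + (2 * 8) = 112 xstats entries.
 */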
3333
3334 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3335         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3336 {
3337         const unsigned cnt_stats = ixgbe_xstats_calc_num();
3338         unsigned stat, i, count;
3339
3340         if (xstats_names != NULL) {
3341                 count = 0;
3342
3343                 /* Note: limit >= cnt_stats checked upstream
3344                  * in rte_eth_xstats_names()
3345                  */
3346
3347                 /* Extended stats from ixgbe_hw_stats */
3348                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3349                         strlcpy(xstats_names[count].name,
3350                                 rte_ixgbe_stats_strings[i].name,
3351                                 sizeof(xstats_names[count].name));
3352                         count++;
3353                 }
3354
3355                 /* MACsec Stats */
3356                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3357                         strlcpy(xstats_names[count].name,
3358                                 rte_ixgbe_macsec_strings[i].name,
3359                                 sizeof(xstats_names[count].name));
3360                         count++;
3361                 }
3362
3363                 /* RX Priority Stats */
3364                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3365                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3366                                 snprintf(xstats_names[count].name,
3367                                         sizeof(xstats_names[count].name),
3368                                         "rx_priority%u_%s", i,
3369                                         rte_ixgbe_rxq_strings[stat].name);
3370                                 count++;
3371                         }
3372                 }
3373
3374                 /* TX Priority Stats */
3375                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3376                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3377                                 snprintf(xstats_names[count].name,
3378                                         sizeof(xstats_names[count].name),
3379                                         "tx_priority%u_%s", i,
3380                                         rte_ixgbe_txq_strings[stat].name);
3381                                 count++;
3382                         }
3383                 }
3384         }
3385         return cnt_stats;
3386 }
3387
3388 static int ixgbe_dev_xstats_get_names_by_id(
3389         struct rte_eth_dev *dev,
3390         struct rte_eth_xstat_name *xstats_names,
3391         const uint64_t *ids,
3392         unsigned int limit)
3393 {
3394         if (!ids) {
3395                 const unsigned int cnt_stats = ixgbe_xstats_calc_num();
3396                 unsigned int stat, i, count;
3397
3398                 if (xstats_names != NULL) {
3399                         count = 0;
3400
3401                         /* Note: limit >= cnt_stats checked upstream
3402                          * in rte_eth_xstats_names()
3403                          */
3404
3405                         /* Extended stats from ixgbe_hw_stats */
3406                         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3407                                 strlcpy(xstats_names[count].name,
3408                                         rte_ixgbe_stats_strings[i].name,
3409                                         sizeof(xstats_names[count].name));
3410                                 count++;
3411                         }
3412
3413                         /* MACsec Stats */
3414                         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3415                                 strlcpy(xstats_names[count].name,
3416                                         rte_ixgbe_macsec_strings[i].name,
3417                                         sizeof(xstats_names[count].name));
3418                                 count++;
3419                         }
3420
3421                         /* RX Priority Stats */
3422                         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3423                                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3424                                         snprintf(xstats_names[count].name,
3425                                             sizeof(xstats_names[count].name),
3426                                             "rx_priority%u_%s", i,
3427                                             rte_ixgbe_rxq_strings[stat].name);
3428                                         count++;
3429                                 }
3430                         }
3431
3432                         /* TX Priority Stats */
3433                         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3434                                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3435                                         snprintf(xstats_names[count].name,
3436                                             sizeof(xstats_names[count].name),
3437                                             "tx_priority%u_%s", i,
3438                                             rte_ixgbe_txq_strings[stat].name);
3439                                         count++;
3440                                 }
3441                         }
3442                 }
3443                 return cnt_stats;
3444         }
3445
3446         uint16_t i;
3447         uint16_t size = ixgbe_xstats_calc_num();
3448         struct rte_eth_xstat_name xstats_names_copy[size];
3449
3450         ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
3451                         size);
3452
3453         for (i = 0; i < limit; i++) {
3454                 if (ids[i] >= size) {
3455                         PMD_INIT_LOG(ERR, "id value isn't valid");
3456                         return -1;
3457                 }
3458                 strcpy(xstats_names[i].name,
3459                                 xstats_names_copy[ids[i]].name);
3460         }
3461         return limit;
3462 }
3463
3464 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3465         struct rte_eth_xstat_name *xstats_names, unsigned limit)
3466 {
3467         unsigned i;
3468
3469         if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3470                 return -ENOMEM;
3471
3472         if (xstats_names != NULL)
3473                 for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
3474                         strlcpy(xstats_names[i].name,
3475                                 rte_ixgbevf_stats_strings[i].name,
3476                                 sizeof(xstats_names[i].name));
3477         return IXGBEVF_NB_XSTATS;
3478 }
3479
3480 static int
3481 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3482                                          unsigned n)
3483 {
3484         struct ixgbe_hw *hw =
3485                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3486         struct ixgbe_hw_stats *hw_stats =
3487                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3488         struct ixgbe_macsec_stats *macsec_stats =
3489                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3490                                 dev->data->dev_private);
3491         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3492         unsigned i, stat, count = 0;
3493
3494         count = ixgbe_xstats_calc_num();
3495
3496         if (n < count)
3497                 return count;
3498
3499         total_missed_rx = 0;
3500         total_qbrc = 0;
3501         total_qprc = 0;
3502         total_qprdc = 0;
3503
3504         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3505                         &total_qbrc, &total_qprc, &total_qprdc);
3506
3507         /* If this is a reset, xstats is NULL, and we have cleared the
3508          * registers by reading them.
3509          */
3510         if (!xstats)
3511                 return 0;
3512
3513         /* Extended stats from ixgbe_hw_stats */
3514         count = 0;
3515         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3516                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3517                                 rte_ixgbe_stats_strings[i].offset);
3518                 xstats[count].id = count;
3519                 count++;
3520         }
3521
3522         /* MACsec Stats */
3523         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3524                 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3525                                 rte_ixgbe_macsec_strings[i].offset);
3526                 xstats[count].id = count;
3527                 count++;
3528         }
3529
3530         /* RX Priority Stats */
3531         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3532                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3533                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3534                                         rte_ixgbe_rxq_strings[stat].offset +
3535                                         (sizeof(uint64_t) * i));
3536                         xstats[count].id = count;
3537                         count++;
3538                 }
3539         }
3540
3541         /* TX Priority Stats */
3542         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3543                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3544                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3545                                         rte_ixgbe_txq_strings[stat].offset +
3546                                         (sizeof(uint64_t) * i));
3547                         xstats[count].id = count;
3548                         count++;
3549                 }
3550         }
3551         return count;
3552 }
3553
3554 static int
3555 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3556                 uint64_t *values, unsigned int n)
3557 {
3558         if (!ids) {
3559                 struct ixgbe_hw *hw =
3560                                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3561                 struct ixgbe_hw_stats *hw_stats =
3562                                 IXGBE_DEV_PRIVATE_TO_STATS(
3563                                                 dev->data->dev_private);
3564                 struct ixgbe_macsec_stats *macsec_stats =
3565                                 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3566                                         dev->data->dev_private);
3567                 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3568                 unsigned int i, stat, count = 0;
3569
3570                 count = ixgbe_xstats_calc_num();
3571
3572                 if (!ids && n < count)
3573                         return count;
3574
3575                 total_missed_rx = 0;
3576                 total_qbrc = 0;
3577                 total_qprc = 0;
3578                 total_qprdc = 0;
3579
3580                 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
3581                                 &total_missed_rx, &total_qbrc, &total_qprc,
3582                                 &total_qprdc);
3583
3584                 /* If this is a reset, xstats is NULL, and we have cleared the
3585                  * registers by reading them.
3586                  */
3587                 if (!ids && !values)
3588                         return 0;
3589
3590                 /* Extended stats from ixgbe_hw_stats */
3591                 count = 0;
3592                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3593                         values[count] = *(uint64_t *)(((char *)hw_stats) +
3594                                         rte_ixgbe_stats_strings[i].offset);
3595                         count++;
3596                 }
3597
3598                 /* MACsec Stats */
3599                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3600                         values[count] = *(uint64_t *)(((char *)macsec_stats) +
3601                                         rte_ixgbe_macsec_strings[i].offset);
3602                         count++;
3603                 }
3604
3605                 /* RX Priority Stats */
3606                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3607                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3608                                 values[count] =
3609                                         *(uint64_t *)(((char *)hw_stats) +
3610                                         rte_ixgbe_rxq_strings[stat].offset +
3611                                         (sizeof(uint64_t) * i));
3612                                 count++;
3613                         }
3614                 }
3615
3616                 /* TX Priority Stats */
3617                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3618                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3619                                 values[count] =
3620                                         *(uint64_t *)(((char *)hw_stats) +
3621                                         rte_ixgbe_txq_strings[stat].offset +
3622                                         (sizeof(uint64_t) * i));
3623                                 count++;
3624                         }
3625                 }
3626                 return count;
3627         }
3628
3629         uint16_t i;
3630         uint16_t size = ixgbe_xstats_calc_num();
3631         uint64_t values_copy[size];
3632
3633         ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
3634
3635         for (i = 0; i < n; i++) {
3636                 if (ids[i] >= size) {
3637                         PMD_INIT_LOG(ERR, "id value isn't valid");
3638                         return -1;
3639                 }
3640                 values[i] = values_copy[ids[i]];
3641         }
3642         return n;
3643 }
3644
3645 static int
3646 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3647 {
3648         struct ixgbe_hw_stats *stats =
3649                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3650         struct ixgbe_macsec_stats *macsec_stats =
3651                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3652                                 dev->data->dev_private);
3653
3654         unsigned count = ixgbe_xstats_calc_num();
3655
3656         /* HW registers are cleared on read */
3657         ixgbe_dev_xstats_get(dev, NULL, count);
3658
3659         /* Reset software totals */
3660         memset(stats, 0, sizeof(*stats));
3661         memset(macsec_stats, 0, sizeof(*macsec_stats));
3662
3663         return 0;
3664 }
3665
3666 static void
3667 ixgbevf_update_stats(struct rte_eth_dev *dev)
3668 {
3669         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3670         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3671                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3672
3673         /* Good Rx packet, include VF loopback */
3674         UPDATE_VF_STAT(IXGBE_VFGPRC,
3675             hw_stats->last_vfgprc, hw_stats->vfgprc);
3676
3677         /* Good Rx octets, include VF loopback */
3678         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3679             hw_stats->last_vfgorc, hw_stats->vfgorc);
3680
3681         /* Good Tx packet, include VF loopback */
3682         UPDATE_VF_STAT(IXGBE_VFGPTC,
3683             hw_stats->last_vfgptc, hw_stats->vfgptc);
3684
3685         /* Good Tx octets, include VF loopback */
3686         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3687             hw_stats->last_vfgotc, hw_stats->vfgotc);
3688
3689         /* Rx Multicast Packet */
3690         UPDATE_VF_STAT(IXGBE_VFMPRC,
3691             hw_stats->last_vfmprc, hw_stats->vfmprc);
3692 }
3693
3694 static int
3695 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3696                        unsigned n)
3697 {
3698         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3699                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3700         unsigned i;
3701
3702         if (n < IXGBEVF_NB_XSTATS)
3703                 return IXGBEVF_NB_XSTATS;
3704
3705         ixgbevf_update_stats(dev);
3706
3707         if (!xstats)
3708                 return 0;
3709
3710         /* Extended stats */
3711         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3712                 xstats[i].id = i;
3713                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3714                         rte_ixgbevf_stats_strings[i].offset);
3715         }
3716
3717         return IXGBEVF_NB_XSTATS;
3718 }
3719
3720 static int
3721 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3722 {
3723         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3724                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3725
3726         ixgbevf_update_stats(dev);
3727
3728         if (stats == NULL)
3729                 return -EINVAL;
3730
3731         stats->ipackets = hw_stats->vfgprc;
3732         stats->ibytes = hw_stats->vfgorc;
3733         stats->opackets = hw_stats->vfgptc;
3734         stats->obytes = hw_stats->vfgotc;
3735         return 0;
3736 }
3737
3738 static int
3739 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3740 {
3741         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3742                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3743
3744         /* Sync HW registers to the last stats */
3745         ixgbevf_dev_stats_get(dev, NULL);
3746
3747         /* reset HW current stats */
3748         hw_stats->vfgprc = 0;
3749         hw_stats->vfgorc = 0;
3750         hw_stats->vfgptc = 0;
3751         hw_stats->vfgotc = 0;
3752
3753         return 0;
3754 }
3755
3756 static int
3757 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3758 {
3759         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3760         u16 eeprom_verh, eeprom_verl;
3761         u32 etrack_id;
3762         int ret;
3763
3764         ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3765         ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3766
3767         etrack_id = (eeprom_verh << 16) | eeprom_verl;
3768         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3769
3770         ret += 1; /* add the size of '\0' */
3771         if (fw_size < (u32)ret)
3772                 return ret;
3773         else
3774                 return 0;
3775 }
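/*
 * Usage sketch for the truncation protocol above (assumption: the caller
 * follows the rte_eth_dev_fw_version_get() convention of returning the
 * required size when the buffer is too small):
 *
 *     char fw[16];                                   // hypothetical buffer
 *     int need = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *     if (need > 0)   // buffer too small; 'need' includes the '\0'
 *         ...retry with a buffer of at least 'need' bytes...
 */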
3776
3777 static int
3778 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3779 {
3780         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3781         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3782         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3783
3784         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3785         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3786         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3787                 /*
3788                  * When DCB/VT is off, maximum number of queues changes,
3789                  * except for 82598EB, which remains constant.
3790                  */
3791                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3792                                 hw->mac.type != ixgbe_mac_82598EB)
3793                         dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3794         }
3795         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3796         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3797         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3798         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3799         dev_info->max_vfs = pci_dev->max_vfs;
3800         if (hw->mac.type == ixgbe_mac_82598EB)
3801                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3802         else
3803                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3804         dev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
3805         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3806         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3807         dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
3808         dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
3809                                      dev_info->rx_queue_offload_capa);
3810         dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
3811         dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
3812
3813         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3814                 .rx_thresh = {
3815                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3816                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3817                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3818                 },
3819                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3820                 .rx_drop_en = 0,
3821                 .offloads = 0,
3822         };
3823
3824         dev_info->default_txconf = (struct rte_eth_txconf) {
3825                 .tx_thresh = {
3826                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3827                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3828                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3829                 },
3830                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3831                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3832                 .offloads = 0,
3833         };
3834
3835         dev_info->rx_desc_lim = rx_desc_lim;
3836         dev_info->tx_desc_lim = tx_desc_lim;
3837
3838         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3839         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3840         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3841
3842         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3843         if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
3844                         hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
3845                 dev_info->speed_capa = ETH_LINK_SPEED_10M |
3846                         ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
3847
3848         if (hw->mac.type == ixgbe_mac_X540 ||
3849             hw->mac.type == ixgbe_mac_X540_vf ||
3850             hw->mac.type == ixgbe_mac_X550 ||
3851             hw->mac.type == ixgbe_mac_X550_vf) {
3852                 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3853         }
3854         if (hw->mac.type == ixgbe_mac_X550) {
3855                 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
3856                 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
3857         }
3858
3859         /* Driver-preferred Rx/Tx parameters */
3860         dev_info->default_rxportconf.burst_size = 32;
3861         dev_info->default_txportconf.burst_size = 32;
3862         dev_info->default_rxportconf.nb_queues = 1;
3863         dev_info->default_txportconf.nb_queues = 1;
3864         dev_info->default_rxportconf.ring_size = 256;
3865         dev_info->default_txportconf.ring_size = 256;
3866
3867         return 0;
3868 }
3869
3870 static const uint32_t *
3871 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3872 {
3873         static const uint32_t ptypes[] = {
3874                 /* For non-vec functions,
3875                  * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3876                  * for vec functions,
3877                  * refers to _recv_raw_pkts_vec().
3878                  */
3879                 RTE_PTYPE_L2_ETHER,
3880                 RTE_PTYPE_L3_IPV4,
3881                 RTE_PTYPE_L3_IPV4_EXT,
3882                 RTE_PTYPE_L3_IPV6,
3883                 RTE_PTYPE_L3_IPV6_EXT,
3884                 RTE_PTYPE_L4_SCTP,
3885                 RTE_PTYPE_L4_TCP,
3886                 RTE_PTYPE_L4_UDP,
3887                 RTE_PTYPE_TUNNEL_IP,
3888                 RTE_PTYPE_INNER_L3_IPV6,
3889                 RTE_PTYPE_INNER_L3_IPV6_EXT,
3890                 RTE_PTYPE_INNER_L4_TCP,
3891                 RTE_PTYPE_INNER_L4_UDP,
3892                 RTE_PTYPE_UNKNOWN
3893         };
3894
3895         if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3896             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3897             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3898             dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3899                 return ptypes;
3900
3901 #if defined(RTE_ARCH_X86)
3902         if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3903             dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3904                 return ptypes;
3905 #endif
3906         return NULL;
3907 }
3908
3909 static int
3910 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3911                      struct rte_eth_dev_info *dev_info)
3912 {
3913         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3914         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3915
3916         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3917         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3918         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3919         dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
3920         dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
3921         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3922         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3923         dev_info->max_vfs = pci_dev->max_vfs;
3924         if (hw->mac.type == ixgbe_mac_82598EB)
3925                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3926         else
3927                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3928         dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
3929         dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
3930                                      dev_info->rx_queue_offload_capa);
3931         dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
3932         dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
3933         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3934         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3935         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3936
3937         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3938                 .rx_thresh = {
3939                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3940                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3941                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3942                 },
3943                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3944                 .rx_drop_en = 0,
3945                 .offloads = 0,
3946         };
3947
3948         dev_info->default_txconf = (struct rte_eth_txconf) {
3949                 .tx_thresh = {
3950                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3951                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3952                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3953                 },
3954                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3955                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3956                 .offloads = 0,
3957         };
3958
3959         dev_info->rx_desc_lim = rx_desc_lim;
3960         dev_info->tx_desc_lim = tx_desc_lim;
3961
3962         return 0;
3963 }
3964
3965 static int
3966 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3967                    int *link_up, int wait_to_complete)
3968 {
3969         struct ixgbe_adapter *adapter = container_of(hw,
3970                                                      struct ixgbe_adapter, hw);
3971         struct ixgbe_mbx_info *mbx = &hw->mbx;
3972         struct ixgbe_mac_info *mac = &hw->mac;
3973         uint32_t links_reg, in_msg;
3974         int ret_val = 0;
3975
3976         /* If we were hit with a reset, drop the link */
3977         if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
3978                 mac->get_link_status = true;
3979
3980         if (!mac->get_link_status)
3981                 goto out;
3982
3983         /* if link status is down no point in checking to see if pf is up */
3984         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3985         if (!(links_reg & IXGBE_LINKS_UP))
3986                 goto out;
3987
3988         /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
3989          * before the link status is correct
3990          */
3991         if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
3992                 int i;
3993
3994                 for (i = 0; i < 5; i++) {
3995                         rte_delay_us(100);
3996                         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3997
3998                         if (!(links_reg & IXGBE_LINKS_UP))
3999                                 goto out;
4000                 }
4001         }
4002
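             /* Translate the VFLINKS speed field into an ixgbe_link_speed value */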
4003         switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4004         case IXGBE_LINKS_SPEED_10G_82599:
4005                 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4006                 if (hw->mac.type >= ixgbe_mac_X550) {
4007                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4008                                 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4009                 }
4010                 break;
4011         case IXGBE_LINKS_SPEED_1G_82599:
4012                 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4013                 break;
4014         case IXGBE_LINKS_SPEED_100_82599:
4015                 *speed = IXGBE_LINK_SPEED_100_FULL;
4016                 if (hw->mac.type == ixgbe_mac_X550) {
4017                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4018                                 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4019                 }
4020                 break;
4021         case IXGBE_LINKS_SPEED_10_X550EM_A:
4022                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4023                 /* This encoding is reserved on older MACs, so leave the speed unknown there */
4024                 if (hw->mac.type >= ixgbe_mac_X550)
4025                         *speed = IXGBE_LINK_SPEED_10_FULL;
4026                 break;
4027         default:
4028                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4029         }
4030
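             /* When not waiting for completion and no full PF link check is
              * requested, trust VFLINKS and skip the mailbox handshake below. */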
4031         if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) {
4032                 if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
4033                         mac->get_link_status = true;
4034                 else
4035                         mac->get_link_status = false;
4036
4037                 goto out;
4038         }
4039
4040         /* If the read failed, it could just be a mailbox collision; best to wait
4041          * until we are called again and not report an error.
4042          */
4043         if (mbx->ops.read(hw, &in_msg, 1, 0))
4044                 goto out;
4045
4046         if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
4047                 /* Msg is not CTS; if it is a NACK, we must have lost CTS status */
4048                 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
4049                         mac->get_link_status = false;
4050                 goto out;
4051         }
4052
4053         /* The PF is talking; if we timed out in the past, we reinit */
4054         if (!mbx->timeout) {
4055                 ret_val = -1;
4056                 goto out;
4057         }
4058
4059         /* if we passed all the tests above then the link is up and we no
4060          * longer need to check for link
4061          */
4062         mac->get_link_status = false;
4063
4064 out:
4065         *link_up = !mac->get_link_status;
4066         return ret_val;
4067 }
4068
4069 static void
4070 ixgbe_dev_setup_link_alarm_handler(void *param)
4071 {
4072         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4073         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4074         struct ixgbe_interrupt *intr =
4075                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4076         u32 speed;
4077         bool autoneg = false;
4078
4079         speed = hw->phy.autoneg_advertised;
4080         if (!speed)
4081                 ixgbe_get_link_capabilities(hw, &speed, &autoneg);
4082
4083         ixgbe_setup_link(hw, speed, true);
4084
4085         intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4086 }
4087
4088 /* return 0 means link status changed, -1 means not changed */
4089 int
4090 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
4091                             int wait_to_complete, int vf)
4092 {
4093         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4094         struct rte_eth_link link;
4095         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
4096         struct ixgbe_interrupt *intr =
4097                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4098         int link_up;
4099         int diag;
4100         int wait = 1;
4101
4102         memset(&link, 0, sizeof(link));
4103         link.link_status = ETH_LINK_DOWN;
4104         link.link_speed = ETH_SPEED_NUM_NONE;
4105         link.link_duplex = ETH_LINK_HALF_DUPLEX;
4106         link.link_autoneg = ETH_LINK_AUTONEG;
4107
4108         hw->mac.get_link_status = true;
4109
4110         if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG)
4111                 return rte_eth_linkstatus_set(dev, &link);
4112
4113         /* Do not wait for completion if waiting was not requested or if the LSC interrupt is enabled */
4114         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
4115                 wait = 0;
4116
4117         if (vf)
4118                 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
4119         else
4120                 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
4121
4122         if (diag != 0) {
4123                 link.link_speed = ETH_SPEED_NUM_100M;
4124                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
4125                 return rte_eth_linkstatus_set(dev, &link);
4126         }
4127
4128         if (link_up == 0) {
4129                 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
4130                         intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
4131                         rte_eal_alarm_set(10,
4132                                 ixgbe_dev_setup_link_alarm_handler, dev);
4133                 }
4134                 return rte_eth_linkstatus_set(dev, &link);
4135         }
4136
4137         link.link_status = ETH_LINK_UP;
4138         link.link_duplex = ETH_LINK_FULL_DUPLEX;
4139
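             /* Translate the HW link speed into the ethdev ETH_SPEED_NUM_* value */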
4140         switch (link_speed) {
4141         default:
4142         case IXGBE_LINK_SPEED_UNKNOWN:
4143                 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4144                         hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
4145                         link.link_speed = ETH_SPEED_NUM_10M;
4146                 else
4147                         link.link_speed = ETH_SPEED_NUM_100M;
4148                 break;
4149
4150         case IXGBE_LINK_SPEED_100_FULL:
4151                 link.link_speed = ETH_SPEED_NUM_100M;
4152                 break;
4153
4154         case IXGBE_LINK_SPEED_1GB_FULL:
4155                 link.link_speed = ETH_SPEED_NUM_1G;
4156                 break;
4157
4158         case IXGBE_LINK_SPEED_2_5GB_FULL:
4159                 link.link_speed = ETH_SPEED_NUM_2_5G;
4160                 break;
4161
4162         case IXGBE_LINK_SPEED_5GB_FULL:
4163                 link.link_speed = ETH_SPEED_NUM_5G;
4164                 break;
4165
4166         case IXGBE_LINK_SPEED_10GB_FULL:
4167                 link.link_speed = ETH_SPEED_NUM_10G;
4168                 break;
4169         }
4170
4171         return rte_eth_linkstatus_set(dev, &link);
4172 }
4173
4174 static int
4175 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4176 {
4177         return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
4178 }
4179
4180 static int
4181 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4182 {
4183         return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
4184 }
4185
4186 static int
4187 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
4188 {
4189         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4190         uint32_t fctrl;
4191
4192         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4193         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4194         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4195
4196         return 0;
4197 }
4198
4199 static int
4200 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
4201 {
4202         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4203         uint32_t fctrl;
4204
4205         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4206         fctrl &= (~IXGBE_FCTRL_UPE);
4207         if (dev->data->all_multicast == 1)
4208                 fctrl |= IXGBE_FCTRL_MPE;
4209         else
4210                 fctrl &= (~IXGBE_FCTRL_MPE);
4211         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4212
4213         return 0;
4214 }
4215
4216 static int
4217 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4218 {
4219         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4220         uint32_t fctrl;
4221
4222         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4223         fctrl |= IXGBE_FCTRL_MPE;
4224         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4225
4226         return 0;
4227 }
4228
4229 static int
4230 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4231 {
4232         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4233         uint32_t fctrl;
4234
4235         if (dev->data->promiscuous == 1)
4236                 return 0; /* must remain in all_multicast mode */
4237
4238         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4239         fctrl &= (~IXGBE_FCTRL_MPE);
4240         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4241
4242         return 0;
4243 }
4244
4245 /**
4246  * It clears the interrupt causes and enables the interrupt.
4247  * It is called only once during NIC initialization.
4248  *
4249  * @param dev
4250  *  Pointer to struct rte_eth_dev.
4251  * @param on
4252  *  Enable or Disable.
4253  *
4254  * @return
4255  *  - On success, zero.
4256  *  - On failure, a negative value.
4257  */
4258 static int
4259 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
4260 {
4261         struct ixgbe_interrupt *intr =
4262                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4263
4264         ixgbe_dev_link_status_print(dev);
4265         if (on)
4266                 intr->mask |= IXGBE_EICR_LSC;
4267         else
4268                 intr->mask &= ~IXGBE_EICR_LSC;
4269
4270         return 0;
4271 }
4272
4273 /**
4274  * It clears the interrupt causes and enables the interrupt.
4275  * It is called only once during NIC initialization.
4276  *
4277  * @param dev
4278  *  Pointer to struct rte_eth_dev.
4279  *
4280  * @return
4281  *  - On success, zero.
4282  *  - On failure, a negative value.
4283  */
4284 static int
4285 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4286 {
4287         struct ixgbe_interrupt *intr =
4288                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4289
4290         intr->mask |= IXGBE_EICR_RTX_QUEUE;
4291
4292         return 0;
4293 }
4294
4295 /**
4296  * It clears the interrupt causes and enables the interrupt.
4297  * It is called only once during NIC initialization.
4298  *
4299  * @param dev
4300  *  Pointer to struct rte_eth_dev.
4301  *
4302  * @return
4303  *  - On success, zero.
4304  *  - On failure, a negative value.
4305  */
4306 static int
4307 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4308 {
4309         struct ixgbe_interrupt *intr =
4310                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4311
4312         intr->mask |= IXGBE_EICR_LINKSEC;
4313
4314         return 0;
4315 }
4316
4317 /*
4318  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
4319  *
4320  * @param dev
4321  *  Pointer to struct rte_eth_dev.
4322  *
4323  * @return
4324  *  - On success, zero.
4325  *  - On failure, a negative value.
4326  */
4327 static int
4328 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4329 {
4330         uint32_t eicr;
4331         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4332         struct ixgbe_interrupt *intr =
4333                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4334
4335         /* clear all cause mask */
4336         ixgbe_disable_intr(hw);
4337
4338         /* read-on-clear nic registers here */
4339         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4340         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4341
4342         intr->flags = 0;
4343
4344         /* set flag for async link update */
4345         if (eicr & IXGBE_EICR_LSC)
4346                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4347
4348         if (eicr & IXGBE_EICR_MAILBOX)
4349                 intr->flags |= IXGBE_FLAG_MAILBOX;
4350
4351         if (eicr & IXGBE_EICR_LINKSEC)
4352                 intr->flags |= IXGBE_FLAG_MACSEC;
4353
4354         if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
4355             hw->phy.type == ixgbe_phy_x550em_ext_t &&
4356             (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4357                 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4358
4359         return 0;
4360 }
4361
4362 /**
4363  * It gets and then prints the link status.
4364  *
4365  * @param dev
4366  *  Pointer to struct rte_eth_dev.
4367  *
4368  * @return
4369  *  void
4370  *
4371  */
4372 static void
4373 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4374 {
4375         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4376         struct rte_eth_link link;
4377
4378         rte_eth_linkstatus_get(dev, &link);
4379
4380         if (link.link_status) {
4381                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4382                                         (int)(dev->data->port_id),
4383                                         (unsigned)link.link_speed,
4384                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4385                                         "full-duplex" : "half-duplex");
4386         } else {
4387                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
4388                                 (int)(dev->data->port_id));
4389         }
4390         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4391                                 pci_dev->addr.domain,
4392                                 pci_dev->addr.bus,
4393                                 pci_dev->addr.devid,
4394                                 pci_dev->addr.function);
4395 }
4396
4397 /*
4398  * It executes link_update after knowing an interrupt occurred.
4399  *
4400  * @param dev
4401  *  Pointer to struct rte_eth_dev.
4402  *
4403  * @return
4404  *  - On success, zero.
4405  *  - On failure, a negative value.
4406  */
4407 static int
4408 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
4409 {
4410         struct ixgbe_interrupt *intr =
4411                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4412         int64_t timeout;
4413         struct ixgbe_hw *hw =
4414                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4415
4416         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4417
4418         if (intr->flags & IXGBE_FLAG_MAILBOX) {
4419                 ixgbe_pf_mbx_process(dev);
4420                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
4421         }
4422
4423         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4424                 ixgbe_handle_lasi(hw);
4425                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4426         }
4427
4428         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4429                 struct rte_eth_link link;
4430
4431                 /* Get the link status before the update, to predict the transition below */
4432                 rte_eth_linkstatus_get(dev, &link);
4433
4434                 ixgbe_dev_link_update(dev, 0);
4435
4436                 /* likely to come up */
4437                 if (!link.link_status)
4438                         /* handle it 1 sec later, waiting for it to become stable */
4439                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4440                 /* likely to go down */
4441                 else
4442                         /* handle it 4 sec later, waiting for it to become stable */
4443                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4444
4445                 ixgbe_dev_link_status_print(dev);
4446                 if (rte_eal_alarm_set(timeout * 1000,
4447                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4448                         PMD_DRV_LOG(ERR, "Error setting alarm");
4449                 else {
4450                         /* remember original mask */
4451                         intr->mask_original = intr->mask;
4452                         /* only disable lsc interrupt */
4453                         intr->mask &= ~IXGBE_EIMS_LSC;
4454                 }
4455         }
4456
4457         PMD_DRV_LOG(DEBUG, "enable intr immediately");
4458         ixgbe_enable_intr(dev);
4459
4460         return 0;
4461 }
4462
4463 /**
4464  * Interrupt handler to be registered as the alarm callback for delayed
4465  * handling of a specific interrupt, waiting for the NIC state to stabilize.
4466  * As the ixgbe interrupt state is not stable right after the link goes down,
4467  * it needs to wait 4 seconds to get a stable status.
4468  *
4469  * @param handle
4470  *  Pointer to interrupt handle.
4471  * @param param
4472  *  The address of the parameter (struct rte_eth_dev *) registered before.
4473  *
4474  * @return
4475  *  void
4476  */
4477 static void
4478 ixgbe_dev_interrupt_delayed_handler(void *param)
4479 {
4480         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4481         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4482         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4483         struct ixgbe_interrupt *intr =
4484                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4485         struct ixgbe_hw *hw =
4486                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4487         uint32_t eicr;
4488
4489         ixgbe_disable_intr(hw);
4490
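             /* Re-read EICR: additional causes (e.g. a PF mailbox message) may
              * have arrived while this delayed handler was pending. */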
4491         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4492         if (eicr & IXGBE_EICR_MAILBOX)
4493                 ixgbe_pf_mbx_process(dev);
4494
4495         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4496                 ixgbe_handle_lasi(hw);
4497                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4498         }
4499
4500         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4501                 ixgbe_dev_link_update(dev, 0);
4502                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4503                 ixgbe_dev_link_status_print(dev);
4504                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
4505                                               NULL);
4506         }
4507
4508         if (intr->flags & IXGBE_FLAG_MACSEC) {
4509                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
4510                                               NULL);
4511                 intr->flags &= ~IXGBE_FLAG_MACSEC;
4512         }
4513
4514         /* restore original mask */
4515         intr->mask = intr->mask_original;
4516         intr->mask_original = 0;
4517
4518         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4519         ixgbe_enable_intr(dev);
4520         rte_intr_ack(intr_handle);
4521 }
4522
4523 /**
4524  * Interrupt handler triggered by the NIC for handling
4525  * a specific interrupt.
4526  *
4527  * @param handle
4528  *  Pointer to interrupt handle.
4529  * @param param
4530  *  The address of the parameter (struct rte_eth_dev *) registered before.
4531  *
4532  * @return
4533  *  void
4534  */
4535 static void
4536 ixgbe_dev_interrupt_handler(void *param)
4537 {
4538         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4539
4540         ixgbe_dev_interrupt_get_status(dev);
4541         ixgbe_dev_interrupt_action(dev);
4542 }
4543
4544 static int
4545 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4546 {
4547         struct ixgbe_hw *hw;
4548
4549         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4550         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4551 }
4552
4553 static int
4554 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4555 {
4556         struct ixgbe_hw *hw;
4557
4558         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4559         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4560 }
4561
4562 static int
4563 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4564 {
4565         struct ixgbe_hw *hw;
4566         uint32_t mflcn_reg;
4567         uint32_t fccfg_reg;
4568         int rx_pause;
4569         int tx_pause;
4570
4571         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4572
4573         fc_conf->pause_time = hw->fc.pause_time;
4574         fc_conf->high_water = hw->fc.high_water[0];
4575         fc_conf->low_water = hw->fc.low_water[0];
4576         fc_conf->send_xon = hw->fc.send_xon;
4577         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4578
4579         /*
4580          * Return rx_pause status according to actual setting of
4581          * MFLCN register.
4582          */
4583         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4584         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4585                 rx_pause = 1;
4586         else
4587                 rx_pause = 0;
4588
4589         /*
4590          * Return tx_pause status according to actual setting of
4591          * FCCFG register.
4592          */
4593         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4594         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4595                 tx_pause = 1;
4596         else
4597                 tx_pause = 0;
4598
4599         if (rx_pause && tx_pause)
4600                 fc_conf->mode = RTE_FC_FULL;
4601         else if (rx_pause)
4602                 fc_conf->mode = RTE_FC_RX_PAUSE;
4603         else if (tx_pause)
4604                 fc_conf->mode = RTE_FC_TX_PAUSE;
4605         else
4606                 fc_conf->mode = RTE_FC_NONE;
4607
4608         return 0;
4609 }
4610
4611 static int
4612 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4613 {
4614         struct ixgbe_hw *hw;
4615         int err;
4616         uint32_t rx_buf_size;
4617         uint32_t max_high_water;
4618         uint32_t mflcn;
4619         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4620                 ixgbe_fc_none,
4621                 ixgbe_fc_rx_pause,
4622                 ixgbe_fc_tx_pause,
4623                 ixgbe_fc_full
4624         };
4625
4626         PMD_INIT_FUNC_TRACE();
4627
4628         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4629         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4630         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4631
4632         /*
4633          * Reserve at least one full Ethernet frame of headroom for the
4634          * high_water/low_water watermarks, which are in kilobytes for ixgbe.
4635          */
4636         max_high_water = (rx_buf_size -
4637                         RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4638         if ((fc_conf->high_water > max_high_water) ||
4639                 (fc_conf->high_water < fc_conf->low_water)) {
4640                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4641                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4642                 return -EINVAL;
4643         }
4644
4645         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4646         hw->fc.pause_time     = fc_conf->pause_time;
4647         hw->fc.high_water[0]  = fc_conf->high_water;
4648         hw->fc.low_water[0]   = fc_conf->low_water;
4649         hw->fc.send_xon       = fc_conf->send_xon;
4650         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
4651
4652         err = ixgbe_fc_enable(hw);
4653
4654         /* Not negotiated is not an error case */
4655         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
4656
4657                 /* check if we want to forward MAC frames - driver doesn't have native
4658                  * capability to do that, so we'll write the registers ourselves */
4659
4660                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4661
4662                 /* set or clear MFLCN.PMCF bit depending on configuration */
4663                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4664                         mflcn |= IXGBE_MFLCN_PMCF;
4665                 else
4666                         mflcn &= ~IXGBE_MFLCN_PMCF;
4667
4668                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
4669                 IXGBE_WRITE_FLUSH(hw);
4670
4671                 return 0;
4672         }
4673
4674         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
4675         return -EIO;
4676 }
4677
4678 /**
4679  *  ixgbe_pfc_enable_generic - Enable flow control
4680  *  @hw: pointer to hardware structure
4681  *  @tc_num: traffic class number
4682  *  Enable flow control according to the current settings.
4683  */
4684 static int
4685 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4686 {
4687         int ret_val = 0;
4688         uint32_t mflcn_reg, fccfg_reg;
4689         uint32_t reg;
4690         uint32_t fcrtl, fcrth;
4691         uint8_t i;
4692         uint8_t nb_rx_en;
4693
4694         /* Validate the water mark configuration */
4695         if (!hw->fc.pause_time) {
4696                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4697                 goto out;
4698         }
4699
4700         /* Low water mark of zero causes XOFF floods */
4701         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4702                  /* High/Low water can not be 0 */
4703                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4704                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4705                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4706                         goto out;
4707                 }
4708
4709                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4710                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4711                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4712                         goto out;
4713                 }
4714         }
4715         /* Negotiate the fc mode to use */
4716         ixgbe_fc_autoneg(hw);
4717
4718         /* Disable any previous flow control settings */
4719         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4720         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4721
4722         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4723         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4724
4725         switch (hw->fc.current_mode) {
4726         case ixgbe_fc_none:
4727                 /*
4728                  * If more than one RX priority flow control is enabled,
4729                  * the TX pause cannot be disabled.
4730                  */
4731                 nb_rx_en = 0;
4732                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4733                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4734                         if (reg & IXGBE_FCRTH_FCEN)
4735                                 nb_rx_en++;
4736                 }
4737                 if (nb_rx_en > 1)
4738                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4739                 break;
4740         case ixgbe_fc_rx_pause:
4741                 /*
4742                  * Rx Flow control is enabled and Tx Flow control is
4743                  * disabled by software override. Since there really
4744                  * isn't a way to advertise that we are capable of RX
4745                  * Pause ONLY, we will advertise that we support both
4746                  * symmetric and asymmetric Rx PAUSE.  Later, we will
4747                  * disable the adapter's ability to send PAUSE frames.
4748                  */
4749                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4750                 /*
4751                  * If more than one RX priority flow control is enabled,
4752                  * the TX pause cannot be disabled.
4753                  */
4754                 nb_rx_en = 0;
4755                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4756                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4757                         if (reg & IXGBE_FCRTH_FCEN)
4758                                 nb_rx_en++;
4759                 }
4760                 if (nb_rx_en > 1)
4761                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4762                 break;
4763         case ixgbe_fc_tx_pause:
4764                 /*
4765                  * Tx Flow control is enabled, and Rx Flow control is
4766                  * disabled by software override.
4767                  */
4768                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4769                 break;
4770         case ixgbe_fc_full:
4771                 /* Flow control (both Rx and Tx) is enabled by SW override. */
4772                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4773                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4774                 break;
4775         default:
4776                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4777                 ret_val = IXGBE_ERR_CONFIG;
4778                 goto out;
4779         }
4780
4781         /* Set 802.3x based flow control settings. */
4782         mflcn_reg |= IXGBE_MFLCN_DPF;
4783         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4784         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4785
4786         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
4787         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4788                 hw->fc.high_water[tc_num]) {
4789                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4790                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4791                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4792         } else {
4793                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4794                 /*
4795                  * In order to prevent Tx hangs when the internal Tx
4796                  * switch is enabled we must set the high water mark
4797                  * to the maximum FCRTH value.  This allows the Tx
4798                  * switch to function even under heavy Rx workloads.
4799                  */
4800                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4801         }
4802         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4803
4804         /* Configure pause time (2 TCs per register) */
4805         reg = hw->fc.pause_time * 0x00010001;
4806         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4807                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4808
4809         /* Configure flow control refresh threshold value */
4810         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4811
4812 out:
4813         return ret_val;
4814 }
4815
4816 static int
4817 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4818 {
4819         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4820         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4821
4822         if (hw->mac.type != ixgbe_mac_82598EB) {
4823                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4824         }
4825         return ret_val;
4826 }
4827
4828 static int
4829 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4830 {
4831         int err;
4832         uint32_t rx_buf_size;
4833         uint32_t max_high_water;
4834         uint8_t tc_num;
4835         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4836         struct ixgbe_hw *hw =
4837                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4838         struct ixgbe_dcb_config *dcb_config =
4839                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4840
4841         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4842                 ixgbe_fc_none,
4843                 ixgbe_fc_rx_pause,
4844                 ixgbe_fc_tx_pause,
4845                 ixgbe_fc_full
4846         };
4847
4848         PMD_INIT_FUNC_TRACE();
4849
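             /* Map the user priority to its traffic class so the per-TC
              * packet buffer and watermark registers for that class are used. */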
4850         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4851         tc_num = map[pfc_conf->priority];
4852         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4853         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4854         /*
4855          * Reserve at least one full Ethernet frame of headroom for the
4856          * high_water/low_water watermarks, which are in kilobytes for ixgbe.
4857          */
4858         max_high_water = (rx_buf_size -
4859                         RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4860         if ((pfc_conf->fc.high_water > max_high_water) ||
4861             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4862                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4863                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4864                 return -EINVAL;
4865         }
4866
4867         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4868         hw->fc.pause_time = pfc_conf->fc.pause_time;
4869         hw->fc.send_xon = pfc_conf->fc.send_xon;
4870         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
4871         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4872
4873         err = ixgbe_dcb_pfc_enable(dev, tc_num);
4874
4875         /* Not negotiated is not an error case */
4876         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4877                 return 0;
4878
4879         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4880         return -EIO;
4881 }
4882
4883 static int
4884 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
4885                           struct rte_eth_rss_reta_entry64 *reta_conf,
4886                           uint16_t reta_size)
4887 {
4888         uint16_t i, sp_reta_size;
4889         uint8_t j, mask;
4890         uint32_t reta, r;
4891         uint16_t idx, shift;
4892         struct ixgbe_adapter *adapter = dev->data->dev_private;
4893         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4894         uint32_t reta_reg;
4895
4896         PMD_INIT_FUNC_TRACE();
4897
4898         if (!ixgbe_rss_update_sp(hw->mac.type)) {
4899                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
4900                         "NIC.");
4901                 return -ENOTSUP;
4902         }
4903
4904         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4905         if (reta_size != sp_reta_size) {
4906                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4907                         "(%d) doesn't match the number the hardware can support "
4908                         "(%d)", reta_size, sp_reta_size);
4909                 return -EINVAL;
4910         }
4911
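             /* Each 32-bit RETA register holds four 8-bit queue indices, so entries
              * are processed four at a time; a partial mask triggers a
              * read-modify-write of the register. */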
4912         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4913                 idx = i / RTE_RETA_GROUP_SIZE;
4914                 shift = i % RTE_RETA_GROUP_SIZE;
4915                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4916                                                 IXGBE_4_BIT_MASK);
4917                 if (!mask)
4918                         continue;
4919                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4920                 if (mask == IXGBE_4_BIT_MASK)
4921                         r = 0;
4922                 else
4923                         r = IXGBE_READ_REG(hw, reta_reg);
4924                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4925                         if (mask & (0x1 << j))
4926                                 reta |= reta_conf[idx].reta[shift + j] <<
4927                                                         (CHAR_BIT * j);
4928                         else
4929                                 reta |= r & (IXGBE_8_BIT_MASK <<
4930                                                 (CHAR_BIT * j));
4931                 }
4932                 IXGBE_WRITE_REG(hw, reta_reg, reta);
4933         }
4934         adapter->rss_reta_updated = 1;
4935
4936         return 0;
4937 }
4938
4939 static int
4940 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4941                          struct rte_eth_rss_reta_entry64 *reta_conf,
4942                          uint16_t reta_size)
4943 {
4944         uint16_t i, sp_reta_size;
4945         uint8_t j, mask;
4946         uint32_t reta;
4947         uint16_t idx, shift;
4948         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4949         uint32_t reta_reg;
4950
4951         PMD_INIT_FUNC_TRACE();
4952         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4953         if (reta_size != sp_reta_size) {
4954                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4955                         "(%d) doesn't match the number the hardware can support "
4956                         "(%d)", reta_size, sp_reta_size);
4957                 return -EINVAL;
4958         }
4959
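             /* Read back four 8-bit RETA entries per 32-bit register, honoring
              * the per-entry mask supplied by the caller. */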
4960         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4961                 idx = i / RTE_RETA_GROUP_SIZE;
4962                 shift = i % RTE_RETA_GROUP_SIZE;
4963                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4964                                                 IXGBE_4_BIT_MASK);
4965                 if (!mask)
4966                         continue;
4967
4968                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4969                 reta = IXGBE_READ_REG(hw, reta_reg);
4970                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4971                         if (mask & (0x1 << j))
4972                                 reta_conf[idx].reta[shift + j] =
4973                                         ((reta >> (CHAR_BIT * j)) &
4974                                                 IXGBE_8_BIT_MASK);
4975                 }
4976         }
4977
4978         return 0;
4979 }
4980
4981 static int
4982 ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
4983                                 uint32_t index, uint32_t pool)
4984 {
4985         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4986         uint32_t enable_addr = 1;
4987
4988         return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
4989                              pool, enable_addr);
4990 }
4991
4992 static void
4993 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
4994 {
4995         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4996
4997         ixgbe_clear_rar(hw, index);
4998 }
4999
5000 static int
5001 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
5002 {
5003         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5004
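             /* Re-program RAR[0] with the new default MAC address; pci_dev->max_vfs
              * is used as the pool index so the address belongs to the PF pool
              * when SR-IOV is active. */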
5005         ixgbe_remove_rar(dev, 0);
5006         ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
5007
5008         return 0;
5009 }
5010
5011 static bool
5012 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
5013 {
5014         if (strcmp(dev->device->driver->name, drv->driver.name))
5015                 return false;
5016
5017         return true;
5018 }
5019
5020 bool
5021 is_ixgbe_supported(struct rte_eth_dev *dev)
5022 {
5023         return is_device_supported(dev, &rte_ixgbe_pmd);
5024 }
5025
5026 static int
5027 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
5028 {
5029         uint32_t hlreg0;
5030         uint32_t maxfrs;
5031         struct ixgbe_hw *hw;
5032         struct rte_eth_dev_info dev_info;
5033         uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
5034         struct rte_eth_dev_data *dev_data = dev->data;
5035         int ret;
5036
5037         ret = ixgbe_dev_info_get(dev, &dev_info);
5038         if (ret != 0)
5039                 return ret;
5040
5041         /* check that mtu is within the allowed range */
5042         if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
5043                 return -EINVAL;
5044
5045         /* If device is started, refuse mtu that requires the support of
5046          * scattered packets when this feature has not been enabled before.
5047          */
5048         if (dev_data->dev_started && !dev_data->scattered_rx &&
5049             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
5050              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
5051                 PMD_INIT_LOG(ERR, "Stop port first.");
5052                 return -EINVAL;
5053         }
5054
5055         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5056         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5057
5058         /* switch to jumbo mode if needed */
5059         if (frame_size > RTE_ETHER_MAX_LEN) {
5060                 dev->data->dev_conf.rxmode.offloads |=
5061                         DEV_RX_OFFLOAD_JUMBO_FRAME;
5062                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
5063         } else {
5064                 dev->data->dev_conf.rxmode.offloads &=
5065                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
5066                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
5067         }
5068         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5069
5070         /* update max frame size */
5071         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
5072
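             /* MAXFRS keeps the maximum frame size in its upper 16 bits; preserve
              * the lower bits and update only the frame size field. */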
5073         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
5074         maxfrs &= 0x0000FFFF;
5075         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
5076         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
5077
5078         return 0;
5079 }
5080
5081 /*
5082  * Virtual Function operations
5083  */
5084 static void
5085 ixgbevf_intr_disable(struct rte_eth_dev *dev)
5086 {
5087         struct ixgbe_interrupt *intr =
5088                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5089         struct ixgbe_hw *hw =
5090                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5091
5092         PMD_INIT_FUNC_TRACE();
5093
5094         /* Clear interrupt mask to stop from interrupts being generated */
5095         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
5096
5097         IXGBE_WRITE_FLUSH(hw);
5098
5099         /* Clear mask value. */
5100         intr->mask = 0;
5101 }
5102
5103 static void
5104 ixgbevf_intr_enable(struct rte_eth_dev *dev)
5105 {
5106         struct ixgbe_interrupt *intr =
5107                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5108         struct ixgbe_hw *hw =
5109                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5110
5111         PMD_INIT_FUNC_TRACE();
5112
5113         /* VF enable interrupt autoclean */
5114         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
5115         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
5116         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
5117
5118         IXGBE_WRITE_FLUSH(hw);
5119
5120         /* Save IXGBE_VTEIMS value to mask. */
5121         intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
5122 }
5123
5124 static int
5125 ixgbevf_dev_configure(struct rte_eth_dev *dev)
5126 {
5127         struct rte_eth_conf *conf = &dev->data->dev_conf;
5128         struct ixgbe_adapter *adapter = dev->data->dev_private;
5129
5130         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
5131                      dev->data->port_id);
5132
5133         /*
5134          * The VF has no ability to enable/disable HW CRC stripping;
5135          * keep the persistent behavior the same as the host PF.
5136          */
5137 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
5138         if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
5139                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
5140                 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
5141         }
5142 #else
5143         if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
5144                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
5145                 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
5146         }
5147 #endif
5148
5149         /*
5150          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
5151          * allocation or vector Rx preconditions, we will reset it.
5152          */
5153         adapter->rx_bulk_alloc_allowed = true;
5154         adapter->rx_vec_allowed = true;
5155
5156         return 0;
5157 }
5158
5159 static int
5160 ixgbevf_dev_start(struct rte_eth_dev *dev)
5161 {
5162         struct ixgbe_hw *hw =
5163                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5164         uint32_t intr_vector = 0;
5165         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5166         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5167
5168         int err, mask = 0;
5169
5170         PMD_INIT_FUNC_TRACE();
5171
5172         /* Stop the link setup handler before resetting the HW. */
5173         rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
5174
5175         err = hw->mac.ops.reset_hw(hw);
5176         if (err) {
5177                 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
5178                 return err;
5179         }
5180         hw->mac.get_link_status = true;
5181
5182         /* negotiate mailbox API version to use with the PF. */
5183         ixgbevf_negotiate_api(hw);
5184
5185         ixgbevf_dev_tx_init(dev);
5186
5187         /* This can fail when allocating mbufs for descriptor rings */
5188         err = ixgbevf_dev_rx_init(dev);
5189         if (err) {
5190                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
5191                 ixgbe_dev_clear_queues(dev);
5192                 return err;
5193         }
5194
5195         /* Set vfta */
5196         ixgbevf_set_vfta_all(dev, 1);
5197
5198         /* Set HW strip */
5199         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
5200                 ETH_VLAN_EXTEND_MASK;
5201         err = ixgbevf_vlan_offload_config(dev, mask);
5202         if (err) {
5203                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
5204                 ixgbe_dev_clear_queues(dev);
5205                 return err;
5206         }
5207
5208         ixgbevf_dev_rxtx_start(dev);
5209
5210         /* check and configure queue intr-vector mapping */
5211         if (rte_intr_cap_multiple(intr_handle) &&
5212             dev->data->dev_conf.intr_conf.rxq) {
5213                 /* According to the datasheet, only vectors 0/1/2 can be used;
5214                  * for now only one vector is used for the Rx queue
5215                  */
5216                 intr_vector = 1;
5217                 if (rte_intr_efd_enable(intr_handle, intr_vector))
5218                         return -1;
5219         }
5220
5221         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
5222                 intr_handle->intr_vec =
5223                         rte_zmalloc("intr_vec",
5224                                     dev->data->nb_rx_queues * sizeof(int), 0);
5225                 if (intr_handle->intr_vec == NULL) {
5226                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
5227                                      " intr_vec", dev->data->nb_rx_queues);
5228                         return -ENOMEM;
5229                 }
5230         }
5231         ixgbevf_configure_msix(dev);
5232
5233         /* When a VF port is bound to VFIO-PCI, only the miscellaneous interrupt
5234          * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
5235          * If the previous VFIO interrupt mapping set in eth_ixgbevf_dev_init( )
5236          * is not cleared, it will fail when the following rte_intr_enable( ) tries
5237          * to map Rx queue interrupts to other VFIO vectors.
5238          * So clear the uio/vfio intr/eventfd first to avoid failure.
5239          */
5240         rte_intr_disable(intr_handle);
5241
5242         rte_intr_enable(intr_handle);
5243
5244         /* Re-enable interrupt for VF */
5245         ixgbevf_intr_enable(dev);
5246
5247         /*
5248          * Update the link status right before returning, because it may
5249          * start the link configuration process in a separate thread.
5250          */
5251         ixgbevf_dev_link_update(dev, 0);
5252
5253         hw->adapter_stopped = false;
5254
5255         return 0;
5256 }
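
/*
 * Illustrative, application-side sketch (not part of the driver): the usual
 * ethdev call sequence that ends up in ixgbevf_dev_start(). The queue count,
 * ring sizes and the mempool "mb_pool" are assumptions made for the example,
 * and error handling is omitted.
 *
 *     struct rte_eth_conf port_conf;
 *     memset(&port_conf, 0, sizeof(port_conf));
 *     rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *     rte_eth_rx_queue_setup(port_id, 0, 512,
 *                            rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *     rte_eth_tx_queue_setup(port_id, 0, 512,
 *                            rte_eth_dev_socket_id(port_id), NULL);
 *     rte_eth_dev_start(port_id);
 */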
5257
5258 static void
5259 ixgbevf_dev_stop(struct rte_eth_dev *dev)
5260 {
5261         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5262         struct ixgbe_adapter *adapter = dev->data->dev_private;
5263         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5264         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5265
5266         if (hw->adapter_stopped)
5267                 return;
5268
5269         PMD_INIT_FUNC_TRACE();
5270
5271         rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
5272
5273         ixgbevf_intr_disable(dev);
5274
5275         hw->adapter_stopped = 1;
5276         ixgbe_stop_adapter(hw);
5277
5278         /*
5279          * Clear what we set, but keep shadow_vfta so it can be
5280          * restored after the device starts again
5281           */
5282         ixgbevf_set_vfta_all(dev, 0);
5283
5284         /* Clear stored conf */
5285         dev->data->scattered_rx = 0;
5286
5287         ixgbe_dev_clear_queues(dev);
5288
5289         /* Clean datapath event and queue/vec mapping */
5290         rte_intr_efd_disable(intr_handle);
5291         if (intr_handle->intr_vec != NULL) {
5292                 rte_free(intr_handle->intr_vec);
5293                 intr_handle->intr_vec = NULL;
5294         }
5295
5296         adapter->rss_reta_updated = 0;
5297 }
5298
5299 static void
5300 ixgbevf_dev_close(struct rte_eth_dev *dev)
5301 {
5302         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5303         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5304         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5305
5306         PMD_INIT_FUNC_TRACE();
5307
5308         ixgbe_reset_hw(hw);
5309
5310         ixgbevf_dev_stop(dev);
5311
5312         ixgbe_dev_free_queues(dev);
5313
5314         /**
5315          * Remove the VF MAC address to ensure
5316          * that the VF traffic goes to the PF
5317          * after stop, close and detach of the VF
5318          **/
5319         ixgbevf_remove_mac_addr(dev, 0);
5320
5321         dev->dev_ops = NULL;
5322         dev->rx_pkt_burst = NULL;
5323         dev->tx_pkt_burst = NULL;
5324
5325         rte_intr_disable(intr_handle);
5326         rte_intr_callback_unregister(intr_handle,
5327                                      ixgbevf_dev_interrupt_handler, dev);
5328 }
5329
5330 /*
5331  * Reset VF device
5332  */
5333 static int
5334 ixgbevf_dev_reset(struct rte_eth_dev *dev)
5335 {
5336         int ret;
5337
5338         ret = eth_ixgbevf_dev_uninit(dev);
5339         if (ret)
5340                 return ret;
5341
5342         ret = eth_ixgbevf_dev_init(dev);
5343
5344         return ret;
5345 }
5346
5347 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5348 {
5349         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5350         struct ixgbe_vfta *shadow_vfta =
5351                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5352         int i = 0, j = 0, vfta = 0, mask = 1;
5353
5354         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5355                 vfta = shadow_vfta->vfta[i];
5356                 if (vfta) {
5357                         mask = 1;
5358                         for (j = 0; j < 32; j++) {
5359                                 if (vfta & mask)
5360                                         ixgbe_set_vfta(hw, (i<<5)+j, 0,
5361                                                        on, false);
5362                                 mask <<= 1;
5363                         }
5364                 }
5365         }
5366
5367 }
5368
5369 static int
5370 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5371 {
5372         struct ixgbe_hw *hw =
5373                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5374         struct ixgbe_vfta *shadow_vfta =
5375                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5376         uint32_t vid_idx = 0;
5377         uint32_t vid_bit = 0;
5378         int ret = 0;
5379
5380         PMD_INIT_FUNC_TRACE();
5381
5382         /* vind is not used in the VF driver; set it to 0 (see ixgbe_set_vfta_vf) */
5383         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5384         if (ret) {
5385                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5386                 return ret;
5387         }
5388         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5389         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5390
5391         /* Save what we set so it can be restored after a device reset */
5392         if (on)
5393                 shadow_vfta->vfta[vid_idx] |= vid_bit;
5394         else
5395                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
5396
5397         return 0;
5398 }
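
/*
 * Worked example for the shadow VFTA indexing above (illustrative only):
 * for vlan_id = 100, vid_idx = (100 >> 5) & 0x7F = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4, so bit 4 of shadow_vfta->vfta[3]
 * tracks VLAN 100. ixgbevf_set_vfta_all() walks exactly these bits when the
 * table is replayed after a reset.
 */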
5399
5400 static void
5401 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5402 {
5403         struct ixgbe_hw *hw =
5404                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5405         uint32_t ctrl;
5406
5407         PMD_INIT_FUNC_TRACE();
5408
5409         if (queue >= hw->mac.max_rx_queues)
5410                 return;
5411
5412         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5413         if (on)
5414                 ctrl |= IXGBE_RXDCTL_VME;
5415         else
5416                 ctrl &= ~IXGBE_RXDCTL_VME;
5417         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5418
5419         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5420 }
5421
5422 static int
5423 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
5424 {
5425         struct ixgbe_rx_queue *rxq;
5426         uint16_t i;
5427         int on = 0;
5428
5429         /* The VF only supports the HW VLAN strip offload; others are not supported */
5430         if (mask & ETH_VLAN_STRIP_MASK) {
5431                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5432                         rxq = dev->data->rx_queues[i];
5433                         on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
5434                         ixgbevf_vlan_strip_queue_set(dev, i, on);
5435                 }
5436         }
5437
5438         return 0;
5439 }
5440
5441 static int
5442 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5443 {
5444         ixgbe_config_vlan_strip_on_all_queues(dev, mask);
5445
5446         ixgbevf_vlan_offload_config(dev, mask);
5447
5448         return 0;
5449 }
5450
5451 int
5452 ixgbe_vt_check(struct ixgbe_hw *hw)
5453 {
5454         uint32_t reg_val;
5455
5456         /* if Virtualization Technology is enabled */
5457         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
5458         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
5459                 PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
5460                 return -1;
5461         }
5462
5463         return 0;
5464 }
5465
5466 static uint32_t
5467 ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr)
5468 {
5469         uint32_t vector = 0;
5470
5471         switch (hw->mac.mc_filter_type) {
5472         case 0:   /* use bits [47:36] of the address */
5473                 vector = ((uc_addr->addr_bytes[4] >> 4) |
5474                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
5475                 break;
5476         case 1:   /* use bits [46:35] of the address */
5477                 vector = ((uc_addr->addr_bytes[4] >> 3) |
5478                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
5479                 break;
5480         case 2:   /* use bits [45:34] of the address */
5481                 vector = ((uc_addr->addr_bytes[4] >> 2) |
5482                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
5483                 break;
5484         case 3:   /* use bits [43:32] of the address */
5485                 vector = ((uc_addr->addr_bytes[4]) |
5486                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
5487                 break;
5488         default:  /* Invalid mc_filter_type */
5489                 break;
5490         }
5491
5492         /* the vector can only be 12 bits wide or the table boundary will be exceeded */
5493         vector &= 0xFFF;
5494         return vector;
5495 }
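
/*
 * Worked example for ixgbe_uta_vector() (illustrative only): with
 * mc_filter_type == 0 and an address whose last two bytes are
 * addr_bytes[4] = 0xAB and addr_bytes[5] = 0xCD, the vector is
 * (0xAB >> 4) | (0xCD << 4) = 0x0A | 0xCD0 = 0xCDA. The final
 * "vector &= 0xFFF" keeps it within the 12-bit range addressed by
 * the 128 x 32-bit UTA registers.
 */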
5496
5497 static int
5498 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
5499                         struct rte_ether_addr *mac_addr, uint8_t on)
5500 {
5501         uint32_t vector;
5502         uint32_t uta_idx;
5503         uint32_t reg_val;
5504         uint32_t uta_shift;
5505         uint32_t rc;
5506         const uint32_t ixgbe_uta_idx_mask = 0x7F;
5507         const uint32_t ixgbe_uta_bit_shift = 5;
5508         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
5509         const uint32_t bit1 = 0x1;
5510
5511         struct ixgbe_hw *hw =
5512                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5513         struct ixgbe_uta_info *uta_info =
5514                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5515
5516         /* The UTA table only exists on 82599 hardware and newer */
5517         if (hw->mac.type < ixgbe_mac_82599EB)
5518                 return -ENOTSUP;
5519
5520         vector = ixgbe_uta_vector(hw, mac_addr);
5521         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
5522         uta_shift = vector & ixgbe_uta_bit_mask;
5523
5524         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
5525         if (rc == on)
5526                 return 0;
5527
5528         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
5529         if (on) {
5530                 uta_info->uta_in_use++;
5531                 reg_val |= (bit1 << uta_shift);
5532                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
5533         } else {
5534                 uta_info->uta_in_use--;
5535                 reg_val &= ~(bit1 << uta_shift);
5536                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
5537         }
5538
5539         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
5540
5541         if (uta_info->uta_in_use > 0)
5542                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
5543                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
5544         else
5545                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
5546
5547         return 0;
5548 }
5549
5550 static int
5551 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
5552 {
5553         int i;
5554         struct ixgbe_hw *hw =
5555                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5556         struct ixgbe_uta_info *uta_info =
5557                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5558
5559         /* The UTA table only exists on 82599 hardware and newer */
5560         if (hw->mac.type < ixgbe_mac_82599EB)
5561                 return -ENOTSUP;
5562
5563         if (on) {
5564                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5565                         uta_info->uta_shadow[i] = ~0;
5566                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
5567                 }
5568         } else {
5569                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5570                         uta_info->uta_shadow[i] = 0;
5571                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
5572                 }
5573         }
5574         return 0;
5575
5576 }
5577
5578 uint32_t
5579 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
5580 {
5581         uint32_t new_val = orig_val;
5582
5583         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
5584                 new_val |= IXGBE_VMOLR_AUPE;
5585         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
5586                 new_val |= IXGBE_VMOLR_ROMPE;
5587         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
5588                 new_val |= IXGBE_VMOLR_ROPE;
5589         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
5590                 new_val |= IXGBE_VMOLR_BAM;
5591         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
5592                 new_val |= IXGBE_VMOLR_MPE;
5593
5594         return new_val;
5595 }
5596
5597 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
5598 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
5599 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
5600 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
5601 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5602         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5603         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5604
5605 static int
5606 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5607                       struct rte_eth_mirror_conf *mirror_conf,
5608                       uint8_t rule_id, uint8_t on)
5609 {
5610         uint32_t mr_ctl, vlvf;
5611         uint32_t mp_lsb = 0;
5612         uint32_t mv_msb = 0;
5613         uint32_t mv_lsb = 0;
5614         uint32_t mp_msb = 0;
5615         uint8_t i = 0;
5616         int reg_index = 0;
5617         uint64_t vlan_mask = 0;
5618
5619         const uint8_t pool_mask_offset = 32;
5620         const uint8_t vlan_mask_offset = 32;
5621         const uint8_t dst_pool_offset = 8;
5622         const uint8_t rule_mr_offset  = 4;
5623         const uint8_t mirror_rule_mask = 0x0F;
5624
5625         struct ixgbe_mirror_info *mr_info =
5626                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5627         struct ixgbe_hw *hw =
5628                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5629         uint8_t mirror_type = 0;
5630
5631         if (ixgbe_vt_check(hw) < 0)
5632                 return -ENOTSUP;
5633
5634         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5635                 return -EINVAL;
5636
5637         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5638                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5639                             mirror_conf->rule_type);
5640                 return -EINVAL;
5641         }
5642
5643         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5644                 mirror_type |= IXGBE_MRCTL_VLME;
5645                 /* Check if the VLAN ID is valid and find the corresponding
5646                  * VLAN ID index in VLVF
5647                  */
5648                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5649                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5650                                 /* search vlan id related pool vlan filter
5651                                  * index
5652                                  */
5653                                 reg_index = ixgbe_find_vlvf_slot(
5654                                                 hw,
5655                                                 mirror_conf->vlan.vlan_id[i],
5656                                                 false);
5657                                 if (reg_index < 0)
5658                                         return -EINVAL;
5659                                 vlvf = IXGBE_READ_REG(hw,
5660                                                       IXGBE_VLVF(reg_index));
5661                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
5662                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5663                                       mirror_conf->vlan.vlan_id[i]))
5664                                         vlan_mask |= (1ULL << reg_index);
5665                                 else
5666                                         return -EINVAL;
5667                         }
5668                 }
5669
5670                 if (on) {
5671                         mv_lsb = vlan_mask & 0xFFFFFFFF;
5672                         mv_msb = vlan_mask >> vlan_mask_offset;
5673
5674                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
5675                                                 mirror_conf->vlan.vlan_mask;
5676                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5677                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5678                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5679                                                 mirror_conf->vlan.vlan_id[i];
5680                         }
5681                 } else {
5682                         mv_lsb = 0;
5683                         mv_msb = 0;
5684                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5685                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5686                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5687                 }
5688         }
5689
5690         /**
5691          * If pool mirroring is enabled, write the related pool mask register;
5692          * if it is disabled, clear the PFMRVM register.
5693          */
5694         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5695                 mirror_type |= IXGBE_MRCTL_VPME;
5696                 if (on) {
5697                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5698                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5699                         mr_info->mr_conf[rule_id].pool_mask =
5700                                         mirror_conf->pool_mask;
5701
5702                 } else {
5703                         mp_lsb = 0;
5704                         mp_msb = 0;
5705                         mr_info->mr_conf[rule_id].pool_mask = 0;
5706                 }
5707         }
5708         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5709                 mirror_type |= IXGBE_MRCTL_UPME;
5710         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5711                 mirror_type |= IXGBE_MRCTL_DPME;
5712
5713         /* read  mirror control register and recalculate it */
5714         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5715
5716         if (on) {
5717                 mr_ctl |= mirror_type;
5718                 mr_ctl &= mirror_rule_mask;
5719                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
5720         } else {
5721                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
5722         }
5723
5724         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5725         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5726
5727         /* write the mirror control register */
5728         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5729
5730         /* write the pool mirror control register */
5731         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5732                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5733                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5734                                 mp_msb);
5735         }
5736         /* write the VLAN mirror control register */
5737         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5738                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5739                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5740                                 mv_msb);
5741         }
5742
5743         return 0;
5744 }
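
/*
 * Illustrative, application-side sketch (not part of the driver): programming
 * a pool-mirroring rule that reaches ixgbe_mirror_rule_set() through the
 * generic ethdev mirroring API, mirroring traffic received by pool 2 into
 * pool 0. The port id, pool numbers and rule id are assumptions made for the
 * example, and error checking is omitted.
 *
 *     struct rte_eth_mirror_conf mc;
 *     memset(&mc, 0, sizeof(mc));
 *     mc.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP;
 *     mc.pool_mask = 1ULL << 2;
 *     mc.dst_pool = 0;
 *     rte_eth_mirror_rule_set(port_id, &mc, 0, 1);
 */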
5745
5746 static int
5747 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5748 {
5749         int mr_ctl = 0;
5750         uint32_t lsb_val = 0;
5751         uint32_t msb_val = 0;
5752         const uint8_t rule_mr_offset = 4;
5753
5754         struct ixgbe_hw *hw =
5755                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5756         struct ixgbe_mirror_info *mr_info =
5757                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5758
5759         if (ixgbe_vt_check(hw) < 0)
5760                 return -ENOTSUP;
5761
5762         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5763                 return -EINVAL;
5764
5765         memset(&mr_info->mr_conf[rule_id], 0,
5766                sizeof(struct rte_eth_mirror_conf));
5767
5768         /* clear PFVMCTL register */
5769         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5770
5771         /* clear pool mask register */
5772         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5773         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5774
5775         /* clear vlan mask register */
5776         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5777         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
5778
5779         return 0;
5780 }
5781
5782 static int
5783 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5784 {
5785         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5786         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5787         struct ixgbe_interrupt *intr =
5788                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5789         struct ixgbe_hw *hw =
5790                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5791         uint32_t vec = IXGBE_MISC_VEC_ID;
5792
5793         if (rte_intr_allow_others(intr_handle))
5794                 vec = IXGBE_RX_VEC_START;
5795         intr->mask |= (1 << vec);
5796         RTE_SET_USED(queue_id);
5797         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
5798
5799         rte_intr_ack(intr_handle);
5800
5801         return 0;
5802 }
5803
5804 static int
5805 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5806 {
5807         struct ixgbe_interrupt *intr =
5808                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5809         struct ixgbe_hw *hw =
5810                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5811         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5812         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5813         uint32_t vec = IXGBE_MISC_VEC_ID;
5814
5815         if (rte_intr_allow_others(intr_handle))
5816                 vec = IXGBE_RX_VEC_START;
5817         intr->mask &= ~(1 << vec);
5818         RTE_SET_USED(queue_id);
5819         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
5820
5821         return 0;
5822 }
5823
5824 static int
5825 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5826 {
5827         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5828         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5829         uint32_t mask;
5830         struct ixgbe_hw *hw =
5831                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5832         struct ixgbe_interrupt *intr =
5833                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5834
5835         if (queue_id < 16) {
5836                 ixgbe_disable_intr(hw);
5837                 intr->mask |= (1 << queue_id);
5838                 ixgbe_enable_intr(dev);
5839         } else if (queue_id < 32) {
5840                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5841                 mask &= (1 << queue_id);
5842                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5843         } else if (queue_id < 64) {
5844                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5845                 mask &= (1 << (queue_id - 32));
5846                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5847         }
5848         rte_intr_ack(intr_handle);
5849
5850         return 0;
5851 }
5852
5853 static int
5854 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5855 {
5856         uint32_t mask;
5857         struct ixgbe_hw *hw =
5858                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5859         struct ixgbe_interrupt *intr =
5860                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5861
5862         if (queue_id < 16) {
5863                 ixgbe_disable_intr(hw);
5864                 intr->mask &= ~(1 << queue_id);
5865                 ixgbe_enable_intr(dev);
5866         } else if (queue_id < 32) {
5867                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5868                 mask &= ~(1 << queue_id);
5869                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5870         } else if (queue_id < 64) {
5871                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5872                 mask &= ~(1 << (queue_id - 32));
5873                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5874         }
5875
5876         return 0;
5877 }
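
/*
 * Illustrative, application-side sketch (not part of the driver): the
 * queue-interrupt callbacks above are reached through the generic ethdev
 * API, typically from an event loop that sleeps until a queue has work.
 * The port and queue ids are assumptions made for the example.
 *
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *     (wait for the interrupt event, e.g. with rte_epoll_wait())
 *     rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *     (poll the queue with rte_eth_rx_burst() until it is drained)
 */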
5878
5879 static void
5880 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5881                      uint8_t queue, uint8_t msix_vector)
5882 {
5883         uint32_t tmp, idx;
5884
5885         if (direction == -1) {
5886                 /* other causes */
5887                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5888                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
5889                 tmp &= ~0xFF;
5890                 tmp |= msix_vector;
5891                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
5892         } else {
5893                 /* rx or tx cause */
5894                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5895                 idx = ((16 * (queue & 1)) + (8 * direction));
5896                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
5897                 tmp &= ~(0xFF << idx);
5898                 tmp |= (msix_vector << idx);
5899                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
5900         }
5901 }
5902
5903 /**
5904  * set the IVAR registers, mapping interrupt causes to vectors
5905  * @param hw
5906  *  pointer to ixgbe_hw struct
5907  * @direction
5908  *  0 for Rx, 1 for Tx, -1 for other causes
5909  * @queue
5910  *  queue to map the corresponding interrupt to
5911  * @msix_vector
5912  *  the vector to map to the corresponding queue
5913  */
5914 static void
5915 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5916                    uint8_t queue, uint8_t msix_vector)
5917 {
5918         uint32_t tmp, idx;
5919
5920         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5921         if (hw->mac.type == ixgbe_mac_82598EB) {
5922                 if (direction == -1)
5923                         direction = 0;
5924                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
5925                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
5926                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
5927                 tmp |= (msix_vector << (8 * (queue & 0x3)));
5928                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
5929         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
5930                         (hw->mac.type == ixgbe_mac_X540) ||
5931                         (hw->mac.type == ixgbe_mac_X550) ||
5932                         (hw->mac.type == ixgbe_mac_X550EM_x)) {
5933                 if (direction == -1) {
5934                         /* other causes */
5935                         idx = ((queue & 1) * 8);
5936                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5937                         tmp &= ~(0xFF << idx);
5938                         tmp |= (msix_vector << idx);
5939                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
5940                 } else {
5941                         /* rx or tx causes */
5942                         idx = ((16 * (queue & 1)) + (8 * direction));
5943                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
5944                         tmp &= ~(0xFF << idx);
5945                         tmp |= (msix_vector << idx);
5946                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
5947                 }
5948         }
5949 }
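
/*
 * Worked example for the IVAR layout used above (illustrative only): on a
 * non-82598 MAC, Rx queue 3 (direction 0) gives
 * idx = (16 * (3 & 1)) + (8 * 0) = 16, so the vector is written into bits
 * [23:16] of IVAR(3 >> 1) = IVAR(1). Each IVAR register therefore holds the
 * Rx and Tx entries of two consecutive queues, one byte per entry.
 */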
5950
5951 static void
5952 ixgbevf_configure_msix(struct rte_eth_dev *dev)
5953 {
5954         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5955         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5956         struct ixgbe_hw *hw =
5957                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5958         uint32_t q_idx;
5959         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
5960         uint32_t base = IXGBE_MISC_VEC_ID;
5961
5962         /* Configure VF other cause ivar */
5963         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
5964
5965         /* Don't configure MSI-X registers if no mapping has been done
5966          * between interrupt vectors and event fds.
5967          */
5968         if (!rte_intr_dp_is_en(intr_handle))
5969                 return;
5970
5971         if (rte_intr_allow_others(intr_handle)) {
5972                 base = IXGBE_RX_VEC_START;
5973                 vector_idx = IXGBE_RX_VEC_START;
5974         }
5975
5976         /* Configure all RX queues of VF */
5977         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
5978                 /* Force all queues to use vector 0,
5979                  * as IXGBE_VF_MAXMSIVECTOR = 1
5980                  */
5981                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
5982                 intr_handle->intr_vec[q_idx] = vector_idx;
5983                 if (vector_idx < base + intr_handle->nb_efd - 1)
5984                         vector_idx++;
5985         }
5986
5987         /* As the RX queue setup above shows, all queues use vector 0.
5988          * Set only the ITR value of IXGBE_MISC_VEC_ID.
5989          */
5990         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
5991                         IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
5992                         | IXGBE_EITR_CNT_WDIS);
5993 }
5994
5995 /**
5996  * Sets up the hardware to properly generate MSI-X interrupts
5997  * @hw
5998  *  board private structure
5999  */
6000 static void
6001 ixgbe_configure_msix(struct rte_eth_dev *dev)
6002 {
6003         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
6004         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
6005         struct ixgbe_hw *hw =
6006                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6007         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
6008         uint32_t vec = IXGBE_MISC_VEC_ID;
6009         uint32_t mask;
6010         uint32_t gpie;
6011
6012         /* Don't configure MSI-X registers if no mapping has been done
6013          * between interrupt vectors and event fds; but if MSI-X has
6014          * already been enabled, auto clearing, auto masking and
6015          * throttling still need to be configured.
6016          */
6017         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
6018         if (!rte_intr_dp_is_en(intr_handle) &&
6019             !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
6020                 return;
6021
6022         if (rte_intr_allow_others(intr_handle))
6023                 vec = base = IXGBE_RX_VEC_START;
6024
6025         /* setup GPIE for MSI-x mode */
6026         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
6027         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
6028                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
6029         /* auto clearing and auto setting corresponding bits in EIMS
6030          * when MSI-X interrupt is triggered
6031          */
6032         if (hw->mac.type == ixgbe_mac_82598EB) {
6033                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
6034         } else {
6035                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
6036                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
6037         }
6038         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
6039
6040         /* Populate the IVAR table and set the ITR values to the
6041          * corresponding register.
6042          */
6043         if (rte_intr_dp_is_en(intr_handle)) {
6044                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
6045                         queue_id++) {
6046                         /* by default, 1:1 mapping */
6047                         ixgbe_set_ivar_map(hw, 0, queue_id, vec);
6048                         intr_handle->intr_vec[queue_id] = vec;
6049                         if (vec < base + intr_handle->nb_efd - 1)
6050                                 vec++;
6051                 }
6052
6053                 switch (hw->mac.type) {
6054                 case ixgbe_mac_82598EB:
6055                         ixgbe_set_ivar_map(hw, -1,
6056                                            IXGBE_IVAR_OTHER_CAUSES_INDEX,
6057                                            IXGBE_MISC_VEC_ID);
6058                         break;
6059                 case ixgbe_mac_82599EB:
6060                 case ixgbe_mac_X540:
6061                 case ixgbe_mac_X550:
6062                 case ixgbe_mac_X550EM_x:
6063                         ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
6064                         break;
6065                 default:
6066                         break;
6067                 }
6068         }
6069         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
6070                         IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
6071                         | IXGBE_EITR_CNT_WDIS);
6072
6073         /* set up to autoclear timer, and the vectors */
6074         mask = IXGBE_EIMS_ENABLE_MASK;
6075         mask &= ~(IXGBE_EIMS_OTHER |
6076                   IXGBE_EIMS_MAILBOX |
6077                   IXGBE_EIMS_LSC);
6078
6079         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
6080 }
6081
6082 int
6083 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
6084                            uint16_t queue_idx, uint16_t tx_rate)
6085 {
6086         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6087         struct rte_eth_rxmode *rxmode;
6088         uint32_t rf_dec, rf_int;
6089         uint32_t bcnrc_val;
6090         uint16_t link_speed = dev->data->dev_link.link_speed;
6091
6092         if (queue_idx >= hw->mac.max_tx_queues)
6093                 return -EINVAL;
6094
6095         if (tx_rate != 0) {
6096                 /* Calculate the rate factor values to set */
6097                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
6098                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
6099                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
6100
6101                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
6102                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
6103                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
6104                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
6105         } else {
6106                 bcnrc_val = 0;
6107         }
6108
6109         rxmode = &dev->data->dev_conf.rxmode;
6110         /*
6111          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
6112          * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
6113          * set as 0x4.
6114          */
6115         if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
6116             (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
6117                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
6118                         IXGBE_MMW_SIZE_JUMBO_FRAME);
6119         else
6120                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
6121                         IXGBE_MMW_SIZE_DEFAULT);
6122
6123         /* Set RTTBCNRC of queue X */
6124         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
6125         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
6126         IXGBE_WRITE_FLUSH(hw);
6127
6128         return 0;
6129 }
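
/*
 * Worked example for the rate-factor math above (illustrative only), assuming
 * IXGBE_RTTBCNRC_RF_INT_SHIFT is 14: with link_speed = 10000 Mbps and
 * tx_rate = 3000 Mbps, rf_int = 10000 / 3000 = 3 and
 * rf_dec = ((10000 % 3000) << 14) / 3000 = 5461, so the programmed rate
 * factor is roughly 3 + 5461/16384 = 3.33, the ratio of the link speed to
 * the requested queue rate.
 */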
6130
6131 static int
6132 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
6133                      __attribute__((unused)) uint32_t index,
6134                      __attribute__((unused)) uint32_t pool)
6135 {
6136         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6137         int diag;
6138
6139         /*
6140          * On a 82599 VF, adding the same MAC address again is not an idempotent
6141          * operation. Trap this case to avoid exhausting the [very limited]
6142          * set of PF resources used to store VF MAC addresses.
6143          */
6144         if (memcmp(hw->mac.perm_addr, mac_addr,
6145                         sizeof(struct rte_ether_addr)) == 0)
6146                 return -1;
6147         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
6148         if (diag != 0)
6149                 PMD_DRV_LOG(ERR, "Unable to add MAC address "
6150                             "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
6151                             mac_addr->addr_bytes[0],
6152                             mac_addr->addr_bytes[1],
6153                             mac_addr->addr_bytes[2],
6154                             mac_addr->addr_bytes[3],
6155                             mac_addr->addr_bytes[4],
6156                             mac_addr->addr_bytes[5],
6157                             diag);
6158         return diag;
6159 }
6160
6161 static void
6162 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
6163 {
6164         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6165         struct rte_ether_addr *perm_addr =
6166                 (struct rte_ether_addr *)hw->mac.perm_addr;
6167         struct rte_ether_addr *mac_addr;
6168         uint32_t i;
6169         int diag;
6170
6171         /*
6172          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
6173          * not support the deletion of a given MAC address.
6174          * Instead, it requires deleting all MAC addresses, then adding back
6175          * all of them with the exception of the one to be deleted.
6176          */
6177         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
6178
6179         /*
6180          * Add all MAC addresses back, with the exception of the deleted one
6181          * and of the permanent MAC address.
6182          */
6183         for (i = 0, mac_addr = dev->data->mac_addrs;
6184              i < hw->mac.num_rar_entries; i++, mac_addr++) {
6185                 /* Skip the deleted MAC address */
6186                 if (i == index)
6187                         continue;
6188                 /* Skip NULL MAC addresses */
6189                 if (rte_is_zero_ether_addr(mac_addr))
6190                         continue;
6191                 /* Skip the permanent MAC address */
6192                 if (memcmp(perm_addr, mac_addr,
6193                                 sizeof(struct rte_ether_addr)) == 0)
6194                         continue;
6195                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
6196                 if (diag != 0)
6197                         PMD_DRV_LOG(ERR,
6198                                     "Adding again MAC address "
6199                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
6200                                     "diag=%d",
6201                                     mac_addr->addr_bytes[0],
6202                                     mac_addr->addr_bytes[1],
6203                                     mac_addr->addr_bytes[2],
6204                                     mac_addr->addr_bytes[3],
6205                                     mac_addr->addr_bytes[4],
6206                                     mac_addr->addr_bytes[5],
6207                                     diag);
6208         }
6209 }
6210
6211 static int
6212 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
6213                         struct rte_ether_addr *addr)
6214 {
6215         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6216
6217         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
6218
6219         return 0;
6220 }
6221
6222 int
6223 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
6224                         struct rte_eth_syn_filter *filter,
6225                         bool add)
6226 {
6227         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6228         struct ixgbe_filter_info *filter_info =
6229                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6230         uint32_t syn_info;
6231         uint32_t synqf;
6232
6233         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6234                 return -EINVAL;
6235
6236         syn_info = filter_info->syn_info;
6237
6238         if (add) {
6239                 if (syn_info & IXGBE_SYN_FILTER_ENABLE)
6240                         return -EINVAL;
6241                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
6242                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
6243
6244                 if (filter->hig_pri)
6245                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
6246                 else
6247                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
6248         } else {
6249                 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6250                 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
6251                         return -ENOENT;
6252                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
6253         }
6254
6255         filter_info->syn_info = synqf;
6256         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
6257         IXGBE_WRITE_FLUSH(hw);
6258         return 0;
6259 }
6260
6261 static int
6262 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
6263                         struct rte_eth_syn_filter *filter)
6264 {
6265         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6266         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6267
6268         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
6269                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
6270                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
6271                 return 0;
6272         }
6273         return -ENOENT;
6274 }
6275
6276 static int
6277 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
6278                         enum rte_filter_op filter_op,
6279                         void *arg)
6280 {
6281         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6282         int ret;
6283
6284         MAC_TYPE_FILTER_SUP(hw->mac.type);
6285
6286         if (filter_op == RTE_ETH_FILTER_NOP)
6287                 return 0;
6288
6289         if (arg == NULL) {
6290                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
6291                             filter_op);
6292                 return -EINVAL;
6293         }
6294
6295         switch (filter_op) {
6296         case RTE_ETH_FILTER_ADD:
6297                 ret = ixgbe_syn_filter_set(dev,
6298                                 (struct rte_eth_syn_filter *)arg,
6299                                 TRUE);
6300                 break;
6301         case RTE_ETH_FILTER_DELETE:
6302                 ret = ixgbe_syn_filter_set(dev,
6303                                 (struct rte_eth_syn_filter *)arg,
6304                                 FALSE);
6305                 break;
6306         case RTE_ETH_FILTER_GET:
6307                 ret = ixgbe_syn_filter_get(dev,
6308                                 (struct rte_eth_syn_filter *)arg);
6309                 break;
6310         default:
6311                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
6312                 ret = -EINVAL;
6313                 break;
6314         }
6315
6316         return ret;
6317 }
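
/*
 * Illustrative, application-side sketch (not part of the driver): steering
 * TCP SYN packets to a dedicated queue through the legacy filter_ctrl API,
 * which dispatches to ixgbe_syn_filter_handle(). The port and queue ids are
 * assumptions made for the example, and error checking is omitted.
 *
 *     struct rte_eth_syn_filter syn;
 *     memset(&syn, 0, sizeof(syn));
 *     syn.hig_pri = 1;
 *     syn.queue = 4;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
 *                             RTE_ETH_FILTER_ADD, &syn);
 */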
6318
6319
6320 static inline enum ixgbe_5tuple_protocol
6321 convert_protocol_type(uint8_t protocol_value)
6322 {
6323         if (protocol_value == IPPROTO_TCP)
6324                 return IXGBE_FILTER_PROTOCOL_TCP;
6325         else if (protocol_value == IPPROTO_UDP)
6326                 return IXGBE_FILTER_PROTOCOL_UDP;
6327         else if (protocol_value == IPPROTO_SCTP)
6328                 return IXGBE_FILTER_PROTOCOL_SCTP;
6329         else
6330                 return IXGBE_FILTER_PROTOCOL_NONE;
6331 }
6332
6333 /* inject a 5-tuple filter to HW */
6334 static inline void
6335 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
6336                            struct ixgbe_5tuple_filter *filter)
6337 {
6338         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6339         int i;
6340         uint32_t ftqf, sdpqf;
6341         uint32_t l34timir = 0;
6342         uint8_t mask = 0xff;
6343
6344         i = filter->index;
6345
6346         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
6347                                 IXGBE_SDPQF_DSTPORT_SHIFT);
6348         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
6349
6350         ftqf = (uint32_t)(filter->filter_info.proto &
6351                 IXGBE_FTQF_PROTOCOL_MASK);
6352         ftqf |= (uint32_t)((filter->filter_info.priority &
6353                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
6354         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
6355                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
6356         if (filter->filter_info.dst_ip_mask == 0)
6357                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
6358         if (filter->filter_info.src_port_mask == 0)
6359                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
6360         if (filter->filter_info.dst_port_mask == 0)
6361                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
6362         if (filter->filter_info.proto_mask == 0)
6363                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
6364         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
6365         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
6366         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
6367
6368         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
6369         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
6370         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
6371         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
6372
6373         l34timir |= IXGBE_L34T_IMIR_RESERVE;
6374         l34timir |= (uint32_t)(filter->queue <<
6375                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
6376         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
6377 }
6378
6379 /*
6380  * add a 5tuple filter
6381  *
6382  * @param
6383  * dev: Pointer to struct rte_eth_dev.
6384  * index: the index allocated to the filter.
6385  * filter: pointer to the filter that will be added.
6386  * rx_queue: the queue id the filter is assigned to.
6387  *
6388  * @return
6389  *    - On success, zero.
6390  *    - On failure, a negative value.
6391  */
6392 static int
6393 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
6394                         struct ixgbe_5tuple_filter *filter)
6395 {
6396         struct ixgbe_filter_info *filter_info =
6397                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6398         int i, idx, shift;
6399
6400         /*
6401          * look for an unused 5tuple filter index,
6402          * and insert the filter into the list.
6403          */
6404         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
6405                 idx = i / (sizeof(uint32_t) * NBBY);
6406                 shift = i % (sizeof(uint32_t) * NBBY);
6407                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
6408                         filter_info->fivetuple_mask[idx] |= 1 << shift;
6409                         filter->index = i;
6410                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
6411                                           filter,
6412                                           entries);
6413                         break;
6414                 }
6415         }
6416         if (i >= IXGBE_MAX_FTQF_FILTERS) {
6417                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
6418                 return -ENOSYS;
6419         }
6420
6421         ixgbe_inject_5tuple_filter(dev, filter);
6422
6423         return 0;
6424 }
6425
6426 /*
6427  * remove a 5tuple filter
6428  *
6429  * @param
6430  * dev: Pointer to struct rte_eth_dev.
6431  * filter: pointer to the filter that will be removed.
6432  */
6433 static void
6434 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
6435                         struct ixgbe_5tuple_filter *filter)
6436 {
6437         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6438         struct ixgbe_filter_info *filter_info =
6439                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6440         uint16_t index = filter->index;
6441
6442         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
6443                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
6444         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
6445         rte_free(filter);
6446
6447         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
6448         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
6449         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
6450         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
6451         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
6452 }
6453
6454 static int
6455 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
6456 {
6457         struct ixgbe_hw *hw;
6458         uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD;
6459         struct rte_eth_dev_data *dev_data = dev->data;
6460
6461         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6462
6463         if (mtu < RTE_ETHER_MIN_MTU ||
6464                         max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
6465                 return -EINVAL;
6466
6467         /* If the device is started, refuse an MTU that requires scattered
6468          * packet support when this feature has not been enabled before.
6469          */
6470         if (dev_data->dev_started && !dev_data->scattered_rx &&
6471             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
6472              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
6473                 PMD_INIT_LOG(ERR, "Stop port first.");
6474                 return -EINVAL;
6475         }
6476
6477         /*
6478          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
6479          * request of the version 2.0 of the mailbox API.
6480          * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
6481          * of the mailbox API.
6482          * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
6483          * prior to 3.11.33 which contains the following change:
6484          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
6485          */
6486         ixgbevf_rlpml_set_vf(hw, max_frame);
6487
6488         /* update max frame size */
6489         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
6490         return 0;
6491 }
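
/*
 * Worked example for the frame-size math above (illustrative only), assuming
 * IXGBE_ETH_OVERHEAD covers the Ethernet header plus CRC (14 + 4 = 18 bytes):
 * an MTU of 1500 gives max_frame = 1518, and an MTU of 9000 gives
 * max_frame = 9018, which is the value passed to ixgbevf_rlpml_set_vf() and
 * stored in rxmode.max_rx_pkt_len.
 */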
6492
6493 static inline struct ixgbe_5tuple_filter *
6494 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
6495                         struct ixgbe_5tuple_filter_info *key)
6496 {
6497         struct ixgbe_5tuple_filter *it;
6498
6499         TAILQ_FOREACH(it, filter_list, entries) {
6500                 if (memcmp(key, &it->filter_info,
6501                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
6502                         return it;
6503                 }
6504         }
6505         return NULL;
6506 }
6507
6508 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
6509 static inline int
6510 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
6511                         struct ixgbe_5tuple_filter_info *filter_info)
6512 {
6513         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
6514                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
6515                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
6516                 return -EINVAL;
6517
6518         switch (filter->dst_ip_mask) {
6519         case UINT32_MAX:
6520                 filter_info->dst_ip_mask = 0;
6521                 filter_info->dst_ip = filter->dst_ip;
6522                 break;
6523         case 0:
6524                 filter_info->dst_ip_mask = 1;
6525                 break;
6526         default:
6527                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
6528                 return -EINVAL;
6529         }
6530
6531         switch (filter->src_ip_mask) {
6532         case UINT32_MAX:
6533                 filter_info->src_ip_mask = 0;
6534                 filter_info->src_ip = filter->src_ip;
6535                 break;
6536         case 0:
6537                 filter_info->src_ip_mask = 1;
6538                 break;
6539         default:
6540                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
6541                 return -EINVAL;
6542         }
6543
6544         switch (filter->dst_port_mask) {
6545         case UINT16_MAX:
6546                 filter_info->dst_port_mask = 0;
6547                 filter_info->dst_port = filter->dst_port;
6548                 break;
6549         case 0:
6550                 filter_info->dst_port_mask = 1;
6551                 break;
6552         default:
6553                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
6554                 return -EINVAL;
6555         }
6556
6557         switch (filter->src_port_mask) {
6558         case UINT16_MAX:
6559                 filter_info->src_port_mask = 0;
6560                 filter_info->src_port = filter->src_port;
6561                 break;
6562         case 0:
6563                 filter_info->src_port_mask = 1;
6564                 break;
6565         default:
6566                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
6567                 return -EINVAL;
6568         }
6569
6570         switch (filter->proto_mask) {
6571         case UINT8_MAX:
6572                 filter_info->proto_mask = 0;
6573                 filter_info->proto =
6574                         convert_protocol_type(filter->proto);
6575                 break;
6576         case 0:
6577                 filter_info->proto_mask = 1;
6578                 break;
6579         default:
6580                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
6581                 return -EINVAL;
6582         }
6583
6584         filter_info->priority = (uint8_t)filter->priority;
6585         return 0;
6586 }
6587
6588 /*
6589  * add or delete a ntuple filter
6590  *
6591  * @param
6592  * dev: Pointer to struct rte_eth_dev.
6593  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6594  * add: if true, add filter, if false, remove filter
6595  *
6596  * @return
6597  *    - On success, zero.
6598  *    - On failure, a negative value.
6599  */
6600 int
6601 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
6602                         struct rte_eth_ntuple_filter *ntuple_filter,
6603                         bool add)
6604 {
6605         struct ixgbe_filter_info *filter_info =
6606                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6607         struct ixgbe_5tuple_filter_info filter_5tuple;
6608         struct ixgbe_5tuple_filter *filter;
6609         int ret;
6610
6611         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6612                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6613                 return -EINVAL;
6614         }
6615
6616         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6617         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6618         if (ret < 0)
6619                 return ret;
6620
6621         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6622                                          &filter_5tuple);
6623         if (filter != NULL && add) {
6624                 PMD_DRV_LOG(ERR, "filter exists.");
6625                 return -EEXIST;
6626         }
6627         if (filter == NULL && !add) {
6628                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6629                 return -ENOENT;
6630         }
6631
6632         if (add) {
6633                 filter = rte_zmalloc("ixgbe_5tuple_filter",
6634                                 sizeof(struct ixgbe_5tuple_filter), 0);
6635                 if (filter == NULL)
6636                         return -ENOMEM;
6637                 rte_memcpy(&filter->filter_info,
6638                                  &filter_5tuple,
6639                                  sizeof(struct ixgbe_5tuple_filter_info));
6640                 filter->queue = ntuple_filter->queue;
6641                 ret = ixgbe_add_5tuple_filter(dev, filter);
6642                 if (ret < 0) {
6643                         rte_free(filter);
6644                         return ret;
6645                 }
6646         } else
6647                 ixgbe_remove_5tuple_filter(dev, filter);
6648
6649         return 0;
6650 }
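
/*
 * Illustrative, application-side sketch (not part of the driver): adding a
 * 5-tuple filter that steers TCP traffic for 192.168.0.1:80 to queue 4
 * through the legacy filter_ctrl API. Note the mask convention used by
 * ntuple_filter_to_5tuple(): an all-ones mask means "compare this field",
 * a zero mask means "ignore it". All values below are assumptions made for
 * the example, and error checking is omitted.
 *
 *     struct rte_eth_ntuple_filter nt;
 *     memset(&nt, 0, sizeof(nt));
 *     nt.flags = RTE_5TUPLE_FLAGS;
 *     nt.dst_ip = rte_cpu_to_be_32(0xC0A80001);
 *     nt.dst_ip_mask = UINT32_MAX;
 *     nt.dst_port = rte_cpu_to_be_16(80);
 *     nt.dst_port_mask = UINT16_MAX;
 *     nt.proto = IPPROTO_TCP;
 *     nt.proto_mask = UINT8_MAX;
 *     nt.priority = 1;
 *     nt.queue = 4;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                             RTE_ETH_FILTER_ADD, &nt);
 */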
6651
6652 /*
6653  * get a ntuple filter
6654  *
6655  * @param
6656  * dev: Pointer to struct rte_eth_dev.
6657  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6658  *
6659  * @return
6660  *    - On success, zero.
6661  *    - On failure, a negative value.
6662  */
6663 static int
6664 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
6665                         struct rte_eth_ntuple_filter *ntuple_filter)
6666 {
6667         struct ixgbe_filter_info *filter_info =
6668                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6669         struct ixgbe_5tuple_filter_info filter_5tuple;
6670         struct ixgbe_5tuple_filter *filter;
6671         int ret;
6672
6673         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6674                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6675                 return -EINVAL;
6676         }
6677
6678         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6679         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6680         if (ret < 0)
6681                 return ret;
6682
6683         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6684                                          &filter_5tuple);
6685         if (filter == NULL) {
6686                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6687                 return -ENOENT;
6688         }
6689         ntuple_filter->queue = filter->queue;
6690         return 0;
6691 }
6692
6693 /*
6694  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
6695  * @dev: pointer to rte_eth_dev structure
6696  * @filter_op: operation to be taken.
6697  * @arg: a pointer to specific structure corresponding to the filter_op
6698  *
6699  * @return
6700  *    - On success, zero.
6701  *    - On failure, a negative value.
6702  */
6703 static int
6704 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
6705                                 enum rte_filter_op filter_op,
6706                                 void *arg)
6707 {
6708         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6709         int ret;
6710
6711         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
6712
6713         if (filter_op == RTE_ETH_FILTER_NOP)
6714                 return 0;
6715
6716         if (arg == NULL) {
6717                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6718                             filter_op);
6719                 return -EINVAL;
6720         }
6721
6722         switch (filter_op) {
6723         case RTE_ETH_FILTER_ADD:
6724                 ret = ixgbe_add_del_ntuple_filter(dev,
6725                         (struct rte_eth_ntuple_filter *)arg,
6726                         TRUE);
6727                 break;
6728         case RTE_ETH_FILTER_DELETE:
6729                 ret = ixgbe_add_del_ntuple_filter(dev,
6730                         (struct rte_eth_ntuple_filter *)arg,
6731                         FALSE);
6732                 break;
6733         case RTE_ETH_FILTER_GET:
6734                 ret = ixgbe_get_ntuple_filter(dev,
6735                         (struct rte_eth_ntuple_filter *)arg);
6736                 break;
6737         default:
6738                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6739                 ret = -EINVAL;
6740                 break;
6741         }
6742         return ret;
6743 }
6744
6745 int
6746 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6747                         struct rte_eth_ethertype_filter *filter,
6748                         bool add)
6749 {
6750         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6751         struct ixgbe_filter_info *filter_info =
6752                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6753         uint32_t etqf = 0;
6754         uint32_t etqs = 0;
6755         int ret;
6756         struct ixgbe_ethertype_filter ethertype_filter;
6757
6758         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6759                 return -EINVAL;
6760
6761         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
6762                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
6763                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
6764                         " ethertype filter.", filter->ether_type);
6765                 return -EINVAL;
6766         }
6767
6768         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6769                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6770                 return -EINVAL;
6771         }
6772         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6773                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
6774                 return -EINVAL;
6775         }
6776
6777         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6778         if (ret >= 0 && add) {
6779                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6780                             filter->ether_type);
6781                 return -EEXIST;
6782         }
6783         if (ret < 0 && !add) {
6784                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6785                             filter->ether_type);
6786                 return -ENOENT;
6787         }
6788
6789         if (add) {
6790                 etqf = IXGBE_ETQF_FILTER_EN;
6791                 etqf |= (uint32_t)filter->ether_type;
6792                 etqs |= (uint32_t)((filter->queue <<
6793                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
6794                                     IXGBE_ETQS_RX_QUEUE);
6795                 etqs |= IXGBE_ETQS_QUEUE_EN;
6796
6797                 ethertype_filter.ethertype = filter->ether_type;
6798                 ethertype_filter.etqf = etqf;
6799                 ethertype_filter.etqs = etqs;
6800                 ethertype_filter.conf = FALSE;
6801                 ret = ixgbe_ethertype_filter_insert(filter_info,
6802                                                     &ethertype_filter);
6803                 if (ret < 0) {
6804                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
6805                         return -ENOSPC;
6806                 }
6807         } else {
6808                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6809                 if (ret < 0)
6810                         return -ENOSYS;
6811         }
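             /*
              * At this point 'ret' holds the ETQF/ETQS register index: on add it
              * comes from the insert above; on delete the remove call appears to
              * return the freed index while etqf/etqs are still 0, so the
              * registers below are cleared.
              */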
6812         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6813         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6814         IXGBE_WRITE_FLUSH(hw);
6815
6816         return 0;
6817 }
6818
6819 static int
6820 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
6821                         struct rte_eth_ethertype_filter *filter)
6822 {
6823         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6824         struct ixgbe_filter_info *filter_info =
6825                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6826         uint32_t etqf, etqs;
6827         int ret;
6828
6829         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6830         if (ret < 0) {
6831                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6832                             filter->ether_type);
6833                 return -ENOENT;
6834         }
6835
6836         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
6837         if (etqf & IXGBE_ETQF_FILTER_EN) {
6838                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
6839                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
6840                 filter->flags = 0;
6841                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
6842                                IXGBE_ETQS_RX_QUEUE_SHIFT;
6843                 return 0;
6844         }
6845         return -ENOENT;
6846 }
6847
6848 /*
6849  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
6850  * @dev: pointer to rte_eth_dev structure
6851  * @filter_op: operation to be taken.
6852  * @arg: a pointer to specific structure corresponding to the filter_op
6853  */
6854 static int
6855 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
6856                                 enum rte_filter_op filter_op,
6857                                 void *arg)
6858 {
6859         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6860         int ret;
6861
6862         MAC_TYPE_FILTER_SUP(hw->mac.type);
6863
6864         if (filter_op == RTE_ETH_FILTER_NOP)
6865                 return 0;
6866
6867         if (arg == NULL) {
6868                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6869                             filter_op);
6870                 return -EINVAL;
6871         }
6872
6873         switch (filter_op) {
6874         case RTE_ETH_FILTER_ADD:
6875                 ret = ixgbe_add_del_ethertype_filter(dev,
6876                         (struct rte_eth_ethertype_filter *)arg,
6877                         TRUE);
6878                 break;
6879         case RTE_ETH_FILTER_DELETE:
6880                 ret = ixgbe_add_del_ethertype_filter(dev,
6881                         (struct rte_eth_ethertype_filter *)arg,
6882                         FALSE);
6883                 break;
6884         case RTE_ETH_FILTER_GET:
6885                 ret = ixgbe_get_ethertype_filter(dev,
6886                         (struct rte_eth_ethertype_filter *)arg);
6887                 break;
6888         default:
6889                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6890                 ret = -EINVAL;
6891                 break;
6892         }
6893         return ret;
6894 }
6895
6896 static int
6897 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
6898                      enum rte_filter_type filter_type,
6899                      enum rte_filter_op filter_op,
6900                      void *arg)
6901 {
6902         int ret = 0;
6903
6904         switch (filter_type) {
6905         case RTE_ETH_FILTER_NTUPLE:
6906                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
6907                 break;
6908         case RTE_ETH_FILTER_ETHERTYPE:
6909                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
6910                 break;
6911         case RTE_ETH_FILTER_SYN:
6912                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
6913                 break;
6914         case RTE_ETH_FILTER_FDIR:
6915                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
6916                 break;
6917         case RTE_ETH_FILTER_L2_TUNNEL:
6918                 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
6919                 break;
6920         case RTE_ETH_FILTER_GENERIC:
6921                 if (filter_op != RTE_ETH_FILTER_GET)
6922                         return -EINVAL;
6923                 *(const void **)arg = &ixgbe_flow_ops;
6924                 break;
6925         default:
6926                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
6927                                                         filter_type);
6928                 ret = -EINVAL;
6929                 break;
6930         }
6931
6932         return ret;
6933 }
6934
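     /*
      * Iterator handed to ixgbe_update_mc_addr_list(): it walks the flat array
      * of rte_ether_addr entries and always reports VMDq pool 0.
      */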
6935 static u8 *
6936 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
6937                         u8 **mc_addr_ptr, u32 *vmdq)
6938 {
6939         u8 *mc_addr;
6940
6941         *vmdq = 0;
6942         mc_addr = *mc_addr_ptr;
6943         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
6944         return mc_addr;
6945 }
6946
6947 static int
6948 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
6949                           struct rte_ether_addr *mc_addr_set,
6950                           uint32_t nb_mc_addr)
6951 {
6952         struct ixgbe_hw *hw;
6953         u8 *mc_addr_list;
6954
6955         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6956         mc_addr_list = (u8 *)mc_addr_set;
6957         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
6958                                          ixgbe_dev_addr_list_itr, TRUE);
6959 }
6960
6961 static uint64_t
6962 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
6963 {
6964         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6965         uint64_t systime_cycles;
6966
6967         switch (hw->mac.type) {
6968         case ixgbe_mac_X550:
6969         case ixgbe_mac_X550EM_x:
6970         case ixgbe_mac_X550EM_a:
6971                 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
6972                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6973                 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6974                                 * NSEC_PER_SEC;
6975                 break;
6976         default:
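                     /* On other MACs SYSTIML/SYSTIMH form one 64-bit cycle counter. */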
6977                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6978                 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6979                                 << 32;
6980         }
6981
6982         return systime_cycles;
6983 }
6984
6985 static uint64_t
6986 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6987 {
6988         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6989         uint64_t rx_tstamp_cycles;
6990
6991         switch (hw->mac.type) {
6992         case ixgbe_mac_X550:
6993         case ixgbe_mac_X550EM_x:
6994         case ixgbe_mac_X550EM_a:
6995                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
6996                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6997                 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6998                                 * NSEC_PER_SEC;
6999                 break;
7000         default:
7001                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
7002                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
7003                 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
7004                                 << 32;
7005         }
7006
7007         return rx_tstamp_cycles;
7008 }
7009
7010 static uint64_t
7011 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
7012 {
7013         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7014         uint64_t tx_tstamp_cycles;
7015
7016         switch (hw->mac.type) {
7017         case ixgbe_mac_X550:
7018         case ixgbe_mac_X550EM_x:
7019         case ixgbe_mac_X550EM_a:
7020                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
7021                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
7022                 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
7023                                 * NSEC_PER_SEC;
7024                 break;
7025         default:
7026                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
7027                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
7028                 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
7029                                 << 32;
7030         }
7031
7032         return tx_tstamp_cycles;
7033 }
7034
7035 static void
7036 ixgbe_start_timecounters(struct rte_eth_dev *dev)
7037 {
7038         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7039         struct ixgbe_adapter *adapter = dev->data->dev_private;
7040         struct rte_eth_link link;
7041         uint32_t incval = 0;
7042         uint32_t shift = 0;
7043
7044         /* Get current link speed. */
7045         ixgbe_dev_link_update(dev, 1);
7046         rte_eth_linkstatus_get(dev, &link);
7047
7048         switch (link.link_speed) {
7049         case ETH_SPEED_NUM_100M:
7050                 incval = IXGBE_INCVAL_100;
7051                 shift = IXGBE_INCVAL_SHIFT_100;
7052                 break;
7053         case ETH_SPEED_NUM_1G:
7054                 incval = IXGBE_INCVAL_1GB;
7055                 shift = IXGBE_INCVAL_SHIFT_1GB;
7056                 break;
7057         case ETH_SPEED_NUM_10G:
7058         default:
7059                 incval = IXGBE_INCVAL_10GB;
7060                 shift = IXGBE_INCVAL_SHIFT_10GB;
7061                 break;
7062         }
7063
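             /*
              * TIMINCA is programmed so the SYSTIME counter advances roughly
              * 2^shift counts per nanosecond at the current link speed, letting
              * the timecounters set up below convert deltas to ns with a right
              * shift by cc_shift. On the X550 family the counter already runs
              * in nanoseconds, so incval is 1 and shift is 0.
              */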
7064         switch (hw->mac.type) {
7065         case ixgbe_mac_X550:
7066         case ixgbe_mac_X550EM_x:
7067         case ixgbe_mac_X550EM_a:
7068                 /* Independent of link speed. */
7069                 incval = 1;
7070                 /* Cycles read will be interpreted as ns. */
7071                 shift = 0;
7072                 /* Fall-through */
7073         case ixgbe_mac_X540:
7074                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
7075                 break;
7076         case ixgbe_mac_82599EB:
7077                 incval >>= IXGBE_INCVAL_SHIFT_82599;
7078                 shift -= IXGBE_INCVAL_SHIFT_82599;
7079                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
7080                                 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
7081                 break;
7082         default:
7083                 /* Not supported. */
7084                 return;
7085         }
7086
7087         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
7088         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
7089         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
7090
7091         adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7092         adapter->systime_tc.cc_shift = shift;
7093         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
7094
7095         adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7096         adapter->rx_tstamp_tc.cc_shift = shift;
7097         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
7098
7099         adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7100         adapter->tx_tstamp_tc.cc_shift = shift;
7101         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
7102 }
7103
7104 static int
7105 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
7106 {
7107         struct ixgbe_adapter *adapter = dev->data->dev_private;
7108
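             /*
              * The delta is applied to the software timecounters only; the
              * hardware SYSTIME registers are left untouched.
              */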
7109         adapter->systime_tc.nsec += delta;
7110         adapter->rx_tstamp_tc.nsec += delta;
7111         adapter->tx_tstamp_tc.nsec += delta;
7112
7113         return 0;
7114 }
7115
7116 static int
7117 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
7118 {
7119         uint64_t ns;
7120         struct ixgbe_adapter *adapter = dev->data->dev_private;
7121
7122         ns = rte_timespec_to_ns(ts);
7123         /* Set the timecounters to a new value. */
7124         adapter->systime_tc.nsec = ns;
7125         adapter->rx_tstamp_tc.nsec = ns;
7126         adapter->tx_tstamp_tc.nsec = ns;
7127
7128         return 0;
7129 }
7130
7131 static int
7132 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
7133 {
7134         uint64_t ns, systime_cycles;
7135         struct ixgbe_adapter *adapter = dev->data->dev_private;
7136
7137         systime_cycles = ixgbe_read_systime_cyclecounter(dev);
7138         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
7139         *ts = rte_ns_to_timespec(ns);
7140
7141         return 0;
7142 }
7143
7144 static int
7145 ixgbe_timesync_enable(struct rte_eth_dev *dev)
7146 {
7147         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7148         uint32_t tsync_ctl;
7149         uint32_t tsauxc;
7150
7151         /* Stop the timesync system time. */
7152         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
7153         /* Reset the timesync system time value. */
7154         IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
7155         IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
7156
7157         /* Enable system time for platforms where it isn't on by default. */
7158         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
7159         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
7160         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
7161
7162         ixgbe_start_timecounters(dev);
7163
7164         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7165         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
7166                         (RTE_ETHER_TYPE_1588 |
7167                          IXGBE_ETQF_FILTER_EN |
7168                          IXGBE_ETQF_1588));
7169
7170         /* Enable timestamping of received PTP packets. */
7171         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7172         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
7173         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7174
7175         /* Enable timestamping of transmitted PTP packets. */
7176         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7177         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
7178         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7179
7180         IXGBE_WRITE_FLUSH(hw);
7181
7182         return 0;
7183 }
7184
7185 static int
7186 ixgbe_timesync_disable(struct rte_eth_dev *dev)
7187 {
7188         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7189         uint32_t tsync_ctl;
7190
7191         /* Disable timestamping of transmitted PTP packets. */
7192         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7193         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
7194         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7195
7196         /* Disable timestamping of received PTP packets. */
7197         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7198         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
7199         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7200
7201         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7202         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
7203
7204         /* Stop incrementing the System Time registers. */
7205         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
7206
7207         return 0;
7208 }
7209
7210 static int
7211 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
7212                                  struct timespec *timestamp,
7213                                  uint32_t flags __rte_unused)
7214 {
7215         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7216         struct ixgbe_adapter *adapter = dev->data->dev_private;
7217         uint32_t tsync_rxctl;
7218         uint64_t rx_tstamp_cycles;
7219         uint64_t ns;
7220
7221         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7222         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
7223                 return -EINVAL;
7224
7225         rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
7226         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
7227         *timestamp = rte_ns_to_timespec(ns);
7228
7229         return  0;
7230 }
7231
7232 static int
7233 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
7234                                  struct timespec *timestamp)
7235 {
7236         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7237         struct ixgbe_adapter *adapter = dev->data->dev_private;
7238         uint32_t tsync_txctl;
7239         uint64_t tx_tstamp_cycles;
7240         uint64_t ns;
7241
7242         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7243         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
7244                 return -EINVAL;
7245
7246         tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
7247         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
7248         *timestamp = rte_ns_to_timespec(ns);
7249
7250         return 0;
7251 }
7252
7253 static int
7254 ixgbe_get_reg_length(struct rte_eth_dev *dev)
7255 {
7256         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7257         int count = 0;
7258         int g_ind = 0;
7259         const struct reg_info *reg_group;
7260         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7261                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7262
7263         while ((reg_group = reg_set[g_ind++]))
7264                 count += ixgbe_regs_group_count(reg_group);
7265
7266         return count;
7267 }
7268
7269 static int
7270 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
7271 {
7272         int count = 0;
7273         int g_ind = 0;
7274         const struct reg_info *reg_group;
7275
7276         while ((reg_group = ixgbevf_regs[g_ind++]))
7277                 count += ixgbe_regs_group_count(reg_group);
7278
7279         return count;
7280 }
7281
7282 static int
7283 ixgbe_get_regs(struct rte_eth_dev *dev,
7284               struct rte_dev_reg_info *regs)
7285 {
7286         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7287         uint32_t *data = regs->data;
7288         int g_ind = 0;
7289         int count = 0;
7290         const struct reg_info *reg_group;
7291         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7292                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7293
7294         if (data == NULL) {
7295                 regs->length = ixgbe_get_reg_length(dev);
7296                 regs->width = sizeof(uint32_t);
7297                 return 0;
7298         }
7299
7300         /* Support only full register dump */
7301         if ((regs->length == 0) ||
7302             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
7303                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7304                         hw->device_id;
7305                 while ((reg_group = reg_set[g_ind++]))
7306                         count += ixgbe_read_regs_group(dev, &data[count],
7307                                 reg_group);
7308                 return 0;
7309         }
7310
7311         return -ENOTSUP;
7312 }
7313
7314 static int
7315 ixgbevf_get_regs(struct rte_eth_dev *dev,
7316                 struct rte_dev_reg_info *regs)
7317 {
7318         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7319         uint32_t *data = regs->data;
7320         int g_ind = 0;
7321         int count = 0;
7322         const struct reg_info *reg_group;
7323
7324         if (data == NULL) {
7325                 regs->length = ixgbevf_get_reg_length(dev);
7326                 regs->width = sizeof(uint32_t);
7327                 return 0;
7328         }
7329
7330         /* Support only full register dump */
7331         if ((regs->length == 0) ||
7332             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
7333                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7334                         hw->device_id;
7335                 while ((reg_group = ixgbevf_regs[g_ind++]))
7336                         count += ixgbe_read_regs_group(dev, &data[count],
7337                                                       reg_group);
7338                 return 0;
7339         }
7340
7341         return -ENOTSUP;
7342 }
7343
7344 static int
7345 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
7346 {
7347         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7348
7349         /* Return unit is byte count */
7350         return hw->eeprom.word_size * 2;
7351 }
7352
7353 static int
7354 ixgbe_get_eeprom(struct rte_eth_dev *dev,
7355                 struct rte_dev_eeprom_info *in_eeprom)
7356 {
7357         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7358         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7359         uint16_t *data = in_eeprom->data;
7360         int first, length;
7361
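             /*
              * The EEPROM is word (16-bit) addressed; convert the caller's byte
              * offset and length into words.
              */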
7362         first = in_eeprom->offset >> 1;
7363         length = in_eeprom->length >> 1;
7364         if ((first > hw->eeprom.word_size) ||
7365             ((first + length) > hw->eeprom.word_size))
7366                 return -EINVAL;
7367
7368         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7369
7370         return eeprom->ops.read_buffer(hw, first, length, data);
7371 }
7372
7373 static int
7374 ixgbe_set_eeprom(struct rte_eth_dev *dev,
7375                 struct rte_dev_eeprom_info *in_eeprom)
7376 {
7377         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7378         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7379         uint16_t *data = in_eeprom->data;
7380         int first, length;
7381
7382         first = in_eeprom->offset >> 1;
7383         length = in_eeprom->length >> 1;
7384         if ((first > hw->eeprom.word_size) ||
7385             ((first + length) > hw->eeprom.word_size))
7386                 return -EINVAL;
7387
7388         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7389
7390         return eeprom->ops.write_buffer(hw, first, length, data);
7391 }
7392
7393 static int
7394 ixgbe_get_module_info(struct rte_eth_dev *dev,
7395                       struct rte_eth_dev_module_info *modinfo)
7396 {
7397         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7398         uint32_t status;
7399         uint8_t sff8472_rev, addr_mode;
7400         bool page_swap = false;
7401
7402         /* Check whether we support SFF-8472 or not */
7403         status = hw->phy.ops.read_i2c_eeprom(hw,
7404                                              IXGBE_SFF_SFF_8472_COMP,
7405                                              &sff8472_rev);
7406         if (status != 0)
7407                 return -EIO;
7408
7409         /* Check whether the module requires an address change (not supported). */
7410         status = hw->phy.ops.read_i2c_eeprom(hw,
7411                                              IXGBE_SFF_SFF_8472_SWAP,
7412                                              &addr_mode);
7413         if (status != 0)
7414                 return -EIO;
7415
7416         if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
7417                 PMD_DRV_LOG(ERR,
7418                             "Address change required to access page 0xA2, "
7419                             "but not supported. Please report the module "
7420                             "type to the driver maintainers.");
7421                 page_swap = true;
7422         }
7423
7424         if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
7425                 /* We have an SFP, but it does not support SFF-8472 */
7426                 modinfo->type = RTE_ETH_MODULE_SFF_8079;
7427                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
7428         } else {
7429                 /* We have an SFP that supports a revision of SFF-8472. */
7430                 modinfo->type = RTE_ETH_MODULE_SFF_8472;
7431                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
7432         }
7433
7434         return 0;
7435 }
7436
7437 static int
7438 ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
7439                         struct rte_dev_eeprom_info *info)
7440 {
7441         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7442         uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
7443         uint8_t databyte = 0xFF;
7444         uint8_t *data = info->data;
7445         uint32_t i = 0;
7446
7447         if (info->length == 0)
7448                 return -EINVAL;
7449
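             /*
              * Offsets below the SFF-8079 area are read from the module's A0
              * page; higher offsets are read via the SFF-8472 diagnostics page.
              */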
7450         for (i = info->offset; i < info->offset + info->length; i++) {
7451                 if (i < RTE_ETH_MODULE_SFF_8079_LEN)
7452                         status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
7453                 else
7454                         status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
7455
7456                 if (status != 0)
7457                         return -EIO;
7458
7459                 data[i - info->offset] = databyte;
7460         }
7461
7462         return 0;
7463 }
7464
7465 uint16_t
7466 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
7467         switch (mac_type) {
7468         case ixgbe_mac_X550:
7469         case ixgbe_mac_X550EM_x:
7470         case ixgbe_mac_X550EM_a:
7471                 return ETH_RSS_RETA_SIZE_512;
7472         case ixgbe_mac_X550_vf:
7473         case ixgbe_mac_X550EM_x_vf:
7474         case ixgbe_mac_X550EM_a_vf:
7475                 return ETH_RSS_RETA_SIZE_64;
7476         case ixgbe_mac_X540_vf:
7477         case ixgbe_mac_82599_vf:
7478                 return 0;
7479         default:
7480                 return ETH_RSS_RETA_SIZE_128;
7481         }
7482 }
7483
7484 uint32_t
7485 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
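             /* Each 32-bit (E)RETA/VFRETA register packs four 8-bit entries,
              * hence the reta_idx >> 2 indexing.
              */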
7486         switch (mac_type) {
7487         case ixgbe_mac_X550:
7488         case ixgbe_mac_X550EM_x:
7489         case ixgbe_mac_X550EM_a:
7490                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
7491                         return IXGBE_RETA(reta_idx >> 2);
7492                 else
7493                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
7494         case ixgbe_mac_X550_vf:
7495         case ixgbe_mac_X550EM_x_vf:
7496         case ixgbe_mac_X550EM_a_vf:
7497                 return IXGBE_VFRETA(reta_idx >> 2);
7498         default:
7499                 return IXGBE_RETA(reta_idx >> 2);
7500         }
7501 }
7502
7503 uint32_t
7504 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
7505         switch (mac_type) {
7506         case ixgbe_mac_X550_vf:
7507         case ixgbe_mac_X550EM_x_vf:
7508         case ixgbe_mac_X550EM_a_vf:
7509                 return IXGBE_VFMRQC;
7510         default:
7511                 return IXGBE_MRQC;
7512         }
7513 }
7514
7515 uint32_t
7516 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
7517         switch (mac_type) {
7518         case ixgbe_mac_X550_vf:
7519         case ixgbe_mac_X550EM_x_vf:
7520         case ixgbe_mac_X550EM_a_vf:
7521                 return IXGBE_VFRSSRK(i);
7522         default:
7523                 return IXGBE_RSSRK(i);
7524         }
7525 }
7526
7527 bool
7528 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
7529         switch (mac_type) {
7530         case ixgbe_mac_82599_vf:
7531         case ixgbe_mac_X540_vf:
7532                 return 0;
7533         default:
7534                 return 1;
7535         }
7536 }
7537
7538 static int
7539 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
7540                         struct rte_eth_dcb_info *dcb_info)
7541 {
7542         struct ixgbe_dcb_config *dcb_config =
7543                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
7544         struct ixgbe_dcb_tc_config *tc;
7545         struct rte_eth_dcb_tc_queue_mapping *tc_queue;
7546         uint8_t nb_tcs;
7547         uint8_t i, j;
7548
7549         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
7550                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
7551         else
7552                 dcb_info->nb_tcs = 1;
7553
7554         tc_queue = &dcb_info->tc_queue;
7555         nb_tcs = dcb_info->nb_tcs;
7556
7557         if (dcb_config->vt_mode) { /* vt is enabled*/
7558                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
7559                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
7560                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7561                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
7562                 if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
7563                         for (j = 0; j < nb_tcs; j++) {
7564                                 tc_queue->tc_rxq[0][j].base = j;
7565                                 tc_queue->tc_rxq[0][j].nb_queue = 1;
7566                                 tc_queue->tc_txq[0][j].base = j;
7567                                 tc_queue->tc_txq[0][j].nb_queue = 1;
7568                         }
7569                 } else {
7570                         for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
7571                                 for (j = 0; j < nb_tcs; j++) {
7572                                         tc_queue->tc_rxq[i][j].base =
7573                                                 i * nb_tcs + j;
7574                                         tc_queue->tc_rxq[i][j].nb_queue = 1;
7575                                         tc_queue->tc_txq[i][j].base =
7576                                                 i * nb_tcs + j;
7577                                         tc_queue->tc_txq[i][j].nb_queue = 1;
7578                                 }
7579                         }
7580                 }
7581         } else { /* vt is disabled*/
7582                 struct rte_eth_dcb_rx_conf *rx_conf =
7583                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
7584                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7585                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
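                     /*
                      * Fixed layout without VT: with 4 TCs the Tx queues split
                      * 64/32/16/16 and each TC owns 16 Rx queues (bases 32
                      * apart); with 8 TCs Tx splits 32/32/16/16/8/8/8/8 and
                      * each TC owns 16 contiguous Rx queues.
                      */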
7586                 if (dcb_info->nb_tcs == ETH_4_TCS) {
7587                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7588                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
7589                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7590                         }
7591                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7592                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
7593                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
7594                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
7595                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
7596                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7597                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7598                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7599                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
7600                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7601                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
7602                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7603                         }
7604                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7605                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
7606                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
7607                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
7608                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
7609                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
7610                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
7611                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
7612                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
7613                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7614                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7615                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7616                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
7617                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
7618                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
7619                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
7620                 }
7621         }
7622         for (i = 0; i < dcb_info->nb_tcs; i++) {
7623                 tc = &dcb_config->tc_config[i];
7624                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
7625         }
7626         return 0;
7627 }
7628
7629 /* Update e-tag ether type */
7630 static int
7631 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
7632                             uint16_t ether_type)
7633 {
7634         uint32_t etag_etype;
7635
7636         if (hw->mac.type != ixgbe_mac_X550 &&
7637             hw->mac.type != ixgbe_mac_X550EM_x &&
7638             hw->mac.type != ixgbe_mac_X550EM_a) {
7639                 return -ENOTSUP;
7640         }
7641
7642         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7643         etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
7644         etag_etype |= ether_type;
7645         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7646         IXGBE_WRITE_FLUSH(hw);
7647
7648         return 0;
7649 }
7650
7651 /* Config l2 tunnel ether type */
7652 static int
7653 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
7654                                   struct rte_eth_l2_tunnel_conf *l2_tunnel)
7655 {
7656         int ret = 0;
7657         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7658         struct ixgbe_l2_tn_info *l2_tn_info =
7659                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7660
7661         if (l2_tunnel == NULL)
7662                 return -EINVAL;
7663
7664         switch (l2_tunnel->l2_tunnel_type) {
7665         case RTE_L2_TUNNEL_TYPE_E_TAG:
7666                 l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
7667                 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
7668                 break;
7669         default:
7670                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7671                 ret = -EINVAL;
7672                 break;
7673         }
7674
7675         return ret;
7676 }
7677
7678 /* Enable e-tag tunnel */
7679 static int
7680 ixgbe_e_tag_enable(struct ixgbe_hw *hw)
7681 {
7682         uint32_t etag_etype;
7683
7684         if (hw->mac.type != ixgbe_mac_X550 &&
7685             hw->mac.type != ixgbe_mac_X550EM_x &&
7686             hw->mac.type != ixgbe_mac_X550EM_a) {
7687                 return -ENOTSUP;
7688         }
7689
7690         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7691         etag_etype |= IXGBE_ETAG_ETYPE_VALID;
7692         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7693         IXGBE_WRITE_FLUSH(hw);
7694
7695         return 0;
7696 }
7697
7698 /* Enable l2 tunnel */
7699 static int
7700 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
7701                            enum rte_eth_tunnel_type l2_tunnel_type)
7702 {
7703         int ret = 0;
7704         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7705         struct ixgbe_l2_tn_info *l2_tn_info =
7706                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7707
7708         switch (l2_tunnel_type) {
7709         case RTE_L2_TUNNEL_TYPE_E_TAG:
7710                 l2_tn_info->e_tag_en = TRUE;
7711                 ret = ixgbe_e_tag_enable(hw);
7712                 break;
7713         default:
7714                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7715                 ret = -EINVAL;
7716                 break;
7717         }
7718
7719         return ret;
7720 }
7721
7722 /* Disable e-tag tunnel */
7723 static int
7724 ixgbe_e_tag_disable(struct ixgbe_hw *hw)
7725 {
7726         uint32_t etag_etype;
7727
7728         if (hw->mac.type != ixgbe_mac_X550 &&
7729             hw->mac.type != ixgbe_mac_X550EM_x &&
7730             hw->mac.type != ixgbe_mac_X550EM_a) {
7731                 return -ENOTSUP;
7732         }
7733
7734         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7735         etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
7736         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7737         IXGBE_WRITE_FLUSH(hw);
7738
7739         return 0;
7740 }
7741
7742 /* Disable l2 tunnel */
7743 static int
7744 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
7745                             enum rte_eth_tunnel_type l2_tunnel_type)
7746 {
7747         int ret = 0;
7748         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7749         struct ixgbe_l2_tn_info *l2_tn_info =
7750                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7751
7752         switch (l2_tunnel_type) {
7753         case RTE_L2_TUNNEL_TYPE_E_TAG:
7754                 l2_tn_info->e_tag_en = FALSE;
7755                 ret = ixgbe_e_tag_disable(hw);
7756                 break;
7757         default:
7758                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7759                 ret = -EINVAL;
7760                 break;
7761         }
7762
7763         return ret;
7764 }
7765
7766 static int
7767 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
7768                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7769 {
7770         int ret = 0;
7771         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7772         uint32_t i, rar_entries;
7773         uint32_t rar_low, rar_high;
7774
7775         if (hw->mac.type != ixgbe_mac_X550 &&
7776             hw->mac.type != ixgbe_mac_X550EM_x &&
7777             hw->mac.type != ixgbe_mac_X550EM_a) {
7778                 return -ENOTSUP;
7779         }
7780
7781         rar_entries = ixgbe_get_num_rx_addrs(hw);
7782
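             /*
              * RAR[0] holds the port MAC address, so E-tag filters occupy
              * entries 1 and up; a matching entry has AV and ADTYPE set and the
              * tunnel id stored in RAL.
              */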
7783         for (i = 1; i < rar_entries; i++) {
7784                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7785                 rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
7786                 if ((rar_high & IXGBE_RAH_AV) &&
7787                     (rar_high & IXGBE_RAH_ADTYPE) &&
7788                     ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
7789                      l2_tunnel->tunnel_id)) {
7790                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
7791                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
7792
7793                         ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
7794
7795                         return ret;
7796                 }
7797         }
7798
7799         return ret;
7800 }
7801
7802 static int
7803 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
7804                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7805 {
7806         int ret = 0;
7807         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7808         uint32_t i, rar_entries;
7809         uint32_t rar_low, rar_high;
7810
7811         if (hw->mac.type != ixgbe_mac_X550 &&
7812             hw->mac.type != ixgbe_mac_X550EM_x &&
7813             hw->mac.type != ixgbe_mac_X550EM_a) {
7814                 return -ENOTSUP;
7815         }
7816
7817         /* One entry per tunnel; remove any existing entry for this tunnel first. */
7818         ixgbe_e_tag_filter_del(dev, l2_tunnel);
7819
7820         rar_entries = ixgbe_get_num_rx_addrs(hw);
7821
7822         for (i = 1; i < rar_entries; i++) {
7823                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7824                 if (rar_high & IXGBE_RAH_AV) {
7825                         continue;
7826                 } else {
7827                         ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
7828                         rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
7829                         rar_low = l2_tunnel->tunnel_id;
7830
7831                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
7832                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
7833
7834                         return ret;
7835                 }
7836         }
7837
7838         PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
7839                      " Please remove a rule before adding a new one.");
7840         return -EINVAL;
7841 }
7842
7843 static inline struct ixgbe_l2_tn_filter *
7844 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
7845                           struct ixgbe_l2_tn_key *key)
7846 {
7847         int ret;
7848
7849         ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
7850         if (ret < 0)
7851                 return NULL;
7852
7853         return l2_tn_info->hash_map[ret];
7854 }
7855
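     /*
      * L2 tunnel filters are tracked twice: the rte_hash slot index is reused
      * for hash_map[] lookups, and the TAILQ keeps the same entries in an
      * iterable list.
      */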
7856 static inline int
7857 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7858                           struct ixgbe_l2_tn_filter *l2_tn_filter)
7859 {
7860         int ret;
7861
7862         ret = rte_hash_add_key(l2_tn_info->hash_handle,
7863                                &l2_tn_filter->key);
7864
7865         if (ret < 0) {
7866                 PMD_DRV_LOG(ERR,
7867                             "Failed to insert L2 tunnel filter"
7868                             " to hash table %d!",
7869                             ret);
7870                 return ret;
7871         }
7872
7873         l2_tn_info->hash_map[ret] = l2_tn_filter;
7874
7875         TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7876
7877         return 0;
7878 }
7879
7880 static inline int
7881 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7882                           struct ixgbe_l2_tn_key *key)
7883 {
7884         int ret;
7885         struct ixgbe_l2_tn_filter *l2_tn_filter;
7886
7887         ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
7888
7889         if (ret < 0) {
7890                 PMD_DRV_LOG(ERR,
7891                             "No such L2 tunnel filter to delete %d!",
7892                             ret);
7893                 return ret;
7894         }
7895
7896         l2_tn_filter = l2_tn_info->hash_map[ret];
7897         l2_tn_info->hash_map[ret] = NULL;
7898
7899         TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7900         rte_free(l2_tn_filter);
7901
7902         return 0;
7903 }
7904
7905 /* Add l2 tunnel filter */
7906 int
7907 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
7908                                struct rte_eth_l2_tunnel_conf *l2_tunnel,
7909                                bool restore)
7910 {
7911         int ret;
7912         struct ixgbe_l2_tn_info *l2_tn_info =
7913                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7914         struct ixgbe_l2_tn_key key;
7915         struct ixgbe_l2_tn_filter *node;
7916
7917         if (!restore) {
7918                 key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7919                 key.tn_id = l2_tunnel->tunnel_id;
7920
7921                 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
7922
7923                 if (node) {
7924                         PMD_DRV_LOG(ERR,
7925                                     "The L2 tunnel filter already exists!");
7926                         return -EINVAL;
7927                 }
7928
7929                 node = rte_zmalloc("ixgbe_l2_tn",
7930                                    sizeof(struct ixgbe_l2_tn_filter),
7931                                    0);
7932                 if (!node)
7933                         return -ENOMEM;
7934
7935                 rte_memcpy(&node->key,
7936                                  &key,
7937                                  sizeof(struct ixgbe_l2_tn_key));
7938                 node->pool = l2_tunnel->pool;
7939                 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
7940                 if (ret < 0) {
7941                         rte_free(node);
7942                         return ret;
7943                 }
7944         }
7945
7946         switch (l2_tunnel->l2_tunnel_type) {
7947         case RTE_L2_TUNNEL_TYPE_E_TAG:
7948                 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
7949                 break;
7950         default:
7951                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7952                 ret = -EINVAL;
7953                 break;
7954         }
7955
7956         if ((!restore) && (ret < 0))
7957                 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7958
7959         return ret;
7960 }
7961
7962 /* Delete l2 tunnel filter */
7963 int
7964 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
7965                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
7966 {
7967         int ret;
7968         struct ixgbe_l2_tn_info *l2_tn_info =
7969                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7970         struct ixgbe_l2_tn_key key;
7971
7972         key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7973         key.tn_id = l2_tunnel->tunnel_id;
7974         ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7975         if (ret < 0)
7976                 return ret;
7977
7978         switch (l2_tunnel->l2_tunnel_type) {
7979         case RTE_L2_TUNNEL_TYPE_E_TAG:
7980                 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
7981                 break;
7982         default:
7983                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7984                 ret = -EINVAL;
7985                 break;
7986         }
7987
7988         return ret;
7989 }
7990
7991 /**
7992  * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
7993  * @dev: pointer to rte_eth_dev structure
7994  * @filter_op: operation to be taken.
7995  * @arg: a pointer to specific structure corresponding to the filter_op
7996  */
7997 static int
7998 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
7999                                   enum rte_filter_op filter_op,
8000                                   void *arg)
8001 {
8002         int ret;
8003
8004         if (filter_op == RTE_ETH_FILTER_NOP)
8005                 return 0;
8006
8007         if (arg == NULL) {
8008                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
8009                             filter_op);
8010                 return -EINVAL;
8011         }
8012
8013         switch (filter_op) {
8014         case RTE_ETH_FILTER_ADD:
8015                 ret = ixgbe_dev_l2_tunnel_filter_add
8016                         (dev,
8017                          (struct rte_eth_l2_tunnel_conf *)arg,
8018                          FALSE);
8019                 break;
8020         case RTE_ETH_FILTER_DELETE:
8021                 ret = ixgbe_dev_l2_tunnel_filter_del
8022                         (dev,
8023                          (struct rte_eth_l2_tunnel_conf *)arg);
8024                 break;
8025         default:
8026                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
8027                 ret = -EINVAL;
8028                 break;
8029         }
8030         return ret;
8031 }
8032
8033 static int
8034 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
8035 {
8036         int ret = 0;
8037         uint32_t ctrl;
8038         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8039
8040         if (hw->mac.type != ixgbe_mac_X550 &&
8041             hw->mac.type != ixgbe_mac_X550EM_x &&
8042             hw->mac.type != ixgbe_mac_X550EM_a) {
8043                 return -ENOTSUP;
8044         }
8045
8046         ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
8047         ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
8048         if (en)
8049                 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
8050         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
8051
8052         return ret;
8053 }
8054
8055 /* Enable l2 tunnel forwarding */
8056 static int
8057 ixgbe_dev_l2_tunnel_forwarding_enable
8058         (struct rte_eth_dev *dev,
8059          enum rte_eth_tunnel_type l2_tunnel_type)
8060 {
8061         struct ixgbe_l2_tn_info *l2_tn_info =
8062                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8063         int ret = 0;
8064
8065         switch (l2_tunnel_type) {
8066         case RTE_L2_TUNNEL_TYPE_E_TAG:
8067                 l2_tn_info->e_tag_fwd_en = TRUE;
8068                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
8069                 break;
8070         default:
8071                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8072                 ret = -EINVAL;
8073                 break;
8074         }
8075
8076         return ret;
8077 }
8078
8079 /* Disable l2 tunnel forwarding */
8080 static int
8081 ixgbe_dev_l2_tunnel_forwarding_disable
8082         (struct rte_eth_dev *dev,
8083          enum rte_eth_tunnel_type l2_tunnel_type)
8084 {
8085         struct ixgbe_l2_tn_info *l2_tn_info =
8086                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8087         int ret = 0;
8088
8089         switch (l2_tunnel_type) {
8090         case RTE_L2_TUNNEL_TYPE_E_TAG:
8091                 l2_tn_info->e_tag_fwd_en = FALSE;
8092                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
8093                 break;
8094         default:
8095                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8096                 ret = -EINVAL;
8097                 break;
8098         }
8099
8100         return ret;
8101 }
8102
8103 static int
8104 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
8105                              struct rte_eth_l2_tunnel_conf *l2_tunnel,
8106                              bool en)
8107 {
8108         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
8109         int ret = 0;
8110         uint32_t vmtir, vmvir;
8111         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8112
8113         if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
8114                 PMD_DRV_LOG(ERR,
8115                             "VF id %u should be less than %u",
8116                             l2_tunnel->vf_id,
8117                             pci_dev->max_vfs);
8118                 return -EINVAL;
8119         }
8120
8121         if (hw->mac.type != ixgbe_mac_X550 &&
8122             hw->mac.type != ixgbe_mac_X550EM_x &&
8123             hw->mac.type != ixgbe_mac_X550EM_a) {
8124                 return -ENOTSUP;
8125         }
8126
8127         if (en)
8128                 vmtir = l2_tunnel->tunnel_id;
8129         else
8130                 vmtir = 0;
8131
8132         IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
8133
8134         vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
8135         vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
8136         if (en)
8137                 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
8138         IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
8139
8140         return ret;
8141 }
8142
8143 /* Enable l2 tunnel tag insertion */
8144 static int
8145 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
8146                                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
8147 {
8148         int ret = 0;
8149
8150         switch (l2_tunnel->l2_tunnel_type) {
8151         case RTE_L2_TUNNEL_TYPE_E_TAG:
8152                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
8153                 break;
8154         default:
8155                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8156                 ret = -EINVAL;
8157                 break;
8158         }
8159
8160         return ret;
8161 }
8162
8163 /* Disable l2 tunnel tag insertion */
8164 static int
8165 ixgbe_dev_l2_tunnel_insertion_disable
8166         (struct rte_eth_dev *dev,
8167          struct rte_eth_l2_tunnel_conf *l2_tunnel)
8168 {
8169         int ret = 0;
8170
8171         switch (l2_tunnel->l2_tunnel_type) {
8172         case RTE_L2_TUNNEL_TYPE_E_TAG:
8173                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
8174                 break;
8175         default:
8176                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8177                 ret = -EINVAL;
8178                 break;
8179         }
8180
8181         return ret;
8182 }
8183
8184 static int
8185 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
8186                              bool en)
8187 {
8188         int ret = 0;
8189         uint32_t qde;
8190         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8191
8192         if (hw->mac.type != ixgbe_mac_X550 &&
8193             hw->mac.type != ixgbe_mac_X550EM_x &&
8194             hw->mac.type != ixgbe_mac_X550EM_a) {
8195                 return -ENOTSUP;
8196         }
8197
8198         qde = IXGBE_READ_REG(hw, IXGBE_QDE);
8199         if (en)
8200                 qde |= IXGBE_QDE_STRIP_TAG;
8201         else
8202                 qde &= ~IXGBE_QDE_STRIP_TAG;
8203         qde &= ~IXGBE_QDE_READ;
8204         qde |= IXGBE_QDE_WRITE;
8205         IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
8206
8207         return ret;
8208 }
8209
8210 /* Enable l2 tunnel tag stripping */
8211 static int
8212 ixgbe_dev_l2_tunnel_stripping_enable
8213         (struct rte_eth_dev *dev,
8214          enum rte_eth_tunnel_type l2_tunnel_type)
8215 {
8216         int ret = 0;
8217
8218         switch (l2_tunnel_type) {
8219         case RTE_L2_TUNNEL_TYPE_E_TAG:
8220                 ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
8221                 break;
8222         default:
8223                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8224                 ret = -EINVAL;
8225                 break;
8226         }
8227
8228         return ret;
8229 }
8230
8231 /* Disable l2 tunnel tag stripping */
8232 static int
8233 ixgbe_dev_l2_tunnel_stripping_disable
8234         (struct rte_eth_dev *dev,
8235          enum rte_eth_tunnel_type l2_tunnel_type)
8236 {
8237         int ret = 0;
8238
8239         switch (l2_tunnel_type) {
8240         case RTE_L2_TUNNEL_TYPE_E_TAG:
8241                 ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
8242                 break;
8243         default:
8244                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8245                 ret = -EINVAL;
8246                 break;
8247         }
8248
8249         return ret;
8250 }
8251
8252 /* Enable/disable l2 tunnel offload functions */
8253 static int
8254 ixgbe_dev_l2_tunnel_offload_set
8255         (struct rte_eth_dev *dev,
8256          struct rte_eth_l2_tunnel_conf *l2_tunnel,
8257          uint32_t mask,
8258          uint8_t en)
8259 {
8260         int ret = 0;
8261
8262         if (l2_tunnel == NULL)
8263                 return -EINVAL;
8264
8265         ret = -EINVAL;
8266         if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
8267                 if (en)
8268                         ret = ixgbe_dev_l2_tunnel_enable(
8269                                 dev,
8270                                 l2_tunnel->l2_tunnel_type);
8271                 else
8272                         ret = ixgbe_dev_l2_tunnel_disable(
8273                                 dev,
8274                                 l2_tunnel->l2_tunnel_type);
8275         }
8276
8277         if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
8278                 if (en)
8279                         ret = ixgbe_dev_l2_tunnel_insertion_enable(
8280                                 dev,
8281                                 l2_tunnel);
8282                 else
8283                         ret = ixgbe_dev_l2_tunnel_insertion_disable(
8284                                 dev,
8285                                 l2_tunnel);
8286         }
8287
8288         if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
8289                 if (en)
8290                         ret = ixgbe_dev_l2_tunnel_stripping_enable(
8291                                 dev,
8292                                 l2_tunnel->l2_tunnel_type);
8293                 else
8294                         ret = ixgbe_dev_l2_tunnel_stripping_disable(
8295                                 dev,
8296                                 l2_tunnel->l2_tunnel_type);
8297         }
8298
8299         if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
8300                 if (en)
8301                         ret = ixgbe_dev_l2_tunnel_forwarding_enable(
8302                                 dev,
8303                                 l2_tunnel->l2_tunnel_type);
8304                 else
8305                         ret = ixgbe_dev_l2_tunnel_forwarding_disable(
8306                                 dev,
8307                                 l2_tunnel->l2_tunnel_type);
8308         }
8309
8310         return ret;
8311 }
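
/* Usage sketch (illustrative only, not part of the driver): applications
 * normally reach this handler through the generic ethdev wrapper, e.g. to
 * enable E-tag forwarding on an assumed port 0:
 *
 *     struct rte_eth_l2_tunnel_conf conf = {
 *             .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *     };
 *     rte_eth_dev_l2_tunnel_offload_set(0, &conf,
 *                                       ETH_L2_TUNNEL_FORWARDING_MASK, 1);
 *
 * Mask bits may be OR'ed together; each selected offload is enabled or
 * disabled according to the 'en' argument.
 */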
8312
8313 static int
8314 ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
8315                         uint16_t port)
8316 {
8317         IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
8318         IXGBE_WRITE_FLUSH(hw);
8319
8320         return 0;
8321 }
8322
8323 /* There is only one register for the VxLAN UDP port, so several ports
8324  * cannot be added; the register is simply updated with the new value.
8325  */
8326 static int
8327 ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
8328                      uint16_t port)
8329 {
8330         if (port == 0) {
8331                 PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");
8332                 return -EINVAL;
8333         }
8334
8335         return ixgbe_update_vxlan_port(hw, port);
8336 }
8337
8338 /* The VxLAN UDP port cannot really be deleted: the register always holds
8339  * some value, so "deleting" the port resets the register back to its
8340  * original value of 0.
8341  */
8342 static int
8343 ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
8344                      uint16_t port)
8345 {
8346         uint16_t cur_port;
8347
8348         cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
8349
8350         if (cur_port != port) {
8351                 PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
8352                 return -EINVAL;
8353         }
8354
8355         return ixgbe_update_vxlan_port(hw, 0);
8356 }
8357
8358 /* Add UDP tunneling port */
8359 static int
8360 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8361                               struct rte_eth_udp_tunnel *udp_tunnel)
8362 {
8363         int ret = 0;
8364         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8365
8366         if (hw->mac.type != ixgbe_mac_X550 &&
8367             hw->mac.type != ixgbe_mac_X550EM_x &&
8368             hw->mac.type != ixgbe_mac_X550EM_a) {
8369                 return -ENOTSUP;
8370         }
8371
8372         if (udp_tunnel == NULL)
8373                 return -EINVAL;
8374
8375         switch (udp_tunnel->prot_type) {
8376         case RTE_TUNNEL_TYPE_VXLAN:
8377                 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
8378                 break;
8379
8380         case RTE_TUNNEL_TYPE_GENEVE:
8381         case RTE_TUNNEL_TYPE_TEREDO:
8382                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8383                 ret = -EINVAL;
8384                 break;
8385
8386         default:
8387                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8388                 ret = -EINVAL;
8389                 break;
8390         }
8391
8392         return ret;
8393 }
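
/* Usage sketch (illustrative only): the application-side counterpart of
 * this hook is the generic ethdev call, e.g. registering the conventional
 * VxLAN port 4789 (the port number here is just an example):
 *
 *     struct rte_eth_udp_tunnel tunnel = {
 *             .udp_port = 4789,
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *     rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *
 * Because the hardware exposes a single VXLANCTRL register, the most
 * recently added port replaces any previously configured one.
 */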
8394
8395 /* Remove UDP tunneling port */
8396 static int
8397 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8398                               struct rte_eth_udp_tunnel *udp_tunnel)
8399 {
8400         int ret = 0;
8401         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8402
8403         if (hw->mac.type != ixgbe_mac_X550 &&
8404             hw->mac.type != ixgbe_mac_X550EM_x &&
8405             hw->mac.type != ixgbe_mac_X550EM_a) {
8406                 return -ENOTSUP;
8407         }
8408
8409         if (udp_tunnel == NULL)
8410                 return -EINVAL;
8411
8412         switch (udp_tunnel->prot_type) {
8413         case RTE_TUNNEL_TYPE_VXLAN:
8414                 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
8415                 break;
8416         case RTE_TUNNEL_TYPE_GENEVE:
8417         case RTE_TUNNEL_TYPE_TEREDO:
8418                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8419                 ret = -EINVAL;
8420                 break;
8421         default:
8422                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8423                 ret = -EINVAL;
8424                 break;
8425         }
8426
8427         return ret;
8428 }
8429
8430 static int
8431 ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
8432 {
8433         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8434         int ret;
8435
8436         switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) {
8437         case IXGBE_SUCCESS:
8438                 ret = 0;
8439                 break;
8440         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8441                 ret = -ENOTSUP;
8442                 break;
8443         default:
8444                 ret = -EAGAIN;
8445                 break;
8446         }
8447
8448         return ret;
8449 }
8450
8451 static int
8452 ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
8453 {
8454         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8455         int ret;
8456
8457         switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) {
8458         case IXGBE_SUCCESS:
8459                 ret = 0;
8460                 break;
8461         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8462                 ret = -ENOTSUP;
8463                 break;
8464         default:
8465                 ret = -EAGAIN;
8466                 break;
8467         }
8468
8469         return ret;
8470 }
8471
8472 static int
8473 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
8474 {
8475         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8476         int ret;
8477         int mode = IXGBEVF_XCAST_MODE_ALLMULTI;
8478
8479         switch (hw->mac.ops.update_xcast_mode(hw, mode)) {
8480         case IXGBE_SUCCESS:
8481                 ret = 0;
8482                 break;
8483         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8484                 ret = -ENOTSUP;
8485                 break;
8486         default:
8487                 ret = -EAGAIN;
8488                 break;
8489         }
8490
8491         return ret;
8492 }
8493
8494 static int
8495 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
8496 {
8497         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8498         int ret;
8499
8500         switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) {
8501         case IXGBE_SUCCESS:
8502                 ret = 0;
8503                 break;
8504         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8505                 ret = -ENOTSUP;
8506                 break;
8507         default:
8508                 ret = -EAGAIN;
8509                 break;
8510         }
8511
8512         return ret;
8513 }
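
/* The four VF handlers above all go through the PF mailbox: the VF asks the
 * PF to switch its xcast mode (NONE, MULTI, ALLMULTI or PROMISC) and maps
 * the PF's reply onto 0, -ENOTSUP or -EAGAIN. From the application they are
 * reached through the usual ethdev calls, for example:
 *
 *     rte_eth_promiscuous_enable(port_id);
 *     rte_eth_allmulticast_enable(port_id);
 */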
8514
8515 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
8516 {
8517         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8518         u32 in_msg = 0;
8519
8520         /* peek the message first */
8521         in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
8522
8523         /* PF reset VF event */
8524         if (in_msg == IXGBE_PF_CONTROL_MSG) {
8525                 /* dummy mbx read to ack pf */
8526                 if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
8527                         return;
8528                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
8529                                               NULL);
8530         }
8531 }
8532
8533 static int
8534 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
8535 {
8536         uint32_t eicr;
8537         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8538         struct ixgbe_interrupt *intr =
8539                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8540         ixgbevf_intr_disable(dev);
8541
8542         /* read-on-clear nic registers here */
8543         eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
8544         intr->flags = 0;
8545
8546         /* only one misc vector supported - mailbox */
8547         eicr &= IXGBE_VTEICR_MASK;
8548         if (eicr == IXGBE_MISC_VEC_ID)
8549                 intr->flags |= IXGBE_FLAG_MAILBOX;
8550
8551         return 0;
8552 }
8553
8554 static int
8555 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
8556 {
8557         struct ixgbe_interrupt *intr =
8558                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8559
8560         if (intr->flags & IXGBE_FLAG_MAILBOX) {
8561                 ixgbevf_mbx_process(dev);
8562                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
8563         }
8564
8565         ixgbevf_intr_enable(dev);
8566
8567         return 0;
8568 }
8569
8570 static void
8571 ixgbevf_dev_interrupt_handler(void *param)
8572 {
8573         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
8574
8575         ixgbevf_dev_interrupt_get_status(dev);
8576         ixgbevf_dev_interrupt_action(dev);
8577 }
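
/* Sketch (illustrative, not part of the driver): an application that wants
 * to react to the PF-initiated reset signalled above can register for the
 * RTE_ETH_EVENT_INTR_RESET event, e.g.:
 *
 *     static int
 *     reset_cb(uint16_t port_id, enum rte_eth_event_type event,
 *              void *cb_arg, void *ret_param)
 *     {
 *             RTE_SET_USED(event);
 *             RTE_SET_USED(cb_arg);
 *             RTE_SET_USED(ret_param);
 *             printf("port %u: PF requested a reset\n", port_id);
 *             return 0;
 *     }
 *
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *                                   reset_cb, NULL);
 *
 * 'reset_cb' is a hypothetical callback name used only for illustration.
 */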
8578
8579 /**
8580  *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
8581  *  @hw: pointer to hardware structure
8582  *
8583  *  Stops the transmit data path and waits for the HW to internally empty
8584  *  the Tx security block
8585  **/
8586 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
8587 {
8588 #define IXGBE_MAX_SECTX_POLL 40
8589
8590         int i;
8591         int sectxreg;
8592
8593         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8594         sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
8595         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8596         for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
8597                 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
8598                 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
8599                         break;
8600                 /* Use interrupt-safe sleep just in case */
8601                 usec_delay(1000);
8602         }
8603
8604         /* For informational purposes only */
8605         if (i >= IXGBE_MAX_SECTX_POLL)
8606                 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
8607                          "path fully disabled.  Continuing with init.");
8608
8609         return IXGBE_SUCCESS;
8610 }
8611
8612 /**
8613  *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
8614  *  @hw: pointer to hardware structure
8615  *
8616  *  Enables the transmit data path.
8617  **/
8618 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
8619 {
8620         uint32_t sectxreg;
8621
8622         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8623         sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
8624         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8625         IXGBE_WRITE_FLUSH(hw);
8626
8627         return IXGBE_SUCCESS;
8628 }
8629
8630 /* restore n-tuple filter */
8631 static inline void
8632 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
8633 {
8634         struct ixgbe_filter_info *filter_info =
8635                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8636         struct ixgbe_5tuple_filter *node;
8637
8638         TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
8639                 ixgbe_inject_5tuple_filter(dev, node);
8640         }
8641 }
8642
8643 /* restore ethernet type filter */
8644 static inline void
8645 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
8646 {
8647         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8648         struct ixgbe_filter_info *filter_info =
8649                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8650         int i;
8651
8652         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8653                 if (filter_info->ethertype_mask & (1 << i)) {
8654                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
8655                                         filter_info->ethertype_filters[i].etqf);
8656                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
8657                                         filter_info->ethertype_filters[i].etqs);
8658                         IXGBE_WRITE_FLUSH(hw);
8659                 }
8660         }
8661 }
8662
8663 /* restore SYN filter */
8664 static inline void
8665 ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
8666 {
8667         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8668         struct ixgbe_filter_info *filter_info =
8669                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8670         uint32_t synqf;
8671
8672         synqf = filter_info->syn_info;
8673
8674         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
8675                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
8676                 IXGBE_WRITE_FLUSH(hw);
8677         }
8678 }
8679
8680 /* restore L2 tunnel filter */
8681 static inline void
8682 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
8683 {
8684         struct ixgbe_l2_tn_info *l2_tn_info =
8685                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8686         struct ixgbe_l2_tn_filter *node;
8687         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8688
8689         TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
8690                 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
8691                 l2_tn_conf.tunnel_id      = node->key.tn_id;
8692                 l2_tn_conf.pool           = node->pool;
8693                 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
8694         }
8695 }
8696
8697 /* restore rss filter */
8698 static inline void
8699 ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
8700 {
8701         struct ixgbe_filter_info *filter_info =
8702                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8703
8704         if (filter_info->rss_info.conf.queue_num)
8705                 ixgbe_config_rss_filter(dev,
8706                         &filter_info->rss_info, TRUE);
8707 }
8708
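/* Re-program every software-tracked filter type into the hardware; called
 * to restore the filter configuration after the device has been (re)started
 * and its registers reset.
 */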
8709 static int
8710 ixgbe_filter_restore(struct rte_eth_dev *dev)
8711 {
8712         ixgbe_ntuple_filter_restore(dev);
8713         ixgbe_ethertype_filter_restore(dev);
8714         ixgbe_syn_filter_restore(dev);
8715         ixgbe_fdir_filter_restore(dev);
8716         ixgbe_l2_tn_filter_restore(dev);
8717         ixgbe_rss_filter_restore(dev);
8718
8719         return 0;
8720 }
8721
8722 static void
8723 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
8724 {
8725         struct ixgbe_l2_tn_info *l2_tn_info =
8726                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8727         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8728
8729         if (l2_tn_info->e_tag_en)
8730                 (void)ixgbe_e_tag_enable(hw);
8731
8732         if (l2_tn_info->e_tag_fwd_en)
8733                 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
8734
8735         (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
8736 }
8737
8738 /* remove all the n-tuple filters */
8739 void
8740 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
8741 {
8742         struct ixgbe_filter_info *filter_info =
8743                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8744         struct ixgbe_5tuple_filter *p_5tuple;
8745
8746         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
8747                 ixgbe_remove_5tuple_filter(dev, p_5tuple);
8748 }
8749
8750 /* remove all the ether type filters */
8751 void
8752 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
8753 {
8754         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8755         struct ixgbe_filter_info *filter_info =
8756                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8757         int i;
8758
8759         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8760                 if (filter_info->ethertype_mask & (1 << i) &&
8761                     !filter_info->ethertype_filters[i].conf) {
8762                         (void)ixgbe_ethertype_filter_remove(filter_info,
8763                                                             (uint8_t)i);
8764                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
8765                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
8766                         IXGBE_WRITE_FLUSH(hw);
8767                 }
8768         }
8769 }
8770
8771 /* remove the SYN filter */
8772 void
8773 ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
8774 {
8775         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8776         struct ixgbe_filter_info *filter_info =
8777                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8778
8779         if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
8780                 filter_info->syn_info = 0;
8781
8782                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
8783                 IXGBE_WRITE_FLUSH(hw);
8784         }
8785 }
8786
8787 /* remove all the L2 tunnel filters */
8788 int
8789 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
8790 {
8791         struct ixgbe_l2_tn_info *l2_tn_info =
8792                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8793         struct ixgbe_l2_tn_filter *l2_tn_filter;
8794         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8795         int ret = 0;
8796
8797         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
8798                 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
8799                 l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
8800                 l2_tn_conf.pool           = l2_tn_filter->pool;
8801                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
8802                 if (ret < 0)
8803                         return ret;
8804         }
8805
8806         return 0;
8807 }
8808
8809 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
8810 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
8811 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
8812 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
8813 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
8814 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
8815 RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
8816                               IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");
8817
8818 RTE_INIT(ixgbe_init_log)
8819 {
8820         ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
8821         if (ixgbe_logtype_init >= 0)
8822                 rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
8823         ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
8824         if (ixgbe_logtype_driver >= 0)
8825                 rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
8826 }