drivers/net/ixgbe/ixgbe_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_bus_pci.h>
52 #include <rte_atomic.h>
53 #include <rte_branch_prediction.h>
54 #include <rte_memory.h>
55 #include <rte_memzone.h>
56 #include <rte_eal.h>
57 #include <rte_alarm.h>
58 #include <rte_ether.h>
59 #include <rte_ethdev.h>
60 #include <rte_ethdev_pci.h>
61 #include <rte_malloc.h>
62 #include <rte_random.h>
63 #include <rte_dev.h>
64 #include <rte_hash_crc.h>
65 #ifdef RTE_LIBRTE_SECURITY
66 #include <rte_security_driver.h>
67 #endif
68
69 #include "ixgbe_logs.h"
70 #include "base/ixgbe_api.h"
71 #include "base/ixgbe_vf.h"
72 #include "base/ixgbe_common.h"
73 #include "ixgbe_ethdev.h"
74 #include "ixgbe_bypass.h"
75 #include "ixgbe_rxtx.h"
76 #include "base/ixgbe_type.h"
77 #include "base/ixgbe_phy.h"
78 #include "ixgbe_regs.h"
79
80 /*
81  * High threshold controlling when to start sending XOFF frames. Must be at
82  * least 8 bytes less than receive packet buffer size. This value is in units
83  * of 1024 bytes.
84  */
85 #define IXGBE_FC_HI    0x80
86
87 /*
88  * Low threshold controlling when to start sending XON frames. This value is
89  * in units of 1024 bytes.
90  */
91 #define IXGBE_FC_LO    0x40
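/* With 1 KB units, these defaults correspond to a 128 KB (0x80) XOFF and a 64 KB (0x40) XON water mark. */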
92
93 /* Default minimum inter-interrupt interval for EITR configuration */
94 #define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E
95
96 /* Timer value included in XOFF frames. */
97 #define IXGBE_FC_PAUSE 0x680
98
99 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
100 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
101 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
102
103 #define IXGBE_MMW_SIZE_DEFAULT        0x4
104 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
105 #define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
106
107 /*
108  *  Default values for RX/TX configuration
109  */
110 #define IXGBE_DEFAULT_RX_FREE_THRESH  32
111 #define IXGBE_DEFAULT_RX_PTHRESH      8
112 #define IXGBE_DEFAULT_RX_HTHRESH      8
113 #define IXGBE_DEFAULT_RX_WTHRESH      0
114
115 #define IXGBE_DEFAULT_TX_FREE_THRESH  32
116 #define IXGBE_DEFAULT_TX_PTHRESH      32
117 #define IXGBE_DEFAULT_TX_HTHRESH      0
118 #define IXGBE_DEFAULT_TX_WTHRESH      0
119 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
120
121 /* Bit shift and mask */
122 #define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
123 #define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
124 #define IXGBE_8_BIT_WIDTH  CHAR_BIT
125 #define IXGBE_8_BIT_MASK   UINT8_MAX
126
127 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
128
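/* Number of per-queue statistics counters, derived from the length of the qprc array in struct ixgbe_hw_stats. */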
129 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
130
131 #define IXGBE_HKEY_MAX_INDEX 10
132
133 /* Additional timesync values. */
134 #define NSEC_PER_SEC             1000000000L
135 #define IXGBE_INCVAL_10GB        0x66666666
136 #define IXGBE_INCVAL_1GB         0x40000000
137 #define IXGBE_INCVAL_100         0x50000000
138 #define IXGBE_INCVAL_SHIFT_10GB  28
139 #define IXGBE_INCVAL_SHIFT_1GB   24
140 #define IXGBE_INCVAL_SHIFT_100   21
141 #define IXGBE_INCVAL_SHIFT_82599 7
142 #define IXGBE_INCPER_SHIFT_82599 24
143
144 #define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
145
146 #define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
147 #define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
148 #define DEFAULT_ETAG_ETYPE                     0x893f
149 #define IXGBE_ETAG_ETYPE                       0x00005084
150 #define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
151 #define IXGBE_ETAG_ETYPE_VALID                 0x80000000
152 #define IXGBE_RAH_ADTYPE                       0x40000000
153 #define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
154 #define IXGBE_VMVIR_TAGA_MASK                  0x18000000
155 #define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
156 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
157 #define IXGBE_QDE_STRIP_TAG                    0x00000004
158 #define IXGBE_VTEICR_MASK                      0x07
159
160 #define IXGBE_EXVET_VET_EXT_SHIFT              16
161 #define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
162
163 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
164 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
165 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
166 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
167 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
168 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
169 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
170 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
171 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
172 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
173 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
174 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
175 static void ixgbe_dev_close(struct rte_eth_dev *dev);
176 static int  ixgbe_dev_reset(struct rte_eth_dev *dev);
177 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
178 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
179 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
180 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
181 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
182                                 int wait_to_complete);
183 static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
184                                 struct rte_eth_stats *stats);
185 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
186                                 struct rte_eth_xstat *xstats, unsigned n);
187 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
188                                   struct rte_eth_xstat *xstats, unsigned n);
189 static int
190 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
191                 uint64_t *values, unsigned int n);
192 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
193 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
194 static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
195         struct rte_eth_xstat_name *xstats_names,
196         unsigned int size);
197 static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
198         struct rte_eth_xstat_name *xstats_names, unsigned limit);
199 static int ixgbe_dev_xstats_get_names_by_id(
200         struct rte_eth_dev *dev,
201         struct rte_eth_xstat_name *xstats_names,
202         const uint64_t *ids,
203         unsigned int limit);
204 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
205                                              uint16_t queue_id,
206                                              uint8_t stat_idx,
207                                              uint8_t is_rx);
208 static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
209                                  size_t fw_size);
210 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
211                                struct rte_eth_dev_info *dev_info);
212 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
213 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
214                                  struct rte_eth_dev_info *dev_info);
215 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
216
217 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
218                 uint16_t vlan_id, int on);
219 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
220                                enum rte_vlan_type vlan_type,
221                                uint16_t tpid_id);
222 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
223                 uint16_t queue, bool on);
224 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
225                 int on);
226 static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
227 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
228 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
229 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
230 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
231
232 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
233 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
234 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
235                                struct rte_eth_fc_conf *fc_conf);
236 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
237                                struct rte_eth_fc_conf *fc_conf);
238 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
239                 struct rte_eth_pfc_conf *pfc_conf);
240 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
241                         struct rte_eth_rss_reta_entry64 *reta_conf,
242                         uint16_t reta_size);
243 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
244                         struct rte_eth_rss_reta_entry64 *reta_conf,
245                         uint16_t reta_size);
246 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
247 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
248 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
249 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
250 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
251 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
252                                       struct rte_intr_handle *handle);
253 static void ixgbe_dev_interrupt_handler(void *param);
254 static void ixgbe_dev_interrupt_delayed_handler(void *param);
255 static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
256                          uint32_t index, uint32_t pool);
257 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
258 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
259                                            struct ether_addr *mac_addr);
260 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
261 static bool is_device_supported(struct rte_eth_dev *dev,
262                                 struct rte_pci_driver *drv);
263
264 /* For Virtual Function support */
265 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
266 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
267 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
268 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
269 static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
270                                    int wait_to_complete);
271 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
272 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
273 static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
274 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
275 static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
276 static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
277                 struct rte_eth_stats *stats);
278 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
279 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
280                 uint16_t vlan_id, int on);
281 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
282                 uint16_t queue, int on);
283 static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
284 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
285 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
286                                             uint16_t queue_id);
287 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
288                                              uint16_t queue_id);
289 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
290                                  uint8_t queue, uint8_t msix_vector);
291 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
292 static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
293 static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
294
295 /* For Ethernet VMDq API support */
296 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
297                 ether_addr * mac_addr, uint8_t on);
298 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
299 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
300                 struct rte_eth_mirror_conf *mirror_conf,
301                 uint8_t rule_id, uint8_t on);
302 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
303                 uint8_t rule_id);
304 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
305                                           uint16_t queue_id);
306 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
307                                            uint16_t queue_id);
308 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
309                                uint8_t queue, uint8_t msix_vector);
310 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
311
312 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
313                                 struct ether_addr *mac_addr,
314                                 uint32_t index, uint32_t pool);
315 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
316 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
317                                              struct ether_addr *mac_addr);
318 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
319                         struct rte_eth_syn_filter *filter);
320 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
321                         enum rte_filter_op filter_op,
322                         void *arg);
323 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
324                         struct ixgbe_5tuple_filter *filter);
325 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
326                         struct ixgbe_5tuple_filter *filter);
327 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
328                                 enum rte_filter_op filter_op,
329                                 void *arg);
330 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
331                         struct rte_eth_ntuple_filter *filter);
332 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
333                                 enum rte_filter_op filter_op,
334                                 void *arg);
335 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
336                         struct rte_eth_ethertype_filter *filter);
337 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
338                      enum rte_filter_type filter_type,
339                      enum rte_filter_op filter_op,
340                      void *arg);
341 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
342
343 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
344                                       struct ether_addr *mc_addr_set,
345                                       uint32_t nb_mc_addr);
346 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
347                                    struct rte_eth_dcb_info *dcb_info);
348
349 static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
350 static int ixgbe_get_regs(struct rte_eth_dev *dev,
351                             struct rte_dev_reg_info *regs);
352 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
353 static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
354                                 struct rte_dev_eeprom_info *eeprom);
355 static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
356                                 struct rte_dev_eeprom_info *eeprom);
357
358 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
359 static int ixgbevf_get_regs(struct rte_eth_dev *dev,
360                                 struct rte_dev_reg_info *regs);
361
362 static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
363 static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
364 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
365                                             struct timespec *timestamp,
366                                             uint32_t flags);
367 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
368                                             struct timespec *timestamp);
369 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
370 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
371                                    struct timespec *timestamp);
372 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
373                                    const struct timespec *timestamp);
374 static void ixgbevf_dev_interrupt_handler(void *param);
375
376 static int ixgbe_dev_l2_tunnel_eth_type_conf
377         (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
378 static int ixgbe_dev_l2_tunnel_offload_set
379         (struct rte_eth_dev *dev,
380          struct rte_eth_l2_tunnel_conf *l2_tunnel,
381          uint32_t mask,
382          uint8_t en);
383 static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
384                                              enum rte_filter_op filter_op,
385                                              void *arg);
386
387 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
388                                          struct rte_eth_udp_tunnel *udp_tunnel);
389 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
390                                          struct rte_eth_udp_tunnel *udp_tunnel);
391 static int ixgbe_filter_restore(struct rte_eth_dev *dev);
392 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
393
394 /*
395  * Macros to accumulate VF stats from registers that are not "cleared on read"
396  */
397 #define UPDATE_VF_STAT(reg, last, cur)                          \
398 {                                                               \
399         uint32_t latest = IXGBE_READ_REG(hw, reg);              \
400         cur += (latest - last) & UINT_MAX;                      \
401         last = latest;                                          \
402 }
403
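/* 36-bit counters are read as two 32-bit halves; adding 2^36 before masking to 36 bits keeps the delta correct across rollover. */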
404 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
405 {                                                                \
406         u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
407         u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
408         u64 latest = ((new_msb << 32) | new_lsb);                \
409         cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
410         last = latest;                                           \
411 }
412
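/* Per-queue VLAN strip state is kept in a bitmap: idx selects the bitmap word and bit the position within it for queue q (NBBY = bits per byte). */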
413 #define IXGBE_SET_HWSTRIP(h, q) do {\
414                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
415                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
416                 (h)->bitmap[idx] |= 1 << bit;\
417         } while (0)
418
419 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\
420                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
421                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
422                 (h)->bitmap[idx] &= ~(1 << bit);\
423         } while (0)
424
425 #define IXGBE_GET_HWSTRIP(h, q, r) do {\
426                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
427                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
428                 (r) = (h)->bitmap[idx] >> bit & 1;\
429         } while (0)
430
431 /*
432  * The set of PCI devices this driver supports
433  */
434 static const struct rte_pci_id pci_id_ixgbe_map[] = {
435         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
436         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
437         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
438         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
439         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
440         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
441         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
442         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
443         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
444         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
445         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
446         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
447         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
448         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
449         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
450         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
451         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
452         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
453         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
454         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
455         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
456         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
457         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
458         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
459         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
460         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
461         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
462         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
463         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
464         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
465         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
466         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
467         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
468         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
469         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
470         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
471         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
472         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
473         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
474         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
475         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
476         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
477         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
478         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
479         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
480         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
481         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
482         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
483 #ifdef RTE_LIBRTE_IXGBE_BYPASS
484         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
485 #endif
486         { .vendor_id = 0, /* sentinel */ },
487 };
488
489 /*
490  * The set of PCI devices this driver supports (for 82599 VF)
491  */
492 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
493         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
494         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
495         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
496         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
497         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
498         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
499         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
500         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
501         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
502         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
503         { .vendor_id = 0, /* sentinel */ },
504 };
505
506 static const struct rte_eth_desc_lim rx_desc_lim = {
507         .nb_max = IXGBE_MAX_RING_DESC,
508         .nb_min = IXGBE_MIN_RING_DESC,
509         .nb_align = IXGBE_RXD_ALIGN,
510 };
511
512 static const struct rte_eth_desc_lim tx_desc_lim = {
513         .nb_max = IXGBE_MAX_RING_DESC,
514         .nb_min = IXGBE_MIN_RING_DESC,
515         .nb_align = IXGBE_TXD_ALIGN,
516         .nb_seg_max = IXGBE_TX_MAX_SEG,
517         .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
518 };
519
520 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
521         .dev_configure        = ixgbe_dev_configure,
522         .dev_start            = ixgbe_dev_start,
523         .dev_stop             = ixgbe_dev_stop,
524         .dev_set_link_up    = ixgbe_dev_set_link_up,
525         .dev_set_link_down  = ixgbe_dev_set_link_down,
526         .dev_close            = ixgbe_dev_close,
527         .dev_reset            = ixgbe_dev_reset,
528         .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
529         .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
530         .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
531         .allmulticast_disable = ixgbe_dev_allmulticast_disable,
532         .link_update          = ixgbe_dev_link_update,
533         .stats_get            = ixgbe_dev_stats_get,
534         .xstats_get           = ixgbe_dev_xstats_get,
535         .xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
536         .stats_reset          = ixgbe_dev_stats_reset,
537         .xstats_reset         = ixgbe_dev_xstats_reset,
538         .xstats_get_names     = ixgbe_dev_xstats_get_names,
539         .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
540         .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
541         .fw_version_get       = ixgbe_fw_version_get,
542         .dev_infos_get        = ixgbe_dev_info_get,
543         .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
544         .mtu_set              = ixgbe_dev_mtu_set,
545         .vlan_filter_set      = ixgbe_vlan_filter_set,
546         .vlan_tpid_set        = ixgbe_vlan_tpid_set,
547         .vlan_offload_set     = ixgbe_vlan_offload_set,
548         .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
549         .rx_queue_start       = ixgbe_dev_rx_queue_start,
550         .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
551         .tx_queue_start       = ixgbe_dev_tx_queue_start,
552         .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
553         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
554         .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
555         .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
556         .rx_queue_release     = ixgbe_dev_rx_queue_release,
557         .rx_queue_count       = ixgbe_dev_rx_queue_count,
558         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
559         .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
560         .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
561         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
562         .tx_queue_release     = ixgbe_dev_tx_queue_release,
563         .dev_led_on           = ixgbe_dev_led_on,
564         .dev_led_off          = ixgbe_dev_led_off,
565         .flow_ctrl_get        = ixgbe_flow_ctrl_get,
566         .flow_ctrl_set        = ixgbe_flow_ctrl_set,
567         .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
568         .mac_addr_add         = ixgbe_add_rar,
569         .mac_addr_remove      = ixgbe_remove_rar,
570         .mac_addr_set         = ixgbe_set_default_mac_addr,
571         .uc_hash_table_set    = ixgbe_uc_hash_table_set,
572         .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
573         .mirror_rule_set      = ixgbe_mirror_rule_set,
574         .mirror_rule_reset    = ixgbe_mirror_rule_reset,
575         .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
576         .reta_update          = ixgbe_dev_rss_reta_update,
577         .reta_query           = ixgbe_dev_rss_reta_query,
578         .rss_hash_update      = ixgbe_dev_rss_hash_update,
579         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
580         .filter_ctrl          = ixgbe_dev_filter_ctrl,
581         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
582         .rxq_info_get         = ixgbe_rxq_info_get,
583         .txq_info_get         = ixgbe_txq_info_get,
584         .timesync_enable      = ixgbe_timesync_enable,
585         .timesync_disable     = ixgbe_timesync_disable,
586         .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
587         .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
588         .get_reg              = ixgbe_get_regs,
589         .get_eeprom_length    = ixgbe_get_eeprom_length,
590         .get_eeprom           = ixgbe_get_eeprom,
591         .set_eeprom           = ixgbe_set_eeprom,
592         .get_dcb_info         = ixgbe_dev_get_dcb_info,
593         .timesync_adjust_time = ixgbe_timesync_adjust_time,
594         .timesync_read_time   = ixgbe_timesync_read_time,
595         .timesync_write_time  = ixgbe_timesync_write_time,
596         .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
597         .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
598         .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
599         .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
600         .tm_ops_get           = ixgbe_tm_ops_get,
601 };
602
603 /*
604  * dev_ops for the virtual function; only the bare necessities for
605  * basic VF operation are implemented
606  */
607 static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
608         .dev_configure        = ixgbevf_dev_configure,
609         .dev_start            = ixgbevf_dev_start,
610         .dev_stop             = ixgbevf_dev_stop,
611         .link_update          = ixgbevf_dev_link_update,
612         .stats_get            = ixgbevf_dev_stats_get,
613         .xstats_get           = ixgbevf_dev_xstats_get,
614         .stats_reset          = ixgbevf_dev_stats_reset,
615         .xstats_reset         = ixgbevf_dev_stats_reset,
616         .xstats_get_names     = ixgbevf_dev_xstats_get_names,
617         .dev_close            = ixgbevf_dev_close,
618         .dev_reset            = ixgbevf_dev_reset,
619         .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
620         .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
621         .dev_infos_get        = ixgbevf_dev_info_get,
622         .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
623         .mtu_set              = ixgbevf_dev_set_mtu,
624         .vlan_filter_set      = ixgbevf_vlan_filter_set,
625         .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
626         .vlan_offload_set     = ixgbevf_vlan_offload_set,
627         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
628         .rx_queue_release     = ixgbe_dev_rx_queue_release,
629         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
630         .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
631         .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
632         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
633         .tx_queue_release     = ixgbe_dev_tx_queue_release,
634         .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
635         .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
636         .mac_addr_add         = ixgbevf_add_mac_addr,
637         .mac_addr_remove      = ixgbevf_remove_mac_addr,
638         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
639         .rxq_info_get         = ixgbe_rxq_info_get,
640         .txq_info_get         = ixgbe_txq_info_get,
641         .mac_addr_set         = ixgbevf_set_default_mac_addr,
642         .get_reg              = ixgbevf_get_regs,
643         .reta_update          = ixgbe_dev_rss_reta_update,
644         .reta_query           = ixgbe_dev_rss_reta_query,
645         .rss_hash_update      = ixgbe_dev_rss_hash_update,
646         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
647 };
648
649 /* store statistics names and their offsets in the stats structure */
650 struct rte_ixgbe_xstats_name_off {
651         char name[RTE_ETH_XSTATS_NAME_SIZE];
652         unsigned offset;
653 };
654
655 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
656         {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
657         {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
658         {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
659         {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
660         {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
661         {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
662         {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
663         {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
664         {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
665         {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
666         {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
667         {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
668         {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
669         {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
670         {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
671                 prc1023)},
672         {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
673                 prc1522)},
674         {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
675         {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
676         {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
677         {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
678         {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
679         {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
680         {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
681         {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
682         {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
683         {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
684         {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
685         {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
686         {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
687         {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
688         {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
689         {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
690         {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
691                 ptc1023)},
692         {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
693                 ptc1522)},
694         {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
695         {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
696         {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
697         {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
698
699         {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
700                 fdirustat_add)},
701         {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
702                 fdirustat_remove)},
703         {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
704                 fdirfstat_fadd)},
705         {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
706                 fdirfstat_fremove)},
707         {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
708                 fdirmatch)},
709         {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
710                 fdirmiss)},
711
712         {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
713         {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
714         {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
715                 fclast)},
716         {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
717         {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
718         {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
719         {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
720         {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
721                 fcoe_noddp)},
722         {"rx_fcoe_no_direct_data_placement_ext_buff",
723                 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
724
725         {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
726                 lxontxc)},
727         {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
728                 lxonrxc)},
729         {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
730                 lxofftxc)},
731         {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
732                 lxoffrxc)},
733         {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
734 };
735
736 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
737                            sizeof(rte_ixgbe_stats_strings[0]))
738
739 /* MACsec statistics */
740 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
741         {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
742                 out_pkts_untagged)},
743         {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
744                 out_pkts_encrypted)},
745         {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
746                 out_pkts_protected)},
747         {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
748                 out_octets_encrypted)},
749         {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
750                 out_octets_protected)},
751         {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
752                 in_pkts_untagged)},
753         {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
754                 in_pkts_badtag)},
755         {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
756                 in_pkts_nosci)},
757         {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
758                 in_pkts_unknownsci)},
759         {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
760                 in_octets_decrypted)},
761         {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
762                 in_octets_validated)},
763         {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
764                 in_pkts_unchecked)},
765         {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
766                 in_pkts_delayed)},
767         {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
768                 in_pkts_late)},
769         {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
770                 in_pkts_ok)},
771         {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
772                 in_pkts_invalid)},
773         {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
774                 in_pkts_notvalid)},
775         {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
776                 in_pkts_unusedsa)},
777         {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
778                 in_pkts_notusingsa)},
779 };
780
781 #define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
782                            sizeof(rte_ixgbe_macsec_strings[0]))
783
784 /* Per-queue statistics */
785 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
786         {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
787         {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
788         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
789         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
790 };
791
792 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
793                            sizeof(rte_ixgbe_rxq_strings[0]))
794 #define IXGBE_NB_RXQ_PRIO_VALUES 8
795
796 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
797         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
798         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
799         {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
800                 pxon2offc)},
801 };
802
803 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
804                            sizeof(rte_ixgbe_txq_strings[0]))
805 #define IXGBE_NB_TXQ_PRIO_VALUES 8
806
807 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
808         {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
809 };
810
811 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
812                 sizeof(rte_ixgbevf_stats_strings[0]))
813
814 /**
815  * Atomically reads the link status information from the global
816  * structure rte_eth_dev.
817  *
818  * @param dev
819  *   Pointer to the structure rte_eth_dev to read from.
820  * @param link
821  *   Pointer to the buffer in which the link status is saved.
822  * @return
823  *   - On success, zero.
824  *   - On failure, negative value.
825  */
826 static inline int
827 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
828                                 struct rte_eth_link *link)
829 {
830         struct rte_eth_link *dst = link;
831         struct rte_eth_link *src = &(dev->data->dev_link);
832
833         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
834                                         *(uint64_t *)src) == 0)
835                 return -1;
836
837         return 0;
838 }
839
840 /**
841  * Atomically writes the link status information into the global
842  * structure rte_eth_dev.
843  *
844  * @param dev
845  *   Pointer to the structure rte_eth_dev to write to.
846  * @param link
847  *   Pointer to the buffer holding the link status to be written.
848  * @return
849  *   - On success, zero.
850  *   - On failure, negative value.
851  */
852 static inline int
853 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
854                                 struct rte_eth_link *link)
855 {
856         struct rte_eth_link *dst = &(dev->data->dev_link);
857         struct rte_eth_link *src = link;
858
859         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
860                                         *(uint64_t *)src) == 0)
861                 return -1;
862
863         return 0;
864 }
865
866 /*
867  * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
868  */
869 static inline int
870 ixgbe_is_sfp(struct ixgbe_hw *hw)
871 {
872         switch (hw->phy.type) {
873         case ixgbe_phy_sfp_avago:
874         case ixgbe_phy_sfp_ftl:
875         case ixgbe_phy_sfp_intel:
876         case ixgbe_phy_sfp_unknown:
877         case ixgbe_phy_sfp_passive_tyco:
878         case ixgbe_phy_sfp_passive_unknown:
879                 return 1;
880         default:
881                 return 0;
882         }
883 }
884
885 static inline int32_t
886 ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
887 {
888         uint32_t ctrl_ext;
889         int32_t status;
890
891         status = ixgbe_reset_hw(hw);
892
893         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
894         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
895         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
896         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
897         IXGBE_WRITE_FLUSH(hw);
898
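        /* A missing SFP module is not fatal at this point; report success. */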
899         if (status == IXGBE_ERR_SFP_NOT_PRESENT)
900                 status = IXGBE_SUCCESS;
901         return status;
902 }
903
904 static inline void
905 ixgbe_enable_intr(struct rte_eth_dev *dev)
906 {
907         struct ixgbe_interrupt *intr =
908                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
909         struct ixgbe_hw *hw =
910                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
911
912         IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
913         IXGBE_WRITE_FLUSH(hw);
914 }
915
916 /*
917  * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
918  */
919 static void
920 ixgbe_disable_intr(struct ixgbe_hw *hw)
921 {
922         PMD_INIT_FUNC_TRACE();
923
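        /* 82598 masks everything with a single EIMC write; later MACs also need both EIMC_EX registers cleared. */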
924         if (hw->mac.type == ixgbe_mac_82598EB) {
925                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
926         } else {
927                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
928                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
929                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
930         }
931         IXGBE_WRITE_FLUSH(hw);
932 }
933
934 /*
935  * This function resets queue statistics mapping registers.
936  * From Niantic datasheet, Initialization of Statistics section:
937  * "...if software requires the queue counters, the RQSMR and TQSM registers
938  * must be re-programmed following a device reset."
939  */
940 static void
941 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
942 {
943         uint32_t i;
944
945         for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
946                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
947                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
948         }
949 }
950
951
952 static int
953 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
954                                   uint16_t queue_id,
955                                   uint8_t stat_idx,
956                                   uint8_t is_rx)
957 {
958 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
959 #define NB_QMAP_FIELDS_PER_QSM_REG 4
960 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
961
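        /* Each RQSMR/TQSM register packs four 8-bit queue-to-stat-index mappings; n selects the register and offset the byte within it. */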
962         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
963         struct ixgbe_stat_mapping_registers *stat_mappings =
964                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
965         uint32_t qsmr_mask = 0;
966         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
967         uint32_t q_map;
968         uint8_t n, offset;
969
970         if ((hw->mac.type != ixgbe_mac_82599EB) &&
971                 (hw->mac.type != ixgbe_mac_X540) &&
972                 (hw->mac.type != ixgbe_mac_X550) &&
973                 (hw->mac.type != ixgbe_mac_X550EM_x) &&
974                 (hw->mac.type != ixgbe_mac_X550EM_a))
975                 return -ENOSYS;
976
977         PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
978                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
979                      queue_id, stat_idx);
980
981         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
982         if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
983                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
984                 return -EIO;
985         }
986         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
987
988         /* Now clear any previous stat_idx set */
989         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
990         if (!is_rx)
991                 stat_mappings->tqsm[n] &= ~clearing_mask;
992         else
993                 stat_mappings->rqsmr[n] &= ~clearing_mask;
994
995         q_map = (uint32_t)stat_idx;
996         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
997         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
998         if (!is_rx)
999                 stat_mappings->tqsm[n] |= qsmr_mask;
1000         else
1001                 stat_mappings->rqsmr[n] |= qsmr_mask;
1002
1003         PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
1004                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
1005                      queue_id, stat_idx);
1006         PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
1007                      is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
1008
1009         /* Now write the mapping in the appropriate register */
1010         if (is_rx) {
1011                 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
1012                              stat_mappings->rqsmr[n], n);
1013                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
1014         } else {
1015                 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
1016                              stat_mappings->tqsm[n], n);
1017                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
1018         }
1019         return 0;
1020 }
1021
1022 static void
1023 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
1024 {
1025         struct ixgbe_stat_mapping_registers *stat_mappings =
1026                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
1027         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1028         int i;
1029
1030         /* write whatever was in stat mapping table to the NIC */
1031         for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
1032                 /* rx */
1033                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
1034
1035                 /* tx */
1036                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
1037         }
1038 }
1039
1040 static void
1041 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
1042 {
1043         uint8_t i;
1044         struct ixgbe_dcb_tc_config *tc;
1045         uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
1046
1047         dcb_config->num_tcs.pg_tcs = dcb_max_tc;
1048         dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
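        /* Split bandwidth evenly across the TCs; odd-numbered TCs take the rounding remainder so the shares sum to 100%. */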
1049         for (i = 0; i < dcb_max_tc; i++) {
1050                 tc = &dcb_config->tc_config[i];
1051                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
1052                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
1053                                  (uint8_t)(100/dcb_max_tc + (i & 1));
1054                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
1055                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
1056                                  (uint8_t)(100/dcb_max_tc + (i & 1));
1057                 tc->pfc = ixgbe_dcb_pfc_disabled;
1058         }
1059
1060         /* Initialize default user priority to traffic class mapping, UPx->TC0 */
1061         tc = &dcb_config->tc_config[0];
1062         tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
1063         tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
1064         for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
1065                 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
1066                 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
1067         }
1068         dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
1069         dcb_config->pfc_mode_enable = false;
1070         dcb_config->vt_mode = true;
1071         dcb_config->round_robin_enable = false;
1072         /* support all DCB capabilities in 82599 */
1073         dcb_config->support.capabilities = 0xFF;
1074
1075         /* Only 4 TCs are supported on X540 and X550 devices */
1076         if (hw->mac.type == ixgbe_mac_X540 ||
1077                 hw->mac.type == ixgbe_mac_X550 ||
1078                 hw->mac.type == ixgbe_mac_X550EM_x ||
1079                 hw->mac.type == ixgbe_mac_X550EM_a) {
1080                 dcb_config->num_tcs.pg_tcs = 4;
1081                 dcb_config->num_tcs.pfc_tcs = 4;
1082         }
1083 }
1084
1085 /*
1086  * Ensure that all locks are released before first NVM or PHY access
1087  */
1088 static void
1089 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
1090 {
1091         uint16_t mask;
1092
1093         /*
1094          * The PHY lock should not fail at this early stage. If it does, it is
1095          * due to an improper exit of the application, so force the release of
1096          * the faulty lock. The common lock is released automatically by the
1097          * swfw_sync function.
1098          */
1099         mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
1100         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1101                 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
1102         }
1103         ixgbe_release_swfw_semaphore(hw, mask);
1104
1105         /*
1106          * These locks are trickier since they are common to all ports; however,
1107          * the swfw_sync retries last long enough (1 s) to be almost sure that,
1108          * if the lock cannot be taken, it is due to an improperly held
1109          * semaphore.
1110          */
1111         mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
1112         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1113                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
1114         }
1115         ixgbe_release_swfw_semaphore(hw, mask);
1116 }
1117
1118 /*
1119  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
1120  * It returns 0 on success.
1121  */
1122 static int
1123 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
1124 {
1125         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1126         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1127         struct ixgbe_hw *hw =
1128                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1129         struct ixgbe_vfta *shadow_vfta =
1130                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1131         struct ixgbe_hwstrip *hwstrip =
1132                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1133         struct ixgbe_dcb_config *dcb_config =
1134                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
1135         struct ixgbe_filter_info *filter_info =
1136                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1137         struct ixgbe_bw_conf *bw_conf =
1138                 IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
1139         uint32_t ctrl_ext;
1140         uint16_t csum;
1141         int diag, i;
1142
1143         PMD_INIT_FUNC_TRACE();
1144
1145         eth_dev->dev_ops = &ixgbe_eth_dev_ops;
1146         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1147         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1148         eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
1149
1150         /*
1151          * For secondary processes, we don't initialise any further as primary
1152          * has already done this work. Only check we don't need a different
1153          * RX and TX function.
1154          */
1155         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1156                 struct ixgbe_tx_queue *txq;
1157                 /* TX queue function in primary, set by last queue initialized;
1158                  * Tx queue may not have been initialized by the primary process
1159                  */
1160                 if (eth_dev->data->tx_queues) {
1161                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1162                         ixgbe_set_tx_function(eth_dev, txq);
1163                 } else {
1164                         /* Use default TX function if we get here */
1165                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1166                                      "Using default TX function.");
1167                 }
1168
1169                 ixgbe_set_rx_function(eth_dev);
1170
1171                 return 0;
1172         }
1173
1174 #ifdef RTE_LIBRTE_SECURITY
1175         /* Initialize security_ctx only for the primary process */
1176         eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
1177         if (eth_dev->security_ctx == NULL)
1178                 return -ENOMEM;
1179 #endif
1180
1181         rte_eth_copy_pci_info(eth_dev, pci_dev);
1182
1183         /* Vendor and Device ID need to be set before init of shared code */
1184         hw->device_id = pci_dev->id.device_id;
1185         hw->vendor_id = pci_dev->id.vendor_id;
1186         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1187         hw->allow_unsupported_sfp = 1;
1188
1189         /* Initialize the shared code (base driver) */
1190 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1191         diag = ixgbe_bypass_init_shared_code(hw);
1192 #else
1193         diag = ixgbe_init_shared_code(hw);
1194 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1195
1196         if (diag != IXGBE_SUCCESS) {
1197                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1198                 return -EIO;
1199         }
1200
1201         /* pick up the PCI bus settings for reporting later */
1202         ixgbe_get_bus_info(hw);
1203
1204         /* Unlock any pending hardware semaphore */
1205         ixgbe_swfw_lock_reset(hw);
1206
1207         /* Initialize DCB configuration */
1208         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1209         ixgbe_dcb_init(hw, dcb_config);
1210         /* Set default Hardware Flow Control settings */
1211         hw->fc.requested_mode = ixgbe_fc_full;
1212         hw->fc.current_mode = ixgbe_fc_full;
1213         hw->fc.pause_time = IXGBE_FC_PAUSE;
1214         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1215                 hw->fc.low_water[i] = IXGBE_FC_LO;
1216                 hw->fc.high_water[i] = IXGBE_FC_HI;
1217         }
1218         hw->fc.send_xon = 1;
1219
1220         /* Make sure we have a good EEPROM before we read from it */
1221         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1222         if (diag != IXGBE_SUCCESS) {
1223                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1224                 return -EIO;
1225         }
1226
1227 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1228         diag = ixgbe_bypass_init_hw(hw);
1229 #else
1230         diag = ixgbe_init_hw(hw);
1231 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1232
1233         /*
1234          * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1235          * is called too soon after the kernel driver unbinding/binding occurs.
1236          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1237          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1238          * also called. See ixgbe_identify_phy_82599(). The reason for the
1239          * failure is not known, and it only occurs when virtualisation features
1240          * are disabled in the BIOS. A delay of 100ms was found to be enough by
1241          * trial-and-error, and is doubled to be safe.
1242          */
1243         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1244                 rte_delay_ms(200);
1245                 diag = ixgbe_init_hw(hw);
1246         }
1247
1248         if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
1249                 diag = IXGBE_SUCCESS;
1250
1251         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1252                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1253                              "LOM.  Please be aware there may be issues associated "
1254                              "with your hardware.");
1255                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1256                              "please contact your Intel or hardware representative "
1257                              "who provided you with this hardware.");
1258         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1259                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1260         if (diag) {
1261                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1262                 return -EIO;
1263         }
1264
1265         /* Reset the hw statistics */
1266         ixgbe_dev_stats_reset(eth_dev);
1267
1268         /* disable interrupt */
1269         ixgbe_disable_intr(hw);
1270
1271         /* reset mappings for queue statistics hw counters */
1272         ixgbe_reset_qstat_mappings(hw);
1273
1274         /* Allocate memory for storing MAC addresses */
1275         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1276                                                hw->mac.num_rar_entries, 0);
1277         if (eth_dev->data->mac_addrs == NULL) {
1278                 PMD_INIT_LOG(ERR,
1279                              "Failed to allocate %u bytes needed to store "
1280                              "MAC addresses",
1281                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1282                 return -ENOMEM;
1283         }
1284         /* Copy the permanent MAC address */
1285         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1286                         &eth_dev->data->mac_addrs[0]);
1287
1288         /* Allocate memory for storing hash filter MAC addresses */
1289         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1290                                                     IXGBE_VMDQ_NUM_UC_MAC, 0);
1291         if (eth_dev->data->hash_mac_addrs == NULL) {
1292                 PMD_INIT_LOG(ERR,
1293                              "Failed to allocate %d bytes needed to store MAC addresses",
1294                              ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1295                 return -ENOMEM;
1296         }
1297
1298         /* initialize the vfta */
1299         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1300
1301         /* initialize the hw strip bitmap*/
1302         /* initialize the hw strip bitmap */
1303
1304         /* initialize PF if max_vfs not zero */
1305         ixgbe_pf_host_init(eth_dev);
1306
1307         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1308         /* let hardware know driver is loaded */
1309         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1310         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1311         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1312         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1313         IXGBE_WRITE_FLUSH(hw);
1314
1315         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1316                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1317                              (int) hw->mac.type, (int) hw->phy.type,
1318                              (int) hw->phy.sfp_type);
1319         else
1320                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1321                              (int) hw->mac.type, (int) hw->phy.type);
1322
1323         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1324                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1325                      pci_dev->id.device_id);
1326
1327         rte_intr_callback_register(intr_handle,
1328                                    ixgbe_dev_interrupt_handler, eth_dev);
1329
1330         /* enable uio/vfio intr/eventfd mapping */
1331         rte_intr_enable(intr_handle);
1332
1333         /* enable support intr */
1334         ixgbe_enable_intr(eth_dev);
1335
1336         /* initialize filter info */
1337         memset(filter_info, 0,
1338                sizeof(struct ixgbe_filter_info));
1339
1340         /* initialize 5tuple filter list */
1341         TAILQ_INIT(&filter_info->fivetuple_list);
1342
1343         /* initialize flow director filter list & hash */
1344         ixgbe_fdir_filter_init(eth_dev);
1345
1346         /* initialize l2 tunnel filter list & hash */
1347         ixgbe_l2_tn_filter_init(eth_dev);
1348
1349         /* initialize flow filter lists */
1350         ixgbe_filterlist_init();
1351
1352         /* initialize bandwidth configuration info */
1353         memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
1354
1355         /* initialize Traffic Manager configuration */
1356         ixgbe_tm_conf_init(eth_dev);
1357
1358         return 0;
1359 }
1360
1361 static int
1362 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1363 {
1364         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1365         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1366         struct ixgbe_hw *hw;
1367
1368         PMD_INIT_FUNC_TRACE();
1369
1370         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1371                 return -EPERM;
1372
1373         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1374
1375         if (hw->adapter_stopped == 0)
1376                 ixgbe_dev_close(eth_dev);
1377
1378         eth_dev->dev_ops = NULL;
1379         eth_dev->rx_pkt_burst = NULL;
1380         eth_dev->tx_pkt_burst = NULL;
1381
1382         /* Unlock any pending hardware semaphore */
1383         ixgbe_swfw_lock_reset(hw);
1384
1385         /* disable uio intr before callback unregister */
1386         rte_intr_disable(intr_handle);
1387         rte_intr_callback_unregister(intr_handle,
1388                                      ixgbe_dev_interrupt_handler, eth_dev);
1389
1390         /* uninitialize PF if max_vfs not zero */
1391         ixgbe_pf_host_uninit(eth_dev);
1392
1393         rte_free(eth_dev->data->mac_addrs);
1394         eth_dev->data->mac_addrs = NULL;
1395
1396         rte_free(eth_dev->data->hash_mac_addrs);
1397         eth_dev->data->hash_mac_addrs = NULL;
1398
1399         /* remove all the fdir filters & hash */
1400         ixgbe_fdir_filter_uninit(eth_dev);
1401
1402         /* remove all the L2 tunnel filters & hash */
1403         ixgbe_l2_tn_filter_uninit(eth_dev);
1404
1405         /* Remove all ntuple filters of the device */
1406         ixgbe_ntuple_filter_uninit(eth_dev);
1407
1408         /* clear all the filters list */
1409         ixgbe_filterlist_flush();
1410
1411         /* Remove all Traffic Manager configuration */
1412         ixgbe_tm_conf_uninit(eth_dev);
1413
1414 #ifdef RTE_LIBRTE_SECURITY
1415         rte_free(eth_dev->security_ctx);
1416 #endif
1417
1418         return 0;
1419 }
1420
1421 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
1422 {
1423         struct ixgbe_filter_info *filter_info =
1424                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1425         struct ixgbe_5tuple_filter *p_5tuple;
1426
1427         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
1428                 TAILQ_REMOVE(&filter_info->fivetuple_list,
1429                              p_5tuple,
1430                              entries);
1431                 rte_free(p_5tuple);
1432         }
1433         memset(filter_info->fivetuple_mask, 0,
1434                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1435
1436         return 0;
1437 }
1438
1439 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
1440 {
1441         struct ixgbe_hw_fdir_info *fdir_info =
1442                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1443         struct ixgbe_fdir_filter *fdir_filter;
1444
1445         if (fdir_info->hash_map)
1446                 rte_free(fdir_info->hash_map);
1447         if (fdir_info->hash_handle)
1448                 rte_hash_free(fdir_info->hash_handle);
1449
1450         while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1451                 TAILQ_REMOVE(&fdir_info->fdir_list,
1452                              fdir_filter,
1453                              entries);
1454                 rte_free(fdir_filter);
1455         }
1456
1457         return 0;
1458 }
1459
1460 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
1461 {
1462         struct ixgbe_l2_tn_info *l2_tn_info =
1463                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1464         struct ixgbe_l2_tn_filter *l2_tn_filter;
1465
1466         if (l2_tn_info->hash_map)
1467                 rte_free(l2_tn_info->hash_map);
1468         if (l2_tn_info->hash_handle)
1469                 rte_hash_free(l2_tn_info->hash_handle);
1470
1471         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
1472                 TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
1473                              l2_tn_filter,
1474                              entries);
1475                 rte_free(l2_tn_filter);
1476         }
1477
1478         return 0;
1479 }
1480
1481 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
1482 {
1483         struct ixgbe_hw_fdir_info *fdir_info =
1484                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1485         char fdir_hash_name[RTE_HASH_NAMESIZE];
1486         struct rte_hash_parameters fdir_hash_params = {
1487                 .name = fdir_hash_name,
1488                 .entries = IXGBE_MAX_FDIR_FILTER_NUM,
1489                 .key_len = sizeof(union ixgbe_atr_input),
1490                 .hash_func = rte_hash_crc,
1491                 .hash_func_init_val = 0,
1492                 .socket_id = rte_socket_id(),
1493         };
1494
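        /*
         * The rte_hash table maps a flow director key (union ixgbe_atr_input)
         * to a slot index using rte_hash_crc; hash_map holds one filter
         * pointer per possible slot (IXGBE_MAX_FDIR_FILTER_NUM) so the full
         * entry can be recovered from a hash lookup.
         */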
1495         TAILQ_INIT(&fdir_info->fdir_list);
1496         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1497                  "fdir_%s", eth_dev->device->name);
1498         fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
1499         if (!fdir_info->hash_handle) {
1500                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1501                 return -EINVAL;
1502         }
1503         fdir_info->hash_map = rte_zmalloc("ixgbe",
1504                                           sizeof(struct ixgbe_fdir_filter *) *
1505                                           IXGBE_MAX_FDIR_FILTER_NUM,
1506                                           0);
1507         if (!fdir_info->hash_map) {
1508                 PMD_INIT_LOG(ERR,
1509                              "Failed to allocate memory for fdir hash map!");
1510                 return -ENOMEM;
1511         }
1512         fdir_info->mask_added = FALSE;
1513
1514         return 0;
1515 }
1516
1517 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
1518 {
1519         struct ixgbe_l2_tn_info *l2_tn_info =
1520                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1521         char l2_tn_hash_name[RTE_HASH_NAMESIZE];
1522         struct rte_hash_parameters l2_tn_hash_params = {
1523                 .name = l2_tn_hash_name,
1524                 .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
1525                 .key_len = sizeof(struct ixgbe_l2_tn_key),
1526                 .hash_func = rte_hash_crc,
1527                 .hash_func_init_val = 0,
1528                 .socket_id = rte_socket_id(),
1529         };
1530
1531         TAILQ_INIT(&l2_tn_info->l2_tn_list);
1532         snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
1533                  "l2_tn_%s", eth_dev->device->name);
1534         l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
1535         if (!l2_tn_info->hash_handle) {
1536                 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
1537                 return -EINVAL;
1538         }
1539         l2_tn_info->hash_map = rte_zmalloc("ixgbe",
1540                                    sizeof(struct ixgbe_l2_tn_filter *) *
1541                                    IXGBE_MAX_L2_TN_FILTER_NUM,
1542                                    0);
1543         if (!l2_tn_info->hash_map) {
1544                 PMD_INIT_LOG(ERR,
1545                         "Failed to allocate memory for L2 TN hash map!");
1546                 return -ENOMEM;
1547         }
1548         l2_tn_info->e_tag_en = FALSE;
1549         l2_tn_info->e_tag_fwd_en = FALSE;
1550         l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
1551
1552         return 0;
1553 }
1554 /*
1555  * Negotiate mailbox API version with the PF.
1556  * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
1557  * Then we try to negotiate starting with the most recent one.
1558  * If all negotiation attempts fail, then we will proceed with
1559  * the default one (ixgbe_mbox_api_10).
1560  */
1561 static void
1562 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1563 {
1564         int32_t i;
1565
1566         /* start with highest supported, proceed down */
1567         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1568                 ixgbe_mbox_api_12,
1569                 ixgbe_mbox_api_11,
1570                 ixgbe_mbox_api_10,
1571         };
1572
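        /*
         * The loop body is intentionally empty: all the work happens in the
         * loop condition, which walks sup_ver[] from newest to oldest and
         * stops at the first version the PF accepts (or runs off the end,
         * leaving the default API version in place).
         */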
1573         for (i = 0;
1574                         i != RTE_DIM(sup_ver) &&
1575                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1576                         i++)
1577                 ;
1578 }
1579
1580 static void
1581 generate_random_mac_addr(struct ether_addr *mac_addr)
1582 {
1583         uint64_t random;
1584
1585         /* Set Organizationally Unique Identifier (OUI) prefix. */
1586         mac_addr->addr_bytes[0] = 0x00;
1587         mac_addr->addr_bytes[1] = 0x09;
1588         mac_addr->addr_bytes[2] = 0xC0;
1589         /* Force indication of locally assigned MAC address. */
1590         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1591         /* Generate the last 3 bytes of the MAC address with a random number. */
1592         random = rte_rand();
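        /*
         * Copy the first 3 bytes of the 64-bit random value as laid out in
         * memory; which bytes those are depends on endianness, but for
         * random data the distinction does not matter.
         */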
1593         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1594 }
1595
1596 /*
1597  * Virtual Function device init
1598  */
1599 static int
1600 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1601 {
1602         int diag;
1603         uint32_t tc, tcs;
1604         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1605         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1606         struct ixgbe_hw *hw =
1607                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1608         struct ixgbe_vfta *shadow_vfta =
1609                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1610         struct ixgbe_hwstrip *hwstrip =
1611                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1612         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1613
1614         PMD_INIT_FUNC_TRACE();
1615
1616         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1617         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1618         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1619
1620         /* For secondary processes, we don't initialise any further as primary
1621          * has already done this work. Only check we don't need a different
1622          * RX function
1623          */
1624         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1625                 struct ixgbe_tx_queue *txq;
1626                 /* TX queue function in primary, set by last queue initialized;
1627                  * Tx queue may not have been initialized by the primary process
1628                  */
1629                 if (eth_dev->data->tx_queues) {
1630                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1631                         ixgbe_set_tx_function(eth_dev, txq);
1632                 } else {
1633                         /* Use default TX function if we get here */
1634                         PMD_INIT_LOG(NOTICE,
1635                                      "No TX queues configured yet. Using default TX function.");
1636                 }
1637
1638                 ixgbe_set_rx_function(eth_dev);
1639
1640                 return 0;
1641         }
1642
1643         rte_eth_copy_pci_info(eth_dev, pci_dev);
1644
1645         hw->device_id = pci_dev->id.device_id;
1646         hw->vendor_id = pci_dev->id.vendor_id;
1647         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1648
1649         /* initialize the vfta */
1650         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1651
1652         /* initialize the hw strip bitmap */
1653         memset(hwstrip, 0, sizeof(*hwstrip));
1654
1655         /* Initialize the shared code (base driver) */
1656         diag = ixgbe_init_shared_code(hw);
1657         if (diag != IXGBE_SUCCESS) {
1658                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1659                 return -EIO;
1660         }
1661
1662         /* init_mailbox_params */
1663         hw->mbx.ops.init_params(hw);
1664
1665         /* Reset the hw statistics */
1666         ixgbevf_dev_stats_reset(eth_dev);
1667
1668         /* Disable the interrupts for VF */
1669         ixgbevf_intr_disable(hw);
1670
1671         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1672         diag = hw->mac.ops.reset_hw(hw);
1673
1674         /*
1675          * The VF reset operation returns IXGBE_ERR_INVALID_MAC_ADDR when
1676          * the underlying PF driver has not assigned a MAC address to the VF.
1677          * In this case, assign a random MAC address.
1678          */
1679         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1680                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1681                 return diag;
1682         }
1683
1684         /* negotiate mailbox API version to use with the PF. */
1685         ixgbevf_negotiate_api(hw);
1686
1687         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1688         ixgbevf_get_queues(hw, &tcs, &tc);
1689
1690         /* Allocate memory for storing MAC addresses */
1691         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1692                                                hw->mac.num_rar_entries, 0);
1693         if (eth_dev->data->mac_addrs == NULL) {
1694                 PMD_INIT_LOG(ERR,
1695                              "Failed to allocate %u bytes needed to store "
1696                              "MAC addresses",
1697                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1698                 return -ENOMEM;
1699         }
1700
1701         /* Generate a random MAC address, if none was assigned by PF. */
1702         if (is_zero_ether_addr(perm_addr)) {
1703                 generate_random_mac_addr(perm_addr);
1704                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1705                 if (diag) {
1706                         rte_free(eth_dev->data->mac_addrs);
1707                         eth_dev->data->mac_addrs = NULL;
1708                         return diag;
1709                 }
1710                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1711                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1712                              "%02x:%02x:%02x:%02x:%02x:%02x",
1713                              perm_addr->addr_bytes[0],
1714                              perm_addr->addr_bytes[1],
1715                              perm_addr->addr_bytes[2],
1716                              perm_addr->addr_bytes[3],
1717                              perm_addr->addr_bytes[4],
1718                              perm_addr->addr_bytes[5]);
1719         }
1720
1721         /* Copy the permanent MAC address */
1722         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1723
1724         /* reset the hardware with the new settings */
1725         diag = hw->mac.ops.start_hw(hw);
1726         switch (diag) {
1727         case  0:
1728                 break;
1729
1730         default:
1731                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1732                 return -EIO;
1733         }
1734
1735         rte_intr_callback_register(intr_handle,
1736                                    ixgbevf_dev_interrupt_handler, eth_dev);
1737         rte_intr_enable(intr_handle);
1738         ixgbevf_intr_enable(hw);
1739
1740         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1741                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1742                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1743
1744         return 0;
1745 }
1746
1747 /* Virtual Function device uninit */
1748
1749 static int
1750 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1751 {
1752         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1753         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1754         struct ixgbe_hw *hw;
1755
1756         PMD_INIT_FUNC_TRACE();
1757
1758         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1759                 return -EPERM;
1760
1761         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1762
1763         if (hw->adapter_stopped == 0)
1764                 ixgbevf_dev_close(eth_dev);
1765
1766         eth_dev->dev_ops = NULL;
1767         eth_dev->rx_pkt_burst = NULL;
1768         eth_dev->tx_pkt_burst = NULL;
1769
1770         /* Disable the interrupts for VF */
1771         ixgbevf_intr_disable(hw);
1772
1773         rte_free(eth_dev->data->mac_addrs);
1774         eth_dev->data->mac_addrs = NULL;
1775
1776         rte_intr_disable(intr_handle);
1777         rte_intr_callback_unregister(intr_handle,
1778                                      ixgbevf_dev_interrupt_handler, eth_dev);
1779
1780         return 0;
1781 }
1782
1783 static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1784         struct rte_pci_device *pci_dev)
1785 {
1786         return rte_eth_dev_pci_generic_probe(pci_dev,
1787                 sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
1788 }
1789
1790 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
1791 {
1792         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
1793 }
1794
1795 static struct rte_pci_driver rte_ixgbe_pmd = {
1796         .id_table = pci_id_ixgbe_map,
1797         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
1798                      RTE_PCI_DRV_IOVA_AS_VA,
1799         .probe = eth_ixgbe_pci_probe,
1800         .remove = eth_ixgbe_pci_remove,
1801 };
1802
1803 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1804         struct rte_pci_device *pci_dev)
1805 {
1806         return rte_eth_dev_pci_generic_probe(pci_dev,
1807                 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
1808 }
1809
1810 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
1811 {
1812         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
1813 }
1814
1815 /*
1816  * virtual function driver struct
1817  */
1818 static struct rte_pci_driver rte_ixgbevf_pmd = {
1819         .id_table = pci_id_ixgbevf_map,
1820         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
1821         .probe = eth_ixgbevf_pci_probe,
1822         .remove = eth_ixgbevf_pci_remove,
1823 };
1824
1825 static int
1826 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1827 {
1828         struct ixgbe_hw *hw =
1829                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1830         struct ixgbe_vfta *shadow_vfta =
1831                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1832         uint32_t vfta;
1833         uint32_t vid_idx;
1834         uint32_t vid_bit;
1835
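        /*
         * The VFTA is an array of 128 32-bit registers covering all 4096
         * VLAN IDs: bits [11:5] of the VLAN ID select the register (vid_idx)
         * and bits [4:0] select the bit within it (vid_bit).
         */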
1836         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1837         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1838         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1839         if (on)
1840                 vfta |= vid_bit;
1841         else
1842                 vfta &= ~vid_bit;
1843         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1844
1845         /* update local VFTA copy */
1846         shadow_vfta->vfta[vid_idx] = vfta;
1847
1848         return 0;
1849 }
1850
1851 static void
1852 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1853 {
1854         if (on)
1855                 ixgbe_vlan_hw_strip_enable(dev, queue);
1856         else
1857                 ixgbe_vlan_hw_strip_disable(dev, queue);
1858 }
1859
1860 static int
1861 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1862                     enum rte_vlan_type vlan_type,
1863                     uint16_t tpid)
1864 {
1865         struct ixgbe_hw *hw =
1866                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1867         int ret = 0;
1868         uint32_t reg;
1869         uint32_t qinq;
1870
1871         qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1872         qinq &= IXGBE_DMATXCTL_GDV;
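        /*
         * qinq is non-zero only when double VLAN (QinQ) is enabled, i.e. the
         * GDV bit was set by ixgbe_vlan_hw_extend_enable(); a distinct inner
         * TPID only makes sense in that mode.
         */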
1873
1874         switch (vlan_type) {
1875         case ETH_VLAN_TYPE_INNER:
1876                 if (qinq) {
1877                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1878                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1879                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1880                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1881                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1882                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1883                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1884                 } else {
1885                         ret = -ENOTSUP;
1886                         PMD_DRV_LOG(ERR, "Inner type is not supported"
1887                                     " by single VLAN");
1888                 }
1889                 break;
1890         case ETH_VLAN_TYPE_OUTER:
1891                 if (qinq) {
1892                         /* Only the high 16 bits are valid */
1893                         IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1894                                         IXGBE_EXVET_VET_EXT_SHIFT);
1895                 } else {
1896                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1897                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1898                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1899                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1900                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1901                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1902                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1903                 }
1904
1905                 break;
1906         default:
1907                 ret = -EINVAL;
1908                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1909                 break;
1910         }
1911
1912         return ret;
1913 }
1914
1915 void
1916 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1917 {
1918         struct ixgbe_hw *hw =
1919                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1920         uint32_t vlnctrl;
1921
1922         PMD_INIT_FUNC_TRACE();
1923
1924         /* Filter Table Disable */
1925         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1926         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1927
1928         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1929 }
1930
1931 void
1932 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1933 {
1934         struct ixgbe_hw *hw =
1935                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1936         struct ixgbe_vfta *shadow_vfta =
1937                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1938         uint32_t vlnctrl;
1939         uint16_t i;
1940
1941         PMD_INIT_FUNC_TRACE();
1942
1943         /* Filter Table Enable */
1944         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1945         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1946         vlnctrl |= IXGBE_VLNCTRL_VFE;
1947
1948         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1949
1950         /* write whatever is in local vfta copy */
1951         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1952                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1953 }
1954
1955 static void
1956 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1957 {
1958         struct ixgbe_hwstrip *hwstrip =
1959                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1960         struct ixgbe_rx_queue *rxq;
1961
1962         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1963                 return;
1964
1965         if (on)
1966                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1967         else
1968                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1969
1970         if (queue >= dev->data->nb_rx_queues)
1971                 return;
1972
1973         rxq = dev->data->rx_queues[queue];
1974
1975         if (on)
1976                 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1977         else
1978                 rxq->vlan_flags = PKT_RX_VLAN;
1979 }
1980
1981 static void
1982 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1983 {
1984         struct ixgbe_hw *hw =
1985                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1986         uint32_t ctrl;
1987
1988         PMD_INIT_FUNC_TRACE();
1989
1990         if (hw->mac.type == ixgbe_mac_82598EB) {
1991                 /* No queue-level support */
1992                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
1993                 return;
1994         }
1995
1996         /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
1997         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1998         ctrl &= ~IXGBE_RXDCTL_VME;
1999         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2000
2001         /* record the setting for HW strip per queue */
2002         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
2003 }
2004
2005 static void
2006 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
2007 {
2008         struct ixgbe_hw *hw =
2009                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2010         uint32_t ctrl;
2011
2012         PMD_INIT_FUNC_TRACE();
2013
2014         if (hw->mac.type == ixgbe_mac_82598EB) {
2015                 /* No queue-level support */
2016                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
2017                 return;
2018         }
2019
2020         /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
2021         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2022         ctrl |= IXGBE_RXDCTL_VME;
2023         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2024
2025         /* record the setting for HW strip per queue */
2026         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2027 }
2028
2029 void
2030 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
2031 {
2032         struct ixgbe_hw *hw =
2033                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2034         uint32_t ctrl;
2035         uint16_t i;
2036         struct ixgbe_rx_queue *rxq;
2037
2038         PMD_INIT_FUNC_TRACE();
2039
2040         if (hw->mac.type == ixgbe_mac_82598EB) {
2041                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2042                 ctrl &= ~IXGBE_VLNCTRL_VME;
2043                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2044         } else {
2045                 /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
2046                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2047                         rxq = dev->data->rx_queues[i];
2048                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2049                         ctrl &= ~IXGBE_RXDCTL_VME;
2050                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2051
2052                         /* record the setting for HW strip per queue */
2053                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
2054                 }
2055         }
2056 }
2057
2058 void
2059 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
2060 {
2061         struct ixgbe_hw *hw =
2062                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2063         uint32_t ctrl;
2064         uint16_t i;
2065         struct ixgbe_rx_queue *rxq;
2066
2067         PMD_INIT_FUNC_TRACE();
2068
2069         if (hw->mac.type == ixgbe_mac_82598EB) {
2070                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2071                 ctrl |= IXGBE_VLNCTRL_VME;
2072                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2073         } else {
2074                 /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
2075                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2076                         rxq = dev->data->rx_queues[i];
2077                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2078                         ctrl |= IXGBE_RXDCTL_VME;
2079                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2080
2081                         /* record the setting for HW strip per queue */
2082                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
2083                 }
2084         }
2085 }
2086
2087 static void
2088 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2089 {
2090         struct ixgbe_hw *hw =
2091                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2092         uint32_t ctrl;
2093
2094         PMD_INIT_FUNC_TRACE();
2095
2096         /* DMATXCTL: Generic Double VLAN Disable */
2097         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2098         ctrl &= ~IXGBE_DMATXCTL_GDV;
2099         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2100
2101         /* CTRL_EXT: Global Double VLAN Disable */
2102         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2103         ctrl &= ~IXGBE_EXTENDED_VLAN;
2104         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2105
2106 }
2107
2108 static void
2109 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2110 {
2111         struct ixgbe_hw *hw =
2112                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2113         uint32_t ctrl;
2114
2115         PMD_INIT_FUNC_TRACE();
2116
2117         /* DMATXCTL: Generic Double VLAN Enable */
2118         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2119         ctrl |= IXGBE_DMATXCTL_GDV;
2120         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2121
2122         /* CTRL_EXT: Global Double VLAN Enable */
2123         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2124         ctrl |= IXGBE_EXTENDED_VLAN;
2125         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2126
2127         /* Clear pooling mode of PFVTCTL. It's required by X550. */
2128         if (hw->mac.type == ixgbe_mac_X550 ||
2129             hw->mac.type == ixgbe_mac_X550EM_x ||
2130             hw->mac.type == ixgbe_mac_X550EM_a) {
2131                 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2132                 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2133                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2134         }
2135
2136         /*
2137          * VET EXT field in the EXVET register = 0x8100 by default
2138          * so there is no need to change it; the same applies to the VT field of the DMATXCTL register
2139          */
2140 }
2141
2142 static int
2143 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2144 {
2145         if (mask & ETH_VLAN_STRIP_MASK) {
2146                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2147                         ixgbe_vlan_hw_strip_enable_all(dev);
2148                 else
2149                         ixgbe_vlan_hw_strip_disable_all(dev);
2150         }
2151
2152         if (mask & ETH_VLAN_FILTER_MASK) {
2153                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2154                         ixgbe_vlan_hw_filter_enable(dev);
2155                 else
2156                         ixgbe_vlan_hw_filter_disable(dev);
2157         }
2158
2159         if (mask & ETH_VLAN_EXTEND_MASK) {
2160                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2161                         ixgbe_vlan_hw_extend_enable(dev);
2162                 else
2163                         ixgbe_vlan_hw_extend_disable(dev);
2164         }
2165
2166         return 0;
2167 }
2168
2169 static void
2170 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2171 {
2172         struct ixgbe_hw *hw =
2173                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2174         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2175         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2176
2177         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2178         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2179 }
2180
2181 static int
2182 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2183 {
2184         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2185
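        /*
         * With SRIOV and RSS the Rx queues are carved into equally sized
         * pools: 1 or 2 queues per pool allows 64 pools, 4 queues per pool
         * allows 32. The PF default pool queues start right after the queues
         * reserved for the max_vfs VFs.
         */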
2186         switch (nb_rx_q) {
2187         case 1:
2188         case 2:
2189                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2190                 break;
2191         case 4:
2192                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2193                 break;
2194         default:
2195                 return -EINVAL;
2196         }
2197
2198         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
2199         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
2200
2201         return 0;
2202 }
2203
2204 static int
2205 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2206 {
2207         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2208         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2209         uint16_t nb_rx_q = dev->data->nb_rx_queues;
2210         uint16_t nb_tx_q = dev->data->nb_tx_queues;
2211
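        /*
         * When SRIOV is active only VMDq-based multi-queue modes are valid
         * and the PF may use at most nb_q_per_pool Rx/Tx queues; otherwise
         * the DCB and VMDq+DCB configurations are checked against the fixed
         * queue counts they require.
         */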
2212         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2213                 /* check multi-queue mode */
2214                 switch (dev_conf->rxmode.mq_mode) {
2215                 case ETH_MQ_RX_VMDQ_DCB:
2216                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2217                         break;
2218                 case ETH_MQ_RX_VMDQ_DCB_RSS:
2219                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
2220                         PMD_INIT_LOG(ERR, "SRIOV active,"
2221                                         " unsupported mq_mode rx %d.",
2222                                         dev_conf->rxmode.mq_mode);
2223                         return -EINVAL;
2224                 case ETH_MQ_RX_RSS:
2225                 case ETH_MQ_RX_VMDQ_RSS:
2226                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2227                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2228                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2229                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2230                                                 " invalid queue number"
2231                                                 " for VMDQ RSS, allowed"
2232                                                 " value are 1, 2 or 4.");
2233                                         return -EINVAL;
2234                                 }
2235                         break;
2236                 case ETH_MQ_RX_VMDQ_ONLY:
2237                 case ETH_MQ_RX_NONE:
2238                         /* if no mq mode is configured, use the default scheme */
2239                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2240                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
2241                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
2242                         break;
2243                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
2244                         /* SRIOV only works with VMDq enabled */
2245                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2246                                         " wrong mq_mode rx %d.",
2247                                         dev_conf->rxmode.mq_mode);
2248                         return -EINVAL;
2249                 }
2250
2251                 switch (dev_conf->txmode.mq_mode) {
2252                 case ETH_MQ_TX_VMDQ_DCB:
2253                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2254                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2255                         break;
2256                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2257                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2258                         break;
2259                 }
2260
2261                 /* check valid queue number */
2262                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2263                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2264                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2265                                         " nb_rx_q=%d nb_tx_q=%d queue number"
2266                                         " must be less than or equal to %d.",
2267                                         nb_rx_q, nb_tx_q,
2268                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2269                         return -EINVAL;
2270                 }
2271         } else {
2272                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2273                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2274                                           " not supported.");
2275                         return -EINVAL;
2276                 }
2277                 /* check configuration for vmdq+dcb mode */
2278                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2279                         const struct rte_eth_vmdq_dcb_conf *conf;
2280
2281                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2282                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2283                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
2284                                 return -EINVAL;
2285                         }
2286                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2287                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2288                                conf->nb_queue_pools == ETH_32_POOLS)) {
2289                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2290                                                 " nb_queue_pools must be %d or %d.",
2291                                                 ETH_16_POOLS, ETH_32_POOLS);
2292                                 return -EINVAL;
2293                         }
2294                 }
2295                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2296                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
2297
2298                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2299                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2300                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
2301                                 return -EINVAL;
2302                         }
2303                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2304                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2305                                conf->nb_queue_pools == ETH_32_POOLS)) {
2306                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2307                                                 " nb_queue_pools != %d and"
2308                                                 " nb_queue_pools != %d.",
2309                                                 ETH_16_POOLS, ETH_32_POOLS);
2310                                 return -EINVAL;
2311                         }
2312                 }
2313
2314                 /* For DCB mode check our configuration before we go further */
2315                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2316                         const struct rte_eth_dcb_rx_conf *conf;
2317
2318                         if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
2319                                 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
2320                                                  IXGBE_DCB_NB_QUEUES);
2321                                 return -EINVAL;
2322                         }
2323                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2324                         if (!(conf->nb_tcs == ETH_4_TCS ||
2325                                conf->nb_tcs == ETH_8_TCS)) {
2326                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2327                                                 " and nb_tcs != %d.",
2328                                                 ETH_4_TCS, ETH_8_TCS);
2329                                 return -EINVAL;
2330                         }
2331                 }
2332
2333                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2334                         const struct rte_eth_dcb_tx_conf *conf;
2335
2336                         if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
2337                                 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
2338                                                  IXGBE_DCB_NB_QUEUES);
2339                                 return -EINVAL;
2340                         }
2341                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2342                         if (!(conf->nb_tcs == ETH_4_TCS ||
2343                                conf->nb_tcs == ETH_8_TCS)) {
2344                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2345                                                 " and nb_tcs != %d.",
2346                                                 ETH_4_TCS, ETH_8_TCS);
2347                                 return -EINVAL;
2348                         }
2349                 }
2350
2351                 /*
2352                  * When DCB/VT is off, maximum number of queues changes,
2353                  * except for 82598EB, which remains constant.
2354                  */
2355                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2356                                 hw->mac.type != ixgbe_mac_82598EB) {
2357                         if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2358                                 PMD_INIT_LOG(ERR,
2359                                              "Neither VT nor DCB are enabled, "
2360                                              "nb_tx_q > %d.",
2361                                              IXGBE_NONE_MODE_TX_NB_QUEUES);
2362                                 return -EINVAL;
2363                         }
2364                 }
2365         }
2366         return 0;
2367 }
2368
2369 static int
2370 ixgbe_dev_configure(struct rte_eth_dev *dev)
2371 {
2372         struct ixgbe_interrupt *intr =
2373                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2374         struct ixgbe_adapter *adapter =
2375                 (struct ixgbe_adapter *)dev->data->dev_private;
2376         int ret;
2377
2378         PMD_INIT_FUNC_TRACE();
2379         /* multiple queue mode checking */
2380         ret  = ixgbe_check_mq_mode(dev);
2381         if (ret != 0) {
2382                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2383                             ret);
2384                 return ret;
2385         }
2386
2387         /* set flag to update link status after init */
2388         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2389
2390         /*
2391          * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
2392          * allocation or vector Rx preconditions, we will reset it.
2393          */
2394         adapter->rx_bulk_alloc_allowed = true;
2395         adapter->rx_vec_allowed = true;
2396
2397         return 0;
2398 }
2399
2400 static void
2401 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2402 {
2403         struct ixgbe_hw *hw =
2404                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2405         struct ixgbe_interrupt *intr =
2406                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2407         uint32_t gpie;
2408
2409         /* only set it up on X550EM_X */
2410         if (hw->mac.type == ixgbe_mac_X550EM_x) {
2411                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2412                 gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2413                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2414                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2415                         intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2416         }
2417 }
2418
2419 int
2420 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
2421                         uint16_t tx_rate, uint64_t q_msk)
2422 {
2423         struct ixgbe_hw *hw;
2424         struct ixgbe_vf_info *vfinfo;
2425         struct rte_eth_link link;
2426         uint8_t  nb_q_per_pool;
2427         uint32_t queue_stride;
2428         uint32_t queue_idx, idx = 0, vf_idx;
2429         uint32_t queue_end;
2430         uint16_t total_rate = 0;
2431         struct rte_pci_device *pci_dev;
2432
2433         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2434         rte_eth_link_get_nowait(dev->data->port_id, &link);
2435
2436         if (vf >= pci_dev->max_vfs)
2437                 return -EINVAL;
2438
2439         if (tx_rate > link.link_speed)
2440                 return -EINVAL;
2441
2442         if (q_msk == 0)
2443                 return 0;
2444
2445         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2446         vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
2447         nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
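        /*
         * Each VF owns a contiguous block of queues; the stride between
         * blocks is the total queue count divided by the number of active
         * pools (e.g. 128 / 64 = 2 or 128 / 32 = 4 queues per VF).
         */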
2448         queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2449         queue_idx = vf * queue_stride;
2450         queue_end = queue_idx + nb_q_per_pool - 1;
2451         if (queue_end >= hw->mac.max_tx_queues)
2452                 return -EINVAL;
2453
2454         if (vfinfo) {
2455                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
2456                         if (vf_idx == vf)
2457                                 continue;
2458                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
2459                                 idx++)
2460                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
2461                 }
2462         } else {
2463                 return -EINVAL;
2464         }
2465
2466         /* Store tx_rate for this vf. */
2467         for (idx = 0; idx < nb_q_per_pool; idx++) {
2468                 if (((uint64_t)0x1 << idx) & q_msk) {
2469                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
2470                                 vfinfo[vf].tx_rate[idx] = tx_rate;
2471                         total_rate += tx_rate;
2472                 }
2473         }
2474
2475         if (total_rate > dev->data->dev_link.link_speed) {
2476                 /* Reset the stored TX rate of the VF if it would exceed
2477                  * the link speed.
2478                  */
2479                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
2480                 return -EINVAL;
2481         }
2482
2483         /* Set RTTBCNRC of each queue/pool for the given VF */
2484         for (; queue_idx <= queue_end; queue_idx++) {
2485                 if (0x1 & q_msk)
2486                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
2487                 q_msk = q_msk >> 1;
2488         }
2489
2490         return 0;
2491 }
2492
2493 /*
2494  * Configure device link speed and setup link.
2495  * It returns 0 on success.
2496  */
2497 static int
2498 ixgbe_dev_start(struct rte_eth_dev *dev)
2499 {
2500         struct ixgbe_hw *hw =
2501                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2502         struct ixgbe_vf_info *vfinfo =
2503                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2504         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2505         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2506         uint32_t intr_vector = 0;
2507         int err, link_up = 0, negotiate = 0;
2508         uint32_t speed = 0;
2509         int mask = 0;
2510         int status;
2511         uint16_t vf, idx;
2512         uint32_t *link_speeds;
2513         struct ixgbe_tm_conf *tm_conf =
2514                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2515
2516         PMD_INIT_FUNC_TRACE();
2517
2518         /* IXGBE devices don't support:
2519          *    - half duplex (checked afterwards for valid speeds)
2520          *    - fixed speed: TODO implement
2521          */
2522         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2523                 PMD_INIT_LOG(ERR,
2524                 "Invalid link_speeds for port %u, fixed speed not supported",
2525                                 dev->data->port_id);
2526                 return -EINVAL;
2527         }
2528
2529         /* disable uio/vfio intr/eventfd mapping */
2530         rte_intr_disable(intr_handle);
2531
2532         /* stop adapter */
2533         hw->adapter_stopped = 0;
2534         ixgbe_stop_adapter(hw);
2535
2536         /* reinitialize adapter
2537          * this calls reset and start
2538          */
2539         status = ixgbe_pf_reset_hw(hw);
2540         if (status != 0)
2541                 return -1;
2542         hw->mac.ops.start_hw(hw);
2543         hw->mac.get_link_status = true;
2544
2545         /* configure PF module if SRIOV enabled */
2546         ixgbe_pf_host_configure(dev);
2547
2548         ixgbe_dev_phy_intr_setup(dev);
2549
2550         /* check and configure queue intr-vector mapping */
2551         if ((rte_intr_cap_multiple(intr_handle) ||
2552              !RTE_ETH_DEV_SRIOV(dev).active) &&
2553             dev->data->dev_conf.intr_conf.rxq != 0) {
2554                 intr_vector = dev->data->nb_rx_queues;
2555                 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2556                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2557                                         IXGBE_MAX_INTR_QUEUE_NUM);
2558                         return -ENOTSUP;
2559                 }
2560                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2561                         return -1;
2562         }
2563
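        /* Allocate one entry per Rx queue to record the queue-to-MSI-X
         * vector mapping used when Rx interrupts are enabled.
         */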
2564         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2565                 intr_handle->intr_vec =
2566                         rte_zmalloc("intr_vec",
2567                                     dev->data->nb_rx_queues * sizeof(int), 0);
2568                 if (intr_handle->intr_vec == NULL) {
2569                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2570                                      " intr_vec", dev->data->nb_rx_queues);
2571                         return -ENOMEM;
2572                 }
2573         }
2574
2575         /* configure msix for sleep until rx interrupt */
2576         ixgbe_configure_msix(dev);
2577
2578         /* initialize transmission unit */
2579         ixgbe_dev_tx_init(dev);
2580
2581         /* This can fail when allocating mbufs for descriptor rings */
2582         err = ixgbe_dev_rx_init(dev);
2583         if (err) {
2584                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2585                 goto error;
2586         }
2587
2588         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2589                 ETH_VLAN_EXTEND_MASK;
2590         err = ixgbe_vlan_offload_set(dev, mask);
2591         if (err) {
2592                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
2593                 goto error;
2594         }
2595
2596         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2597                 /* Enable vlan filtering for VMDq */
2598                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2599         }
2600
2601         /* Configure DCB hw */
2602         ixgbe_configure_dcb(dev);
2603
2604         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2605                 err = ixgbe_fdir_configure(dev);
2606                 if (err)
2607                         goto error;
2608         }
2609
2610         /* Restore vf rate limit */
2611         if (vfinfo != NULL) {
2612                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
2613                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2614                                 if (vfinfo[vf].tx_rate[idx] != 0)
2615                                         ixgbe_set_vf_rate_limit(
2616                                                 dev, vf,
2617                                                 vfinfo[vf].tx_rate[idx],
2618                                                 1 << idx);
2619         }
2620
2621         ixgbe_restore_statistics_mapping(dev);
2622
2623         err = ixgbe_dev_rxtx_start(dev);
2624         if (err < 0) {
2625                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2626                 goto error;
2627         }
2628
2629         /* Skip link setup if loopback mode is enabled for 82599. */
2630         if (hw->mac.type == ixgbe_mac_82599EB &&
2631                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2632                 goto skip_link_setup;
2633
2634         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2635                 err = hw->mac.ops.setup_sfp(hw);
2636                 if (err)
2637                         goto error;
2638         }
2639
2640         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2641                 /* Turn on the copper */
2642                 ixgbe_set_phy_power(hw, true);
2643         } else {
2644                 /* Turn on the laser */
2645                 ixgbe_enable_tx_laser(hw);
2646         }
2647
2648         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2649         if (err)
2650                 goto error;
2651         dev->data->dev_link.link_status = link_up;
2652
2653         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2654         if (err)
2655                 goto error;
2656
2657         link_speeds = &dev->data->dev_conf.link_speeds;
2658         if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2659                         ETH_LINK_SPEED_10G)) {
2660                 PMD_INIT_LOG(ERR, "Invalid link setting");
2661                 goto error;
2662         }
2663
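        /* Translate the requested ETH_LINK_SPEED_* flags into the
         * IXGBE_LINK_SPEED_* mask expected by the base code; autonegotiation
         * falls back to the MAC-specific default below.
         */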
2664         speed = 0x0;
2665         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2666                 switch (hw->mac.type) {
2667                 case ixgbe_mac_82598EB:
2668                         speed = IXGBE_LINK_SPEED_82598_AUTONEG;
2669                         break;
2670                 case ixgbe_mac_82599EB:
2671                 case ixgbe_mac_X540:
2672                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2673                         break;
2674                 case ixgbe_mac_X550:
2675                 case ixgbe_mac_X550EM_x:
2676                 case ixgbe_mac_X550EM_a:
2677                         speed = IXGBE_LINK_SPEED_X550_AUTONEG;
2678                         break;
2679                 default:
2680                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2681                 }
2682         } else {
2683                 if (*link_speeds & ETH_LINK_SPEED_10G)
2684                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2685                 if (*link_speeds & ETH_LINK_SPEED_1G)
2686                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2687                 if (*link_speeds & ETH_LINK_SPEED_100M)
2688                         speed |= IXGBE_LINK_SPEED_100_FULL;
2689         }
2690
2691         err = ixgbe_setup_link(hw, speed, link_up);
2692         if (err)
2693                 goto error;
2694
2695 skip_link_setup:
2696
2697         if (rte_intr_allow_others(intr_handle)) {
2698                 /* check if lsc interrupt is enabled */
2699                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2700                         ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
2701                 else
2702                         ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
2703                 ixgbe_dev_macsec_interrupt_setup(dev);
2704         } else {
2705                 rte_intr_callback_unregister(intr_handle,
2706                                              ixgbe_dev_interrupt_handler, dev);
2707                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2708                         PMD_INIT_LOG(INFO, "lsc won't be enabled because"
2709                                      " intr multiplexing is not available");
2710         }
2711
2712         /* check if rxq interrupt is enabled */
2713         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2714             rte_intr_dp_is_en(intr_handle))
2715                 ixgbe_dev_rxq_interrupt_setup(dev);
2716
2717         /* enable uio/vfio intr/eventfd mapping */
2718         rte_intr_enable(intr_handle);
2719
2720         /* re-enable the interrupts that were enabled before the hw reset */
2721         ixgbe_enable_intr(dev);
2722         ixgbe_l2_tunnel_conf(dev);
2723         ixgbe_filter_restore(dev);
2724
2725         if (tm_conf->root && !tm_conf->committed)
2726                 PMD_DRV_LOG(WARNING,
2727                             "please call hierarchy_commit() "
2728                             "before starting the port");
2729
2730         return 0;
2731
2732 error:
2733         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2734         ixgbe_dev_clear_queues(dev);
2735         return -EIO;
2736 }
2737
2738 /*
2739  * Stop device: disable rx and tx functions to allow for reconfiguring.
2740  */
2741 static void
2742 ixgbe_dev_stop(struct rte_eth_dev *dev)
2743 {
2744         struct rte_eth_link link;
2745         struct ixgbe_hw *hw =
2746                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2747         struct ixgbe_vf_info *vfinfo =
2748                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2749         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2750         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2751         int vf;
2752         struct ixgbe_tm_conf *tm_conf =
2753                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2754
2755         PMD_INIT_FUNC_TRACE();
2756
2757         /* disable interrupts */
2758         ixgbe_disable_intr(hw);
2759
2760         /* reset the NIC */
2761         ixgbe_pf_reset_hw(hw);
2762         hw->adapter_stopped = 0;
2763
2764         /* stop adapter */
2765         ixgbe_stop_adapter(hw);
2766
2767         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2768                 vfinfo[vf].clear_to_send = false;
2769
2770         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2771                 /* Turn off the copper */
2772                 ixgbe_set_phy_power(hw, false);
2773         } else {
2774                 /* Turn off the laser */
2775                 ixgbe_disable_tx_laser(hw);
2776         }
2777
2778         ixgbe_dev_clear_queues(dev);
2779
2780         /* Clear stored conf */
2781         dev->data->scattered_rx = 0;
2782         dev->data->lro = 0;
2783
2784         /* Clear recorded link status */
2785         memset(&link, 0, sizeof(link));
2786         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2787
2788         if (!rte_intr_allow_others(intr_handle))
2789                 /* resume to the default handler */
2790                 rte_intr_callback_register(intr_handle,
2791                                            ixgbe_dev_interrupt_handler,
2792                                            (void *)dev);
2793
2794         /* Clean datapath event and queue/vec mapping */
2795         rte_intr_efd_disable(intr_handle);
2796         if (intr_handle->intr_vec != NULL) {
2797                 rte_free(intr_handle->intr_vec);
2798                 intr_handle->intr_vec = NULL;
2799         }
2800
2801         /* reset hierarchy commit */
2802         tm_conf->committed = false;
2803 }
2804
2805 /*
2806  * Set device link up: enable tx.
2807  */
2808 static int
2809 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2810 {
2811         struct ixgbe_hw *hw =
2812                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2813         if (hw->mac.type == ixgbe_mac_82599EB) {
2814 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2815                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2816                         /* Not supported in bypass mode */
2817                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2818                                      "by device id 0x%x", hw->device_id);
2819                         return -ENOTSUP;
2820                 }
2821 #endif
2822         }
2823
2824         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2825                 /* Turn on the copper */
2826                 ixgbe_set_phy_power(hw, true);
2827         } else {
2828                 /* Turn on the laser */
2829                 ixgbe_enable_tx_laser(hw);
2830         }
2831
2832         return 0;
2833 }
2834
2835 /*
2836  * Set device link down: disable tx.
2837  */
2838 static int
2839 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2840 {
2841         struct ixgbe_hw *hw =
2842                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2843         if (hw->mac.type == ixgbe_mac_82599EB) {
2844 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2845                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2846                         /* Not supported in bypass mode */
2847                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2848                                      "by device id 0x%x", hw->device_id);
2849                         return -ENOTSUP;
2850                 }
2851 #endif
2852         }
2853
2854         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2855                 /* Turn off the copper */
2856                 ixgbe_set_phy_power(hw, false);
2857         } else {
2858                 /* Turn off the laser */
2859                 ixgbe_disable_tx_laser(hw);
2860         }
2861
2862         return 0;
2863 }
2864
2865 /*
2866  * Reset and stop device.
2867  */
2868 static void
2869 ixgbe_dev_close(struct rte_eth_dev *dev)
2870 {
2871         struct ixgbe_hw *hw =
2872                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2873
2874         PMD_INIT_FUNC_TRACE();
2875
2876         ixgbe_pf_reset_hw(hw);
2877
2878         ixgbe_dev_stop(dev);
2879         hw->adapter_stopped = 1;
2880
2881         ixgbe_dev_free_queues(dev);
2882
2883         ixgbe_disable_pcie_master(hw);
2884
2885         /* reprogram the RAR[0] in case user changed it. */
2886         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2887 }
2888
2889 /*
2890  * Reset PF device.
2891  */
2892 static int
2893 ixgbe_dev_reset(struct rte_eth_dev *dev)
2894 {
2895         int ret;
2896
2897         /* When a DPDK PMD PF begins to reset a PF port, it should notify all
2898          * its VFs so that they stay aligned with it. The detailed notification
2899          * mechanism is PMD specific. For the ixgbe PF it is rather complex, so
2900          * to avoid unexpected behavior in the VFs, reset of a PF with SR-IOV
2901          * activated is currently not supported. It might be supported later.
2902          */
2903         if (dev->data->sriov.active)
2904                 return -ENOTSUP;
2905
2906         ret = eth_ixgbe_dev_uninit(dev);
2907         if (ret)
2908                 return ret;
2909
2910         ret = eth_ixgbe_dev_init(dev);
2911
2912         return ret;
2913 }
2914
2915 static void
2916 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2917                            struct ixgbe_hw_stats *hw_stats,
2918                            struct ixgbe_macsec_stats *macsec_stats,
2919                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
2920                            uint64_t *total_qprc, uint64_t *total_qprdc)
2921 {
2922         uint32_t bprc, lxon, lxoff, total;
2923         uint32_t delta_gprc = 0;
2924         unsigned i;
2925         /* Workaround: the RX byte counters do not include CRC bytes when CRC
2926          * stripping is enabled, so when crc_strip is disabled the CRC bytes are
2927          * subtracted from the counters below to keep them consistent.
2928          */
2929         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
2930                         IXGBE_HLREG0_RXCRCSTRP);
2931
2932         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2933         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2934         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2935         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2936
2937         for (i = 0; i < 8; i++) {
2938                 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2939
2940                 /* global total per queue */
2941                 hw_stats->mpc[i] += mp;
2942                 /* Running comprehensive total for stats display */
2943                 *total_missed_rx += hw_stats->mpc[i];
2944                 if (hw->mac.type == ixgbe_mac_82598EB) {
2945                         hw_stats->rnbc[i] +=
2946                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2947                         hw_stats->pxonrxc[i] +=
2948                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2949                         hw_stats->pxoffrxc[i] +=
2950                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2951                 } else {
2952                         hw_stats->pxonrxc[i] +=
2953                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2954                         hw_stats->pxoffrxc[i] +=
2955                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2956                         hw_stats->pxon2offc[i] +=
2957                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2958                 }
2959                 hw_stats->pxontxc[i] +=
2960                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2961                 hw_stats->pxofftxc[i] +=
2962                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2963         }
2964         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2965                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2966                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2967                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2968
2969                 delta_gprc += delta_qprc;
2970
2971                 hw_stats->qprc[i] += delta_qprc;
2972                 hw_stats->qptc[i] += delta_qptc;
2973
2974                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2975                 hw_stats->qbrc[i] +=
2976                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2977                 if (crc_strip == 0)
2978                         hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
2979
2980                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2981                 hw_stats->qbtc[i] +=
2982                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2983
2984                 hw_stats->qprdc[i] += delta_qprdc;
2985                 *total_qprdc += hw_stats->qprdc[i];
2986
2987                 *total_qprc += hw_stats->qprc[i];
2988                 *total_qbrc += hw_stats->qbrc[i];
2989         }
2990         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2991         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2992         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2993
2994         /*
2995          * An erratum states that gprc actually counts good + missed packets;
2996          * as a workaround, set gprc to the sum of the per-queue receive counts.
2997          */
2998         hw_stats->gprc = *total_qprc;
2999
3000         if (hw->mac.type != ixgbe_mac_82598EB) {
3001                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
3002                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3003                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
3004                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3005                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
3006                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3007                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3008                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3009         } else {
3010                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3011                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3012                 /* 82598 only has a counter in the high register */
3013                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3014                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3015                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3016         }
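        /* Snapshot tpr before the read below so the CRC adjustment of tor can
         * use the delta accumulated in this call only.
         */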
3017         uint64_t old_tpr = hw_stats->tpr;
3018
3019         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3020         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3021
3022         if (crc_strip == 0)
3023                 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
3024
3025         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
3026         hw_stats->gptc += delta_gptc;
3027         hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
3028         hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
3029
3030         /*
3031          * Workaround: mprc hardware is incorrectly counting
3032          * broadcasts, so for now we subtract those.
3033          */
3034         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3035         hw_stats->bprc += bprc;
3036         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3037         if (hw->mac.type == ixgbe_mac_82598EB)
3038                 hw_stats->mprc -= bprc;
3039
3040         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3041         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3042         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3043         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3044         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3045         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3046
3047         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3048         hw_stats->lxontxc += lxon;
3049         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3050         hw_stats->lxofftxc += lxoff;
3051         total = lxon + lxoff;
3052
3053         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3054         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3055         hw_stats->gptc -= total;
3056         hw_stats->mptc -= total;
3057         hw_stats->ptc64 -= total;
3058         hw_stats->gotc -= total * ETHER_MIN_LEN;
3059
3060         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3061         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3062         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3063         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3064         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3065         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3066         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3067         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3068         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3069         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3070         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3071         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3072         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3073         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3074         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3075         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3076         /* Only read FCoE counters on MACs other than 82598 */
3077         if (hw->mac.type != ixgbe_mac_82598EB) {
3078                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3079                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3080                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3081                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3082                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3083         }
3084
3085         /* Flow Director Stats registers */
3086         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3087         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3088
3089         /* MACsec Stats registers */
3090         macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
3091         macsec_stats->out_pkts_encrypted +=
3092                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
3093         macsec_stats->out_pkts_protected +=
3094                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
3095         macsec_stats->out_octets_encrypted +=
3096                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
3097         macsec_stats->out_octets_protected +=
3098                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
3099         macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
3100         macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
3101         macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
3102         macsec_stats->in_pkts_unknownsci +=
3103                 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
3104         macsec_stats->in_octets_decrypted +=
3105                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
3106         macsec_stats->in_octets_validated +=
3107                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
3108         macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
3109         macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
3110         macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
3111         for (i = 0; i < 2; i++) {
3112                 macsec_stats->in_pkts_ok +=
3113                         IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
3114                 macsec_stats->in_pkts_invalid +=
3115                         IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
3116                 macsec_stats->in_pkts_notvalid +=
3117                         IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
3118         }
3119         macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
3120         macsec_stats->in_pkts_notusingsa +=
3121                 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3122 }
3123
3124 /*
3125  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3126  */
3127 static int
3128 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3129 {
3130         struct ixgbe_hw *hw =
3131                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3132         struct ixgbe_hw_stats *hw_stats =
3133                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3134         struct ixgbe_macsec_stats *macsec_stats =
3135                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3136                                 dev->data->dev_private);
3137         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3138         unsigned i;
3139
3140         total_missed_rx = 0;
3141         total_qbrc = 0;
3142         total_qprc = 0;
3143         total_qprdc = 0;
3144
3145         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3146                         &total_qbrc, &total_qprc, &total_qprdc);
3147
3148         if (stats == NULL)
3149                 return -EINVAL;
3150
3151         /* Fill out the rte_eth_stats statistics structure */
3152         stats->ipackets = total_qprc;
3153         stats->ibytes = total_qbrc;
3154         stats->opackets = hw_stats->gptc;
3155         stats->obytes = hw_stats->gotc;
3156
3157         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3158                 stats->q_ipackets[i] = hw_stats->qprc[i];
3159                 stats->q_opackets[i] = hw_stats->qptc[i];
3160                 stats->q_ibytes[i] = hw_stats->qbrc[i];
3161                 stats->q_obytes[i] = hw_stats->qbtc[i];
3162                 stats->q_errors[i] = hw_stats->qprdc[i];
3163         }
3164
3165         /* Rx Errors */
3166         stats->imissed  = total_missed_rx;
3167         stats->ierrors  = hw_stats->crcerrs +
3168                           hw_stats->mspdc +
3169                           hw_stats->rlec +
3170                           hw_stats->ruc +
3171                           hw_stats->roc +
3172                           hw_stats->illerrc +
3173                           hw_stats->errbc +
3174                           hw_stats->rfc +
3175                           hw_stats->fccrc +
3176                           hw_stats->fclast;
3177
3178         /* Tx Errors */
3179         stats->oerrors  = 0;
3180         return 0;
3181 }
3182
3183 static void
3184 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3185 {
3186         struct ixgbe_hw_stats *stats =
3187                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3188
3189         /* HW registers are cleared on read */
3190         ixgbe_dev_stats_get(dev, NULL);
3191
3192         /* Reset software totals */
3193         memset(stats, 0, sizeof(*stats));
3194 }
3195
3196 /* This function calculates the number of xstats based on the current config */
3197 static unsigned
3198 ixgbe_xstats_calc_num(void) {
3199         return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3200                 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3201                 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3202 }
3203
3204 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3205         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3206 {
3207         const unsigned cnt_stats = ixgbe_xstats_calc_num();
3208         unsigned stat, i, count;
3209
3210         if (xstats_names != NULL) {
3211                 count = 0;
3212
3213                 /* Note: limit >= cnt_stats checked upstream
3214                  * in rte_eth_xstats_names()
3215                  */
3216
3217                 /* Extended stats from ixgbe_hw_stats */
3218                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3219                         snprintf(xstats_names[count].name,
3220                                 sizeof(xstats_names[count].name),
3221                                 "%s",
3222                                 rte_ixgbe_stats_strings[i].name);
3223                         count++;
3224                 }
3225
3226                 /* MACsec Stats */
3227                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3228                         snprintf(xstats_names[count].name,
3229                                 sizeof(xstats_names[count].name),
3230                                 "%s",
3231                                 rte_ixgbe_macsec_strings[i].name);
3232                         count++;
3233                 }
3234
3235                 /* RX Priority Stats */
3236                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3237                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3238                                 snprintf(xstats_names[count].name,
3239                                         sizeof(xstats_names[count].name),
3240                                         "rx_priority%u_%s", i,
3241                                         rte_ixgbe_rxq_strings[stat].name);
3242                                 count++;
3243                         }
3244                 }
3245
3246                 /* TX Priority Stats */
3247                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3248                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3249                                 snprintf(xstats_names[count].name,
3250                                         sizeof(xstats_names[count].name),
3251                                         "tx_priority%u_%s", i,
3252                                         rte_ixgbe_txq_strings[stat].name);
3253                                 count++;
3254                         }
3255                 }
3256         }
3257         return cnt_stats;
3258 }
3259
3260 static int ixgbe_dev_xstats_get_names_by_id(
3261         struct rte_eth_dev *dev,
3262         struct rte_eth_xstat_name *xstats_names,
3263         const uint64_t *ids,
3264         unsigned int limit)
3265 {
3266         if (!ids) {
3267                 const unsigned int cnt_stats = ixgbe_xstats_calc_num();
3268                 unsigned int stat, i, count;
3269
3270                 if (xstats_names != NULL) {
3271                         count = 0;
3272
3273                         /* Note: limit >= cnt_stats checked upstream
3274                          * in rte_eth_xstats_names()
3275                          */
3276
3277                         /* Extended stats from ixgbe_hw_stats */
3278                         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3279                                 snprintf(xstats_names[count].name,
3280                                         sizeof(xstats_names[count].name),
3281                                         "%s",
3282                                         rte_ixgbe_stats_strings[i].name);
3283                                 count++;
3284                         }
3285
3286                         /* MACsec Stats */
3287                         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3288                                 snprintf(xstats_names[count].name,
3289                                         sizeof(xstats_names[count].name),
3290                                         "%s",
3291                                         rte_ixgbe_macsec_strings[i].name);
3292                                 count++;
3293                         }
3294
3295                         /* RX Priority Stats */
3296                         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3297                                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3298                                         snprintf(xstats_names[count].name,
3299                                             sizeof(xstats_names[count].name),
3300                                             "rx_priority%u_%s", i,
3301                                             rte_ixgbe_rxq_strings[stat].name);
3302                                         count++;
3303                                 }
3304                         }
3305
3306                         /* TX Priority Stats */
3307                         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3308                                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3309                                         snprintf(xstats_names[count].name,
3310                                             sizeof(xstats_names[count].name),
3311                                             "tx_priority%u_%s", i,
3312                                             rte_ixgbe_txq_strings[stat].name);
3313                                         count++;
3314                                 }
3315                         }
3316                 }
3317                 return cnt_stats;
3318         }
3319
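        /* ids != NULL: build the full name table once, then copy out only
         * the requested entries.
         */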
3320         uint16_t i;
3321         uint16_t size = ixgbe_xstats_calc_num();
3322         struct rte_eth_xstat_name xstats_names_copy[size];
3323
3324         ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
3325                         size);
3326
3327         for (i = 0; i < limit; i++) {
3328                 if (ids[i] >= size) {
3329                         PMD_INIT_LOG(ERR, "id value isn't valid");
3330                         return -1;
3331                 }
3332                 strcpy(xstats_names[i].name,
3333                                 xstats_names_copy[ids[i]].name);
3334         }
3335         return limit;
3336 }
3337
3338 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3339         struct rte_eth_xstat_name *xstats_names, unsigned limit)
3340 {
3341         unsigned i;
3342
3343         if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3344                 return -ENOMEM;
3345
3346         if (xstats_names != NULL)
3347                 for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
3348                         snprintf(xstats_names[i].name,
3349                                 sizeof(xstats_names[i].name),
3350                                 "%s", rte_ixgbevf_stats_strings[i].name);
3351         return IXGBEVF_NB_XSTATS;
3352 }
3353
3354 static int
3355 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3356                                          unsigned n)
3357 {
3358         struct ixgbe_hw *hw =
3359                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3360         struct ixgbe_hw_stats *hw_stats =
3361                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3362         struct ixgbe_macsec_stats *macsec_stats =
3363                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3364                                 dev->data->dev_private);
3365         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3366         unsigned i, stat, count = 0;
3367
3368         count = ixgbe_xstats_calc_num();
3369
3370         if (n < count)
3371                 return count;
3372
3373         total_missed_rx = 0;
3374         total_qbrc = 0;
3375         total_qprc = 0;
3376         total_qprdc = 0;
3377
3378         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3379                         &total_qbrc, &total_qprc, &total_qprdc);
3380
3381         /* If this is a reset, xstats is NULL, and we have cleared the
3382          * registers by reading them.
3383          */
3384         if (!xstats)
3385                 return 0;
3386
3387         /* Extended stats from ixgbe_hw_stats */
3388         count = 0;
3389         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3390                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3391                                 rte_ixgbe_stats_strings[i].offset);
3392                 xstats[count].id = count;
3393                 count++;
3394         }
3395
3396         /* MACsec Stats */
3397         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3398                 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3399                                 rte_ixgbe_macsec_strings[i].offset);
3400                 xstats[count].id = count;
3401                 count++;
3402         }
3403
3404         /* RX Priority Stats */
3405         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3406                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3407                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3408                                         rte_ixgbe_rxq_strings[stat].offset +
3409                                         (sizeof(uint64_t) * i));
3410                         xstats[count].id = count;
3411                         count++;
3412                 }
3413         }
3414
3415         /* TX Priority Stats */
3416         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3417                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3418                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3419                                         rte_ixgbe_txq_strings[stat].offset +
3420                                         (sizeof(uint64_t) * i));
3421                         xstats[count].id = count;
3422                         count++;
3423                 }
3424         }
3425         return count;
3426 }
3427
3428 static int
3429 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3430                 uint64_t *values, unsigned int n)
3431 {
3432         if (!ids) {
3433                 struct ixgbe_hw *hw =
3434                                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3435                 struct ixgbe_hw_stats *hw_stats =
3436                                 IXGBE_DEV_PRIVATE_TO_STATS(
3437                                                 dev->data->dev_private);
3438                 struct ixgbe_macsec_stats *macsec_stats =
3439                                 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3440                                         dev->data->dev_private);
3441                 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3442                 unsigned int i, stat, count = 0;
3443
3444                 count = ixgbe_xstats_calc_num();
3445
3446                 if (!ids && n < count)
3447                         return count;
3448
3449                 total_missed_rx = 0;
3450                 total_qbrc = 0;
3451                 total_qprc = 0;
3452                 total_qprdc = 0;
3453
3454                 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
3455                                 &total_missed_rx, &total_qbrc, &total_qprc,
3456                                 &total_qprdc);
3457
3458                 /* If this is a reset, xstats is NULL, and we have cleared the
3459                  * registers by reading them.
3460                  */
3461                 if (!ids && !values)
3462                         return 0;
3463
3464                 /* Extended stats from ixgbe_hw_stats */
3465                 count = 0;
3466                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3467                         values[count] = *(uint64_t *)(((char *)hw_stats) +
3468                                         rte_ixgbe_stats_strings[i].offset);
3469                         count++;
3470                 }
3471
3472                 /* MACsec Stats */
3473                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3474                         values[count] = *(uint64_t *)(((char *)macsec_stats) +
3475                                         rte_ixgbe_macsec_strings[i].offset);
3476                         count++;
3477                 }
3478
3479                 /* RX Priority Stats */
3480                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3481                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3482                                 values[count] =
3483                                         *(uint64_t *)(((char *)hw_stats) +
3484                                         rte_ixgbe_rxq_strings[stat].offset +
3485                                         (sizeof(uint64_t) * i));
3486                                 count++;
3487                         }
3488                 }
3489
3490                 /* TX Priority Stats */
3491                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3492                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3493                                 values[count] =
3494                                         *(uint64_t *)(((char *)hw_stats) +
3495                                         rte_ixgbe_txq_strings[stat].offset +
3496                                         (sizeof(uint64_t) * i));
3497                                 count++;
3498                         }
3499                 }
3500                 return count;
3501         }
3502
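        /* ids != NULL: fetch the complete value array, then return only the
         * requested ids.
         */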
3503         uint16_t i;
3504         uint16_t size = ixgbe_xstats_calc_num();
3505         uint64_t values_copy[size];
3506
3507         ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
3508
3509         for (i = 0; i < n; i++) {
3510                 if (ids[i] >= size) {
3511                         PMD_INIT_LOG(ERR, "id value isn't valid");
3512                         return -1;
3513                 }
3514                 values[i] = values_copy[ids[i]];
3515         }
3516         return n;
3517 }
3518
3519 static void
3520 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3521 {
3522         struct ixgbe_hw_stats *stats =
3523                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3524         struct ixgbe_macsec_stats *macsec_stats =
3525                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3526                                 dev->data->dev_private);
3527
3528         unsigned count = ixgbe_xstats_calc_num();
3529
3530         /* HW registers are cleared on read */
3531         ixgbe_dev_xstats_get(dev, NULL, count);
3532
3533         /* Reset software totals */
3534         memset(stats, 0, sizeof(*stats));
3535         memset(macsec_stats, 0, sizeof(*macsec_stats));
3536 }
3537
3538 static void
3539 ixgbevf_update_stats(struct rte_eth_dev *dev)
3540 {
3541         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3542         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3543                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3544
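        /* The VF counters are read via the UPDATE_VF_STAT* macros, which
         * track the last value read (hw_stats->last_*) and add the change
         * since then to the running software totals.
         */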
3545         /* Good Rx packet, include VF loopback */
3546         UPDATE_VF_STAT(IXGBE_VFGPRC,
3547             hw_stats->last_vfgprc, hw_stats->vfgprc);
3548
3549         /* Good Rx octets, include VF loopback */
3550         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3551             hw_stats->last_vfgorc, hw_stats->vfgorc);
3552
3553         /* Good Tx packet, include VF loopback */
3554         UPDATE_VF_STAT(IXGBE_VFGPTC,
3555             hw_stats->last_vfgptc, hw_stats->vfgptc);
3556
3557         /* Good Tx octets, include VF loopback */
3558         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3559             hw_stats->last_vfgotc, hw_stats->vfgotc);
3560
3561         /* Rx Multicast Packet */
3562         UPDATE_VF_STAT(IXGBE_VFMPRC,
3563             hw_stats->last_vfmprc, hw_stats->vfmprc);
3564 }
3565
3566 static int
3567 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3568                        unsigned n)
3569 {
3570         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3571                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3572         unsigned i;
3573
3574         if (n < IXGBEVF_NB_XSTATS)
3575                 return IXGBEVF_NB_XSTATS;
3576
3577         ixgbevf_update_stats(dev);
3578
3579         if (!xstats)
3580                 return 0;
3581
3582         /* Extended stats */
3583         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3584                 xstats[i].id = i;
3585                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3586                         rte_ixgbevf_stats_strings[i].offset);
3587         }
3588
3589         return IXGBEVF_NB_XSTATS;
3590 }
3591
3592 static int
3593 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3594 {
3595         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3596                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3597
3598         ixgbevf_update_stats(dev);
3599
3600         if (stats == NULL)
3601                 return -EINVAL;
3602
3603         stats->ipackets = hw_stats->vfgprc;
3604         stats->ibytes = hw_stats->vfgorc;
3605         stats->opackets = hw_stats->vfgptc;
3606         stats->obytes = hw_stats->vfgotc;
3607         return 0;
3608 }
3609
3610 static void
3611 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3612 {
3613         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3614                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3615
3616         /* Sync HW register to the last stats */
3617         ixgbevf_dev_stats_get(dev, NULL);
3618
3619         /* reset HW current stats */
3620         hw_stats->vfgprc = 0;
3621         hw_stats->vfgorc = 0;
3622         hw_stats->vfgptc = 0;
3623         hw_stats->vfgotc = 0;
3624 }
3625
3626 static int
3627 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3628 {
3629         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3630         u16 eeprom_verh, eeprom_verl;
3631         u32 etrack_id;
3632         int ret;
3633
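        /* The firmware version is reported as the eTrack ID, built from two
         * 16-bit EEPROM words: the high word at offset 0x2e and the low word
         * at offset 0x2d.
         */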
3634         ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3635         ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3636
3637         etrack_id = (eeprom_verh << 16) | eeprom_verl;
3638         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3639
3640         ret += 1; /* add the size of '\0' */
3641         if (fw_size < (u32)ret)
3642                 return ret;
3643         else
3644                 return 0;
3645 }
3646
3647 static void
3648 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3649 {
3650         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3651         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3652         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3653
3654         dev_info->pci_dev = pci_dev;
3655         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3656         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3657         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3658                 /*
3659                  * When DCB/VT is off, the maximum number of Tx queues changes,
3660                  * except for 82598EB, where it remains constant.
3661                  */
3662                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3663                                 hw->mac.type != ixgbe_mac_82598EB)
3664                         dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3665         }
3666         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3667         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3668         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3669         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3670         dev_info->max_vfs = pci_dev->max_vfs;
3671         if (hw->mac.type == ixgbe_mac_82598EB)
3672                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3673         else
3674                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3675         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3676         dev_info->rx_offload_capa =
3677                 DEV_RX_OFFLOAD_VLAN_STRIP |
3678                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3679                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3680                 DEV_RX_OFFLOAD_TCP_CKSUM;
3681
3682         /*
3683          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
3684          * mode.
3685          */
3686         if ((hw->mac.type == ixgbe_mac_82599EB ||
3687              hw->mac.type == ixgbe_mac_X540) &&
3688             !RTE_ETH_DEV_SRIOV(dev).active)
3689                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
3690
3691         if (hw->mac.type == ixgbe_mac_82599EB ||
3692             hw->mac.type == ixgbe_mac_X540)
3693                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
3694
3695         if (hw->mac.type == ixgbe_mac_X550 ||
3696             hw->mac.type == ixgbe_mac_X550EM_x ||
3697             hw->mac.type == ixgbe_mac_X550EM_a)
3698                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
3699
3700         dev_info->tx_offload_capa =
3701                 DEV_TX_OFFLOAD_VLAN_INSERT |
3702                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3703                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3704                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3705                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3706                 DEV_TX_OFFLOAD_TCP_TSO;
3707
3708         if (hw->mac.type == ixgbe_mac_82599EB ||
3709             hw->mac.type == ixgbe_mac_X540)
3710                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
3711
3712         if (hw->mac.type == ixgbe_mac_X550 ||
3713             hw->mac.type == ixgbe_mac_X550EM_x ||
3714             hw->mac.type == ixgbe_mac_X550EM_a)
3715                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
3716
3717 #ifdef RTE_LIBRTE_SECURITY
3718         dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
3719         dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
3720 #endif
3721
3722         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3723                 .rx_thresh = {
3724                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3725                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3726                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3727                 },
3728                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3729                 .rx_drop_en = 0,
3730         };
3731
3732         dev_info->default_txconf = (struct rte_eth_txconf) {
3733                 .tx_thresh = {
3734                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3735                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3736                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3737                 },
3738                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3739                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3740                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3741                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3742         };
3743
3744         dev_info->rx_desc_lim = rx_desc_lim;
3745         dev_info->tx_desc_lim = tx_desc_lim;
3746
3747         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3748         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3749         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3750
3751         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3752         if (hw->mac.type == ixgbe_mac_X540 ||
3753             hw->mac.type == ixgbe_mac_X540_vf ||
3754             hw->mac.type == ixgbe_mac_X550 ||
3755             hw->mac.type == ixgbe_mac_X550_vf) {
3756                 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3757         }
3758         if (hw->mac.type == ixgbe_mac_X550) {
3759                 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
3760                 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
3761         }
3762 }
3763
3764 static const uint32_t *
3765 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3766 {
3767         static const uint32_t ptypes[] = {
3768                 /* For non-vec functions,
3769                  * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3770                  * for vec functions,
3771                  * refers to _recv_raw_pkts_vec().
3772                  */
3773                 RTE_PTYPE_L2_ETHER,
3774                 RTE_PTYPE_L3_IPV4,
3775                 RTE_PTYPE_L3_IPV4_EXT,
3776                 RTE_PTYPE_L3_IPV6,
3777                 RTE_PTYPE_L3_IPV6_EXT,
3778                 RTE_PTYPE_L4_SCTP,
3779                 RTE_PTYPE_L4_TCP,
3780                 RTE_PTYPE_L4_UDP,
3781                 RTE_PTYPE_TUNNEL_IP,
3782                 RTE_PTYPE_INNER_L3_IPV6,
3783                 RTE_PTYPE_INNER_L3_IPV6_EXT,
3784                 RTE_PTYPE_INNER_L4_TCP,
3785                 RTE_PTYPE_INNER_L4_UDP,
3786                 RTE_PTYPE_UNKNOWN
3787         };
3788
3789         if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3790             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3791             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3792             dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3793                 return ptypes;
3794
3795 #if defined(RTE_ARCH_X86)
3796         if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3797             dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3798                 return ptypes;
3799 #endif
3800         return NULL;
3801 }
3802
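/*
 * Report VF device capabilities: maximum queue and MAC address counts,
 * VMDq pool limits, supported Rx/Tx offloads, and the default Rx/Tx
 * queue configuration and descriptor limits.
 */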
3803 static void
3804 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3805                      struct rte_eth_dev_info *dev_info)
3806 {
3807         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3808         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3809
3810         dev_info->pci_dev = pci_dev;
3811         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3812         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3813         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3814         dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
3815         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3816         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3817         dev_info->max_vfs = pci_dev->max_vfs;
3818         if (hw->mac.type == ixgbe_mac_82598EB)
3819                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3820         else
3821                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3822         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
3823                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3824                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3825                                 DEV_RX_OFFLOAD_TCP_CKSUM;
3826         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
3827                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3828                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3829                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3830                                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3831                                 DEV_TX_OFFLOAD_TCP_TSO;
3832
3833         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3834                 .rx_thresh = {
3835                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3836                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3837                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3838                 },
3839                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3840                 .rx_drop_en = 0,
3841         };
3842
3843         dev_info->default_txconf = (struct rte_eth_txconf) {
3844                 .tx_thresh = {
3845                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3846                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3847                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3848                 },
3849                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3850                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3851                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3852                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3853         };
3854
3855         dev_info->rx_desc_lim = rx_desc_lim;
3856         dev_info->tx_desc_lim = tx_desc_lim;
3857 }
3858
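/*
 * Check the VF link status from the VFLINKS register and, unless a quick
 * check was requested (wait_to_complete == 0), verify through the mailbox
 * that the PF is still responsive. Returns 0 on success, -1 on mailbox
 * NACK or timeout; *link_up and *speed reflect the detected state.
 */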
3859 static int
3860 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3861                    int *link_up, int wait_to_complete)
3862 {
3863         /**
3864          * for a quick link status check, wait_to_complete == 0,
3865          * skip the PF link status check
3866          */
3867         bool no_pflink_check = wait_to_complete == 0;
3868         struct ixgbe_mbx_info *mbx = &hw->mbx;
3869         struct ixgbe_mac_info *mac = &hw->mac;
3870         uint32_t links_reg, in_msg;
3871         int ret_val = 0;
3872
3873         /* If we were hit with a reset drop the link */
3874         if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
3875                 mac->get_link_status = true;
3876
3877         if (!mac->get_link_status)
3878                 goto out;
3879
3880         /* if link status is down no point in checking to see if pf is up */
3881         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3882         if (!(links_reg & IXGBE_LINKS_UP))
3883                 goto out;
3884
3885         /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
3886          * before the link status is correct
3887          */
3888         if (mac->type == ixgbe_mac_82599_vf) {
3889                 int i;
3890
3891                 for (i = 0; i < 5; i++) {
3892                         rte_delay_us(100);
3893                         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3894
3895                         if (!(links_reg & IXGBE_LINKS_UP))
3896                                 goto out;
3897                 }
3898         }
3899
3900         switch (links_reg & IXGBE_LINKS_SPEED_82599) {
3901         case IXGBE_LINKS_SPEED_10G_82599:
3902                 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3903                 if (hw->mac.type >= ixgbe_mac_X550) {
3904                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3905                                 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
3906                 }
3907                 break;
3908         case IXGBE_LINKS_SPEED_1G_82599:
3909                 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3910                 break;
3911         case IXGBE_LINKS_SPEED_100_82599:
3912                 *speed = IXGBE_LINK_SPEED_100_FULL;
3913                 if (hw->mac.type == ixgbe_mac_X550) {
3914                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3915                                 *speed = IXGBE_LINK_SPEED_5GB_FULL;
3916                 }
3917                 break;
3918         case IXGBE_LINKS_SPEED_10_X550EM_A:
3919                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3920                 /* This speed encoding is reserved in older MACs */
3921                 if (hw->mac.type >= ixgbe_mac_X550)
3922                         *speed = IXGBE_LINK_SPEED_10_FULL;
3923                 break;
3924         default:
3925                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3926         }
3927
3928         if (no_pflink_check) {
3929                 if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
3930                         mac->get_link_status = true;
3931                 else
3932                         mac->get_link_status = false;
3933
3934                 goto out;
3935         }
3936         /* if the read failed it could just be a mailbox collision, best wait
3937          * until we are called again and don't report an error
3938          */
3939         if (mbx->ops.read(hw, &in_msg, 1, 0))
3940                 goto out;
3941
3942         if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
3943                 /* msg is not CTS; if it is a NACK we must have lost CTS status */
3944                 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
3945                         ret_val = -1;
3946                 goto out;
3947         }
3948
3949         /* the PF is talking; if we timed out in the past we need to reinit */
3950         if (!mbx->timeout) {
3951                 ret_val = -1;
3952                 goto out;
3953         }
3954
3955         /* if we passed all the tests above then the link is up and we no
3956          * longer need to check for link
3957          */
3958         mac->get_link_status = false;
3959
3960 out:
3961         *link_up = !mac->get_link_status;
3962         return ret_val;
3963 }
3964
3965 /* return 0 means link status changed, -1 means not changed */
3966 static int
3967 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
3968                             int wait_to_complete, int vf)
3969 {
3970         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3971         struct rte_eth_link link, old;
3972         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3973         struct ixgbe_interrupt *intr =
3974                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3975         int link_up;
3976         int diag;
3977         u32 speed = 0;
3978         int wait = 1;
3979         bool autoneg = false;
3980
3981         link.link_status = ETH_LINK_DOWN;
3982         link.link_speed = 0;
3983         link.link_duplex = ETH_LINK_HALF_DUPLEX;
3984         link.link_autoneg = ETH_LINK_AUTONEG;
3985         memset(&old, 0, sizeof(old));
3986         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
3987
3988         hw->mac.get_link_status = true;
3989
3990         if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
3991                 ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
3992                 speed = hw->phy.autoneg_advertised;
3993                 if (!speed)
3994                         ixgbe_get_link_capabilities(hw, &speed, &autoneg);
3995                 ixgbe_setup_link(hw, speed, true);
3996         }
3997
3998         /* don't wait for completion if no wait was requested or the LSC interrupt is enabled */
3999         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
4000                 wait = 0;
4001
4002         if (vf)
4003                 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
4004         else
4005                 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
4006
4007         if (diag != 0) {
4008                 link.link_speed = ETH_SPEED_NUM_100M;
4009                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
4010                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
4011                 if (link.link_status == old.link_status)
4012                         return -1;
4013                 return 0;
4014         }
4015
4016         if (link_up == 0) {
4017                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
4018                 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
4019                 if (link.link_status == old.link_status)
4020                         return -1;
4021                 return 0;
4022         }
4023         intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4024         link.link_status = ETH_LINK_UP;
4025         link.link_duplex = ETH_LINK_FULL_DUPLEX;
4026
4027         switch (link_speed) {
4028         default:
4029         case IXGBE_LINK_SPEED_UNKNOWN:
4030                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
4031                 link.link_speed = ETH_SPEED_NUM_100M;
4032                 break;
4033
4034         case IXGBE_LINK_SPEED_100_FULL:
4035                 link.link_speed = ETH_SPEED_NUM_100M;
4036                 break;
4037
4038         case IXGBE_LINK_SPEED_1GB_FULL:
4039                 link.link_speed = ETH_SPEED_NUM_1G;
4040                 break;
4041
4042         case IXGBE_LINK_SPEED_2_5GB_FULL:
4043                 link.link_speed = ETH_SPEED_NUM_2_5G;
4044                 break;
4045
4046         case IXGBE_LINK_SPEED_5GB_FULL:
4047                 link.link_speed = ETH_SPEED_NUM_5G;
4048                 break;
4049
4050         case IXGBE_LINK_SPEED_10GB_FULL:
4051                 link.link_speed = ETH_SPEED_NUM_10G;
4052                 break;
4053         }
4054         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
4055
4056         if (link.link_status == old.link_status)
4057                 return -1;
4058
4059         return 0;
4060 }
4061
4062 static int
4063 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4064 {
4065         return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
4066 }
4067
4068 static int
4069 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4070 {
4071         return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
4072 }
4073
4074 static void
4075 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
4076 {
4077         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4078         uint32_t fctrl;
4079
4080         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4081         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4082         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4083 }
4084
4085 static void
4086 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
4087 {
4088         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4089         uint32_t fctrl;
4090
4091         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4092         fctrl &= (~IXGBE_FCTRL_UPE);
4093         if (dev->data->all_multicast == 1)
4094                 fctrl |= IXGBE_FCTRL_MPE;
4095         else
4096                 fctrl &= (~IXGBE_FCTRL_MPE);
4097         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4098 }
4099
4100 static void
4101 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4102 {
4103         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4104         uint32_t fctrl;
4105
4106         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4107         fctrl |= IXGBE_FCTRL_MPE;
4108         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4109 }
4110
4111 static void
4112 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4113 {
4114         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4115         uint32_t fctrl;
4116
4117         if (dev->data->promiscuous == 1)
4118                 return; /* must remain in all_multicast mode */
4119
4120         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4121         fctrl &= (~IXGBE_FCTRL_MPE);
4122         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4123 }
4124
4125 /**
4126  * It enables or disables the link status change (LSC) interrupt.
4127  * It is called only once during NIC initialization.
4128  *
4129  * @param dev
4130  *  Pointer to struct rte_eth_dev.
4131  * @param on
4132  *  Enable or Disable.
4133  *
4134  * @return
4135  *  - On success, zero.
4136  *  - On failure, a negative value.
4137  */
4138 static int
4139 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
4140 {
4141         struct ixgbe_interrupt *intr =
4142                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4143
4144         ixgbe_dev_link_status_print(dev);
4145         if (on)
4146                 intr->mask |= IXGBE_EICR_LSC;
4147         else
4148                 intr->mask &= ~IXGBE_EICR_LSC;
4149
4150         return 0;
4151 }
4152
4153 /**
4154  * It enables the receive queue interrupts.
4155  * It is called only once during NIC initialization.
4156  *
4157  * @param dev
4158  *  Pointer to struct rte_eth_dev.
4159  *
4160  * @return
4161  *  - On success, zero.
4162  *  - On failure, a negative value.
4163  */
4164 static int
4165 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4166 {
4167         struct ixgbe_interrupt *intr =
4168                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4169
4170         intr->mask |= IXGBE_EICR_RTX_QUEUE;
4171
4172         return 0;
4173 }
4174
4175 /**
4176  * It enables the MACsec interrupt.
4177  * It is called only once during NIC initialization.
4178  *
4179  * @param dev
4180  *  Pointer to struct rte_eth_dev.
4181  *
4182  * @return
4183  *  - On success, zero.
4184  *  - On failure, a negative value.
4185  */
4186 static int
4187 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4188 {
4189         struct ixgbe_interrupt *intr =
4190                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4191
4192         intr->mask |= IXGBE_EICR_LINKSEC;
4193
4194         return 0;
4195 }
4196
4197 /*
4198  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
4199  *
4200  * @param dev
4201  *  Pointer to struct rte_eth_dev.
4202  *
4203  * @return
4204  *  - On success, zero.
4205  *  - On failure, a negative value.
4206  */
4207 static int
4208 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4209 {
4210         uint32_t eicr;
4211         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4212         struct ixgbe_interrupt *intr =
4213                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4214
4215         /* clear all cause mask */
4216         ixgbe_disable_intr(hw);
4217
4218         /* read-on-clear nic registers here */
4219         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4220         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4221
4222         intr->flags = 0;
4223
4224         /* set flag for async link update */
4225         if (eicr & IXGBE_EICR_LSC)
4226                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4227
4228         if (eicr & IXGBE_EICR_MAILBOX)
4229                 intr->flags |= IXGBE_FLAG_MAILBOX;
4230
4231         if (eicr & IXGBE_EICR_LINKSEC)
4232                 intr->flags |= IXGBE_FLAG_MACSEC;
4233
4234         if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
4235             hw->phy.type == ixgbe_phy_x550em_ext_t &&
4236             (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4237                 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4238
4239         return 0;
4240 }
4241
4242 /**
4243  * It gets and then prints the link status.
4244  *
4245  * @param dev
4246  *  Pointer to struct rte_eth_dev.
4247  *
4248  * @return
4249  *  void
4251  */
4252 static void
4253 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4254 {
4255         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4256         struct rte_eth_link link;
4257
4258         memset(&link, 0, sizeof(link));
4259         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4260         if (link.link_status) {
4261                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4262                                         (int)(dev->data->port_id),
4263                                         (unsigned)link.link_speed,
4264                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4265                                         "full-duplex" : "half-duplex");
4266         } else {
4267                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
4268                                 (int)(dev->data->port_id));
4269         }
4270         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4271                                 pci_dev->addr.domain,
4272                                 pci_dev->addr.bus,
4273                                 pci_dev->addr.devid,
4274                                 pci_dev->addr.function);
4275 }
4276
4277 /*
4278  * It executes link_update after an interrupt has occurred.
4279  *
4280  * @param dev
4281  *  Pointer to struct rte_eth_dev.
4282  *
4283  * @return
4284  *  - On success, zero.
4285  *  - On failure, a negative value.
4286  */
4287 static int
4288 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
4289                            struct rte_intr_handle *intr_handle)
4290 {
4291         struct ixgbe_interrupt *intr =
4292                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4293         int64_t timeout;
4294         struct rte_eth_link link;
4295         struct ixgbe_hw *hw =
4296                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4297
4298         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4299
4300         if (intr->flags & IXGBE_FLAG_MAILBOX) {
4301                 ixgbe_pf_mbx_process(dev);
4302                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
4303         }
4304
4305         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4306                 ixgbe_handle_lasi(hw);
4307                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4308         }
4309
4310         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4311                 /* get the link status before the update, to predict the change */
4312                 memset(&link, 0, sizeof(link));
4313                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4314
4315                 ixgbe_dev_link_update(dev, 0);
4316
4317                 /* link was down, so it is likely coming up */
4318                 if (!link.link_status)
4319                         /* handle it 1 sec later, waiting for it to stabilize */
4320                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4321                 /* link was up, so it is likely going down */
4322                 else
4323                         /* handle it 4 sec later, waiting for it to stabilize */
4324                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4325
4326                 ixgbe_dev_link_status_print(dev);
4327                 if (rte_eal_alarm_set(timeout * 1000,
4328                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4329                         PMD_DRV_LOG(ERR, "Error setting alarm");
4330                 else {
4331                         /* remember original mask */
4332                         intr->mask_original = intr->mask;
4333                         /* only disable lsc interrupt */
4334                         intr->mask &= ~IXGBE_EIMS_LSC;
4335                 }
4336         }
4337
4338         PMD_DRV_LOG(DEBUG, "enable intr immediately");
4339         ixgbe_enable_intr(dev);
4340         rte_intr_enable(intr_handle);
4341
4342         return 0;
4343 }
4344
4345 /**
4346  * Interrupt handler registered as an alarm callback for the delayed handling
4347  * of specific interrupts, in order to wait for a stable NIC state. Since the
4348  * ixgbe interrupt state is not stable right after the link has gone down,
4349  * it waits 4 seconds to get a stable status.
4350  *
4351  * @param handle
4352  *  Pointer to interrupt handle.
4353  * @param param
4354  *  The address of parameter (struct rte_eth_dev *) registered before.
4355  *
4356  * @return
4357  *  void
4358  */
4359 static void
4360 ixgbe_dev_interrupt_delayed_handler(void *param)
4361 {
4362         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4363         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4364         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4365         struct ixgbe_interrupt *intr =
4366                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4367         struct ixgbe_hw *hw =
4368                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4369         uint32_t eicr;
4370
4371         ixgbe_disable_intr(hw);
4372
4373         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4374         if (eicr & IXGBE_EICR_MAILBOX)
4375                 ixgbe_pf_mbx_process(dev);
4376
4377         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4378                 ixgbe_handle_lasi(hw);
4379                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4380         }
4381
4382         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4383                 ixgbe_dev_link_update(dev, 0);
4384                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4385                 ixgbe_dev_link_status_print(dev);
4386                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
4387                                               NULL, NULL);
4388         }
4389
4390         if (intr->flags & IXGBE_FLAG_MACSEC) {
4391                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
4392                                               NULL, NULL);
4393                 intr->flags &= ~IXGBE_FLAG_MACSEC;
4394         }
4395
4396         /* restore original mask */
4397         intr->mask = intr->mask_original;
4398         intr->mask_original = 0;
4399
4400         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4401         ixgbe_enable_intr(dev);
4402         rte_intr_enable(intr_handle);
4403 }
4404
4405 /**
4406  * Interrupt handler triggered by the NIC for handling a
4407  * specific interrupt.
4408  *
4409  * @param handle
4410  *  Pointer to interrupt handle.
4411  * @param param
4412  *  The address of parameter (struct rte_eth_dev *) registered before.
4413  *
4414  * @return
4415  *  void
4416  */
4417 static void
4418 ixgbe_dev_interrupt_handler(void *param)
4419 {
4420         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4421
4422         ixgbe_dev_interrupt_get_status(dev);
4423         ixgbe_dev_interrupt_action(dev, dev->intr_handle);
4424 }
4425
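/*
 * Turn the port LED on. Returns 0 on success or -ENOTSUP if the
 * operation is not supported by the hardware.
 */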
4426 static int
4427 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4428 {
4429         struct ixgbe_hw *hw;
4430
4431         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4432         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4433 }
4434
4435 static int
4436 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4437 {
4438         struct ixgbe_hw *hw;
4439
4440         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4441         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4442 }
4443
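/*
 * Retrieve the current flow control configuration: watermarks, pause time
 * and autoneg setting from the shared hw state, plus the Rx/Tx pause mode
 * derived from the MFLCN and FCCFG registers.
 */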
4444 static int
4445 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4446 {
4447         struct ixgbe_hw *hw;
4448         uint32_t mflcn_reg;
4449         uint32_t fccfg_reg;
4450         int rx_pause;
4451         int tx_pause;
4452
4453         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4454
4455         fc_conf->pause_time = hw->fc.pause_time;
4456         fc_conf->high_water = hw->fc.high_water[0];
4457         fc_conf->low_water = hw->fc.low_water[0];
4458         fc_conf->send_xon = hw->fc.send_xon;
4459         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4460
4461         /*
4462          * Return rx_pause status according to actual setting of
4463          * MFLCN register.
4464          */
4465         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4466         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4467                 rx_pause = 1;
4468         else
4469                 rx_pause = 0;
4470
4471         /*
4472          * Return tx_pause status according to actual setting of
4473          * FCCFG register.
4474          */
4475         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4476         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4477                 tx_pause = 1;
4478         else
4479                 tx_pause = 0;
4480
4481         if (rx_pause && tx_pause)
4482                 fc_conf->mode = RTE_FC_FULL;
4483         else if (rx_pause)
4484                 fc_conf->mode = RTE_FC_RX_PAUSE;
4485         else if (tx_pause)
4486                 fc_conf->mode = RTE_FC_TX_PAUSE;
4487         else
4488                 fc_conf->mode = RTE_FC_NONE;
4489
4490         return 0;
4491 }
4492
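/*
 * Configure link-level flow control. The high/low watermarks are validated
 * against the Rx packet buffer size, the requested mode is programmed via
 * ixgbe_fc_enable(), and MFLCN.PMCF is set or cleared to control forwarding
 * of MAC control frames.
 */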
4493 static int
4494 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4495 {
4496         struct ixgbe_hw *hw;
4497         int err;
4498         uint32_t rx_buf_size;
4499         uint32_t max_high_water;
4500         uint32_t mflcn;
4501         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4502                 ixgbe_fc_none,
4503                 ixgbe_fc_rx_pause,
4504                 ixgbe_fc_tx_pause,
4505                 ixgbe_fc_full
4506         };
4507
4508         PMD_INIT_FUNC_TRACE();
4509
4510         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4511         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4512         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4513
4514         /*
4515          * Reserve at least one Ethernet frame for the watermarks;
4516          * high_water/low_water are in kilobytes for ixgbe
4517          */
4518         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4519         if ((fc_conf->high_water > max_high_water) ||
4520                 (fc_conf->high_water < fc_conf->low_water)) {
4521                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4522                 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4523                 return -EINVAL;
4524         }
4525
4526         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4527         hw->fc.pause_time     = fc_conf->pause_time;
4528         hw->fc.high_water[0]  = fc_conf->high_water;
4529         hw->fc.low_water[0]   = fc_conf->low_water;
4530         hw->fc.send_xon       = fc_conf->send_xon;
4531         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
4532
4533         err = ixgbe_fc_enable(hw);
4534
4535         /* Not negotiated is not an error case */
4536         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
4537
4538                 /* check if we want to forward MAC frames - driver doesn't have native
4539                  * capability to do that, so we'll write the registers ourselves */
4540
4541                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4542
4543                 /* set or clear MFLCN.PMCF bit depending on configuration */
4544                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4545                         mflcn |= IXGBE_MFLCN_PMCF;
4546                 else
4547                         mflcn &= ~IXGBE_MFLCN_PMCF;
4548
4549                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
4550                 IXGBE_WRITE_FLUSH(hw);
4551
4552                 return 0;
4553         }
4554
4555         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
4556         return -EIO;
4557 }
4558
4559 /**
4560  *  ixgbe_dcb_pfc_enable_generic - Enable priority flow control
4561  *  @hw: pointer to hardware structure
4562  *  @tc_num: traffic class number
4563  *  Enable flow control according to the current settings.
4564  */
4565 static int
4566 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4567 {
4568         int ret_val = 0;
4569         uint32_t mflcn_reg, fccfg_reg;
4570         uint32_t reg;
4571         uint32_t fcrtl, fcrth;
4572         uint8_t i;
4573         uint8_t nb_rx_en;
4574
4575         /* Validate the water mark configuration */
4576         if (!hw->fc.pause_time) {
4577                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4578                 goto out;
4579         }
4580
4581         /* Low water mark of zero causes XOFF floods */
4582         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4583                  /* High/Low water marks cannot be 0 */
4584                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4585                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4586                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4587                         goto out;
4588                 }
4589
4590                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4591                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4592                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4593                         goto out;
4594                 }
4595         }
4596         /* Negotiate the fc mode to use */
4597         ixgbe_fc_autoneg(hw);
4598
4599         /* Disable any previous flow control settings */
4600         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4601         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4602
4603         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4604         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4605
4606         switch (hw->fc.current_mode) {
4607         case ixgbe_fc_none:
4608                 /*
4609                  * If more than one RX Priority Flow Control is enabled,
4610                  * TX pause cannot be disabled.
4611                  */
4612                 nb_rx_en = 0;
4613                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4614                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4615                         if (reg & IXGBE_FCRTH_FCEN)
4616                                 nb_rx_en++;
4617                 }
4618                 if (nb_rx_en > 1)
4619                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4620                 break;
4621         case ixgbe_fc_rx_pause:
4622                 /*
4623                  * Rx Flow control is enabled and Tx Flow control is
4624                  * disabled by software override. Since there really
4625                  * isn't a way to advertise that we are capable of RX
4626                  * Pause ONLY, we will advertise that we support both
4627                  * symmetric and asymmetric Rx PAUSE.  Later, we will
4628                  * disable the adapter's ability to send PAUSE frames.
4629                  */
4630                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4631                 /*
4632                  * If more than one RX Priority Flow Control is enabled,
4633                  * TX pause cannot be disabled.
4634                  */
4635                 nb_rx_en = 0;
4636                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4637                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4638                         if (reg & IXGBE_FCRTH_FCEN)
4639                                 nb_rx_en++;
4640                 }
4641                 if (nb_rx_en > 1)
4642                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4643                 break;
4644         case ixgbe_fc_tx_pause:
4645                 /*
4646                  * Tx Flow control is enabled, and Rx Flow control is
4647                  * disabled by software override.
4648                  */
4649                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4650                 break;
4651         case ixgbe_fc_full:
4652                 /* Flow control (both Rx and Tx) is enabled by SW override. */
4653                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4654                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4655                 break;
4656         default:
4657                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4658                 ret_val = IXGBE_ERR_CONFIG;
4659                 goto out;
4660         }
4661
4662         /* Set 802.3x based flow control settings. */
4663         mflcn_reg |= IXGBE_MFLCN_DPF;
4664         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4665         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4666
4667         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
4668         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4669                 hw->fc.high_water[tc_num]) {
4670                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4671                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4672                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4673         } else {
4674                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4675                 /*
4676                  * In order to prevent Tx hangs when the internal Tx
4677                  * switch is enabled we must set the high water mark
4678                  * to the maximum FCRTH value.  This allows the Tx
4679                  * switch to function even under heavy Rx workloads.
4680                  */
4681                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4682         }
4683         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4684
4685         /* Configure pause time (2 TCs per register) */
4686         reg = hw->fc.pause_time * 0x00010001;
4687         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4688                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4689
4690         /* Configure flow control refresh threshold value */
4691         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4692
4693 out:
4694         return ret_val;
4695 }
4696
4697 static int
4698 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4699 {
4700         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4701         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4702
4703         if (hw->mac.type != ixgbe_mac_82598EB) {
4704                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4705         }
4706         return ret_val;
4707 }
4708
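/*
 * Configure priority flow control (PFC) for the traffic class mapped to the
 * given user priority. The watermarks are validated against the per-TC Rx
 * packet buffer size before the PFC settings are applied.
 */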
4709 static int
4710 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4711 {
4712         int err;
4713         uint32_t rx_buf_size;
4714         uint32_t max_high_water;
4715         uint8_t tc_num;
4716         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4717         struct ixgbe_hw *hw =
4718                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4719         struct ixgbe_dcb_config *dcb_config =
4720                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4721
4722         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4723                 ixgbe_fc_none,
4724                 ixgbe_fc_rx_pause,
4725                 ixgbe_fc_tx_pause,
4726                 ixgbe_fc_full
4727         };
4728
4729         PMD_INIT_FUNC_TRACE();
4730
4731         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4732         tc_num = map[pfc_conf->priority];
4733         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4734         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4735         /*
4736          * Reserve at least one Ethernet frame for the watermarks;
4737          * high_water/low_water are in kilobytes for ixgbe
4738          */
4739         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4740         if ((pfc_conf->fc.high_water > max_high_water) ||
4741             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4742                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4743                 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4744                 return -EINVAL;
4745         }
4746
4747         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4748         hw->fc.pause_time = pfc_conf->fc.pause_time;
4749         hw->fc.send_xon = pfc_conf->fc.send_xon;
4750         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
4751         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4752
4753         err = ixgbe_dcb_pfc_enable(dev, tc_num);
4754
4755         /* Not negotiated is not an error case */
4756         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4757                 return 0;
4758
4759         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4760         return -EIO;
4761 }
4762
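/*
 * Update the RSS redirection table (RETA). The supplied table size must
 * match the hardware RETA size; only entries selected by the per-group
 * mask are written, four 8-bit entries per register.
 */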
4763 static int
4764 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
4765                           struct rte_eth_rss_reta_entry64 *reta_conf,
4766                           uint16_t reta_size)
4767 {
4768         uint16_t i, sp_reta_size;
4769         uint8_t j, mask;
4770         uint32_t reta, r;
4771         uint16_t idx, shift;
4772         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4773         uint32_t reta_reg;
4774
4775         PMD_INIT_FUNC_TRACE();
4776
4777         if (!ixgbe_rss_update_sp(hw->mac.type)) {
4778                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
4779                         "NIC.");
4780                 return -ENOTSUP;
4781         }
4782
4783         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4784         if (reta_size != sp_reta_size) {
4785                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4786                         "(%d) doesn't match the number the hardware can support "
4787                         "(%d)", reta_size, sp_reta_size);
4788                 return -EINVAL;
4789         }
4790
4791         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4792                 idx = i / RTE_RETA_GROUP_SIZE;
4793                 shift = i % RTE_RETA_GROUP_SIZE;
4794                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4795                                                 IXGBE_4_BIT_MASK);
4796                 if (!mask)
4797                         continue;
4798                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4799                 if (mask == IXGBE_4_BIT_MASK)
4800                         r = 0;
4801                 else
4802                         r = IXGBE_READ_REG(hw, reta_reg);
4803                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4804                         if (mask & (0x1 << j))
4805                                 reta |= reta_conf[idx].reta[shift + j] <<
4806                                                         (CHAR_BIT * j);
4807                         else
4808                                 reta |= r & (IXGBE_8_BIT_MASK <<
4809                                                 (CHAR_BIT * j));
4810                 }
4811                 IXGBE_WRITE_REG(hw, reta_reg, reta);
4812         }
4813
4814         return 0;
4815 }
4816
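/*
 * Read back the RSS redirection table (RETA) entries selected by the
 * per-group mask into reta_conf.
 */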
4817 static int
4818 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4819                          struct rte_eth_rss_reta_entry64 *reta_conf,
4820                          uint16_t reta_size)
4821 {
4822         uint16_t i, sp_reta_size;
4823         uint8_t j, mask;
4824         uint32_t reta;
4825         uint16_t idx, shift;
4826         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4827         uint32_t reta_reg;
4828
4829         PMD_INIT_FUNC_TRACE();
4830         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4831         if (reta_size != sp_reta_size) {
4832                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4833                         "(%d) doesn't match the number the hardware can support "
4834                         "(%d)", reta_size, sp_reta_size);
4835                 return -EINVAL;
4836         }
4837
4838         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4839                 idx = i / RTE_RETA_GROUP_SIZE;
4840                 shift = i % RTE_RETA_GROUP_SIZE;
4841                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4842                                                 IXGBE_4_BIT_MASK);
4843                 if (!mask)
4844                         continue;
4845
4846                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4847                 reta = IXGBE_READ_REG(hw, reta_reg);
4848                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4849                         if (mask & (0x1 << j))
4850                                 reta_conf[idx].reta[shift + j] =
4851                                         ((reta >> (CHAR_BIT * j)) &
4852                                                 IXGBE_8_BIT_MASK);
4853                 }
4854         }
4855
4856         return 0;
4857 }
4858
4859 static int
4860 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4861                                 uint32_t index, uint32_t pool)
4862 {
4863         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4864         uint32_t enable_addr = 1;
4865
4866         return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
4867                              pool, enable_addr);
4868 }
4869
4870 static void
4871 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
4872 {
4873         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4874
4875         ixgbe_clear_rar(hw, index);
4876 }
4877
4878 static void
4879 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4880 {
4881         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4882
4883         ixgbe_remove_rar(dev, 0);
4884
4885         ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
4886 }
4887
4888 static bool
4889 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
4890 {
4891         if (strcmp(dev->device->driver->name, drv->driver.name))
4892                 return false;
4893
4894         return true;
4895 }
4896
4897 bool
4898 is_ixgbe_supported(struct rte_eth_dev *dev)
4899 {
4900         return is_device_supported(dev, &rte_ixgbe_pmd);
4901 }
4902
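/*
 * Set the port MTU. The resulting frame size is validated against the
 * device limits, jumbo frame mode (HLREG0.JUMBOEN) is toggled as needed
 * and the new maximum frame size is programmed into MAXFRS.
 */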
4903 static int
4904 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4905 {
4906         uint32_t hlreg0;
4907         uint32_t maxfrs;
4908         struct ixgbe_hw *hw;
4909         struct rte_eth_dev_info dev_info;
4910         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4911         struct rte_eth_dev_data *dev_data = dev->data;
4912
4913         ixgbe_dev_info_get(dev, &dev_info);
4914
4915         /* check that mtu is within the allowed range */
4916         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
4917                 return -EINVAL;
4918
4919         /* If the device is started, refuse an MTU that requires scattered
4920          * packet support when this feature has not been enabled before.
4921          */
4922         if (dev_data->dev_started && !dev_data->scattered_rx &&
4923             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
4924              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
4925                 PMD_INIT_LOG(ERR, "Stop port first.");
4926                 return -EINVAL;
4927         }
4928
4929         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4930         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4931
4932         /* switch to jumbo mode if needed */
4933         if (frame_size > ETHER_MAX_LEN) {
4934                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
4935                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4936         } else {
4937                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
4938                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4939         }
4940         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4941
4942         /* update max frame size */
4943         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4944
4945         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4946         maxfrs &= 0x0000FFFF;
4947         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
4948         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4949
4950         return 0;
4951 }
4952
4953 /*
4954  * Virtual Function operations
4955  */
4956 static void
4957 ixgbevf_intr_disable(struct ixgbe_hw *hw)
4958 {
4959         PMD_INIT_FUNC_TRACE();
4960
4961         /* Clear interrupt mask to stop from interrupts being generated */
4962         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
4963
4964         IXGBE_WRITE_FLUSH(hw);
4965 }
4966
4967 static void
4968 ixgbevf_intr_enable(struct ixgbe_hw *hw)
4969 {
4970         PMD_INIT_FUNC_TRACE();
4971
4972         /* VF enable interrupt autoclean */
4973         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
4974         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
4975         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
4976
4977         IXGBE_WRITE_FLUSH(hw);
4978 }
4979
4980 static int
4981 ixgbevf_dev_configure(struct rte_eth_dev *dev)
4982 {
4983         struct rte_eth_conf *conf = &dev->data->dev_conf;
4984         struct ixgbe_adapter *adapter =
4985                         (struct ixgbe_adapter *)dev->data->dev_private;
4986
4987         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
4988                      dev->data->port_id);
4989
4990         /*
4991          * The VF has no ability to enable/disable HW CRC stripping.
4992          * Keep the behavior consistent with the host PF.
4993          */
4994 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
4995         if (!conf->rxmode.hw_strip_crc) {
4996                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
4997                 conf->rxmode.hw_strip_crc = 1;
4998         }
4999 #else
5000         if (conf->rxmode.hw_strip_crc) {
5001                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
5002                 conf->rxmode.hw_strip_crc = 0;
5003         }
5004 #endif
5005
5006         /*
5007          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
5008          * allocation or vector Rx preconditions, we will reset it.
5009          */
5010         adapter->rx_bulk_alloc_allowed = true;
5011         adapter->rx_vec_allowed = true;
5012
5013         return 0;
5014 }
5015
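/*
 * Start the VF device: reset the hardware, negotiate the mailbox API with
 * the PF, initialize the Tx/Rx queues, restore VLAN filter and strip
 * settings, set up the Rx queue interrupt vector and re-enable interrupts.
 */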
5016 static int
5017 ixgbevf_dev_start(struct rte_eth_dev *dev)
5018 {
5019         struct ixgbe_hw *hw =
5020                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5021         uint32_t intr_vector = 0;
5022         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5023         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5024
5025         int err, mask = 0;
5026
5027         PMD_INIT_FUNC_TRACE();
5028
5029         hw->mac.ops.reset_hw(hw);
5030         hw->mac.get_link_status = true;
5031
5032         /* negotiate mailbox API version to use with the PF. */
5033         ixgbevf_negotiate_api(hw);
5034
5035         ixgbevf_dev_tx_init(dev);
5036
5037         /* This can fail when allocating mbufs for descriptor rings */
5038         err = ixgbevf_dev_rx_init(dev);
5039         if (err) {
5040                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
5041                 ixgbe_dev_clear_queues(dev);
5042                 return err;
5043         }
5044
5045         /* Set vfta */
5046         ixgbevf_set_vfta_all(dev, 1);
5047
5048         /* Set HW strip */
5049         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
5050                 ETH_VLAN_EXTEND_MASK;
5051         err = ixgbevf_vlan_offload_set(dev, mask);
5052         if (err) {
5053                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
5054                 ixgbe_dev_clear_queues(dev);
5055                 return err;
5056         }
5057
5058         ixgbevf_dev_rxtx_start(dev);
5059
5060         /* check and configure queue intr-vector mapping */
5061         if (dev->data->dev_conf.intr_conf.rxq != 0) {
5062                 /* According to the datasheet, only vectors 0/1/2 can be used;
5063                  * currently only one vector is used for the Rx queues
5064                  */
5065                 intr_vector = 1;
5066                 if (rte_intr_efd_enable(intr_handle, intr_vector))
5067                         return -1;
5068         }
5069
5070         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
5071                 intr_handle->intr_vec =
5072                         rte_zmalloc("intr_vec",
5073                                     dev->data->nb_rx_queues * sizeof(int), 0);
5074                 if (intr_handle->intr_vec == NULL) {
5075                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
5076                                      " intr_vec", dev->data->nb_rx_queues);
5077                         return -ENOMEM;
5078                 }
5079         }
5080         ixgbevf_configure_msix(dev);
5081
5082         /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
5083          * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
5084          * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( )
5085          * is not cleared, it will fail when following rte_intr_enable( ) tries
5086          * to map Rx queue interrupt to other VFIO vectors.
5087          * So clear uio/vfio intr/eventfd first to avoid failure.
5088          */
5089         rte_intr_disable(intr_handle);
5090
5091         rte_intr_enable(intr_handle);
5092
5093         /* Re-enable interrupt for VF */
5094         ixgbevf_intr_enable(hw);
5095
5096         return 0;
5097 }
5098
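/*
 * Stop the VF device: disable interrupts, stop the adapter, clear the VLAN
 * filters set by the driver, release the queues and free the interrupt
 * vector mapping.
 */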
5099 static void
5100 ixgbevf_dev_stop(struct rte_eth_dev *dev)
5101 {
5102         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5103         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5104         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5105
5106         PMD_INIT_FUNC_TRACE();
5107
5108         ixgbevf_intr_disable(hw);
5109
5110         hw->adapter_stopped = 1;
5111         ixgbe_stop_adapter(hw);
5112
5113         /*
5114          * Clear what we set, but we still keep shadow_vfta to
5115          * restore it after the device starts
5116          */
5117         ixgbevf_set_vfta_all(dev, 0);
5118
5119         /* Clear stored conf */
5120         dev->data->scattered_rx = 0;
5121
5122         ixgbe_dev_clear_queues(dev);
5123
5124         /* Clean datapath event and queue/vec mapping */
5125         rte_intr_efd_disable(intr_handle);
5126         if (intr_handle->intr_vec != NULL) {
5127                 rte_free(intr_handle->intr_vec);
5128                 intr_handle->intr_vec = NULL;
5129         }
5130 }
5131
5132 static void
5133 ixgbevf_dev_close(struct rte_eth_dev *dev)
5134 {
5135         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5136
5137         PMD_INIT_FUNC_TRACE();
5138
5139         ixgbe_reset_hw(hw);
5140
5141         ixgbevf_dev_stop(dev);
5142
5143         ixgbe_dev_free_queues(dev);
5144
5145         /**
5146          * Remove the VF MAC address to ensure
5147          * that the VF traffic goes to the PF
5148          * after stop, close and detach of the VF
5149          **/
5150         ixgbevf_remove_mac_addr(dev, 0);
5151 }
5152
5153 /*
5154  * Reset VF device
5155  */
5156 static int
5157 ixgbevf_dev_reset(struct rte_eth_dev *dev)
5158 {
5159         int ret;
5160
5161         ret = eth_ixgbevf_dev_uninit(dev);
5162         if (ret)
5163                 return ret;
5164
5165         ret = eth_ixgbevf_dev_init(dev);
5166
5167         return ret;
5168 }
5169
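/*
 * Walk the shadow VFTA and enable (on != 0) or disable every VLAN ID that
 * was previously configured, calling ixgbe_set_vfta() for each set bit.
 */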
5170 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5171 {
5172         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5173         struct ixgbe_vfta *shadow_vfta =
5174                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5175         int i = 0, j = 0, vfta = 0, mask = 1;
5176
5177         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5178                 vfta = shadow_vfta->vfta[i];
5179                 if (vfta) {
5180                         mask = 1;
5181                         for (j = 0; j < 32; j++) {
5182                                 if (vfta & mask)
5183                                         ixgbe_set_vfta(hw, (i<<5)+j, 0,
5184                                                        on, false);
5185                                 mask <<= 1;
5186                         }
5187                 }
5188         }
5189
5190 }
5191
5192 static int
5193 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5194 {
5195         struct ixgbe_hw *hw =
5196                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5197         struct ixgbe_vfta *shadow_vfta =
5198                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5199         uint32_t vid_idx = 0;
5200         uint32_t vid_bit = 0;
5201         int ret = 0;
5202
5203         PMD_INIT_FUNC_TRACE();
5204
5205         /* vind is not used in the VF driver, set it to 0; see ixgbe_set_vfta_vf */
5206         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5207         if (ret) {
5208                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5209                 return ret;
5210         }
5211         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5212         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5213
5214         /* Save what we set and restore it after device reset */
5215         if (on)
5216                 shadow_vfta->vfta[vid_idx] |= vid_bit;
5217         else
5218                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
5219
5220         return 0;
5221 }
5222
5223 static void
5224 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5225 {
5226         struct ixgbe_hw *hw =
5227                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5228         uint32_t ctrl;
5229
5230         PMD_INIT_FUNC_TRACE();
5231
5232         if (queue >= hw->mac.max_rx_queues)
5233                 return;
5234
5235         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5236         if (on)
5237                 ctrl |= IXGBE_RXDCTL_VME;
5238         else
5239                 ctrl &= ~IXGBE_RXDCTL_VME;
5240         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5241
5242         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5243 }
5244
5245 static int
5246 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5247 {
5248         struct ixgbe_hw *hw =
5249                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5250         uint16_t i;
5251         int on = 0;
5252
5253         /* The VF only supports the HW VLAN strip feature; other offloads are not supported */
5254         if (mask & ETH_VLAN_STRIP_MASK) {
5255                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
5256
5257                 for (i = 0; i < hw->mac.max_rx_queues; i++)
5258                         ixgbevf_vlan_strip_queue_set(dev, i, on);
5259         }
5260
5261         return 0;
5262 }
5263
5264 int
5265 ixgbe_vt_check(struct ixgbe_hw *hw)
5266 {
5267         uint32_t reg_val;
5268
5269         /* if Virtualization Technology is enabled */
5270         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
5271         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
5272                 PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
5273                 return -1;
5274         }
5275
5276         return 0;
5277 }
5278
5279 static uint32_t
5280 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
5281 {
5282         uint32_t vector = 0;
5283
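             /* mc_filter_type selects which 12 bits of the upper MAC address bytes form the hash */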
5284         switch (hw->mac.mc_filter_type) {
5285         case 0:   /* use bits [47:36] of the address */
5286                 vector = ((uc_addr->addr_bytes[4] >> 4) |
5287                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
5288                 break;
5289         case 1:   /* use bits [46:35] of the address */
5290                 vector = ((uc_addr->addr_bytes[4] >> 3) |
5291                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
5292                 break;
5293         case 2:   /* use bits [45:34] of the address */
5294                 vector = ((uc_addr->addr_bytes[4] >> 2) |
5295                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
5296                 break;
5297         case 3:   /* use bits [43:32] of the address */
5298                 vector = ((uc_addr->addr_bytes[4]) |
5299                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
5300                 break;
5301         default:  /* Invalid mc_filter_type */
5302                 break;
5303         }
5304
5305         /* the vector can only be 12 bits wide or the table boundary will be exceeded */
5306         vector &= 0xFFF;
5307         return vector;
5308 }
5309
5310 static int
5311 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5312                         uint8_t on)
5313 {
5314         uint32_t vector;
5315         uint32_t uta_idx;
5316         uint32_t reg_val;
5317         uint32_t uta_shift;
5318         uint32_t rc;
5319         const uint32_t ixgbe_uta_idx_mask = 0x7F;
5320         const uint32_t ixgbe_uta_bit_shift = 5;
5321         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
5322         const uint32_t bit1 = 0x1;
5323
5324         struct ixgbe_hw *hw =
5325                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5326         struct ixgbe_uta_info *uta_info =
5327                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5328
5329         /* The UTA table only exists on 82599 hardware and newer */
5330         if (hw->mac.type < ixgbe_mac_82599EB)
5331                 return -ENOTSUP;
5332
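             /*
              * The 12-bit hash vector is split into a UTA register index (upper
              * seven bits) and a bit position within that register (lower five bits).
              */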
5333         vector = ixgbe_uta_vector(hw, mac_addr);
5334         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
5335         uta_shift = vector & ixgbe_uta_bit_mask;
5336
5337         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
5338         if (rc == on)
5339                 return 0;
5340
5341         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
5342         if (on) {
5343                 uta_info->uta_in_use++;
5344                 reg_val |= (bit1 << uta_shift);
5345                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
5346         } else {
5347                 uta_info->uta_in_use--;
5348                 reg_val &= ~(bit1 << uta_shift);
5349                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
5350         }
5351
5352         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
5353
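             /* Keep the multicast filter enabled while any UTA entries are in use */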
5354         if (uta_info->uta_in_use > 0)
5355                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
5356                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
5357         else
5358                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
5359
5360         return 0;
5361 }
5362
5363 static int
5364 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
5365 {
5366         int i;
5367         struct ixgbe_hw *hw =
5368                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5369         struct ixgbe_uta_info *uta_info =
5370                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5371
5372         /* The UTA table only exists on 82599 hardware and newer */
5373         if (hw->mac.type < ixgbe_mac_82599EB)
5374                 return -ENOTSUP;
5375
5376         if (on) {
5377                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5378                         uta_info->uta_shadow[i] = ~0;
5379                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
5380                 }
5381         } else {
5382                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5383                         uta_info->uta_shadow[i] = 0;
5384                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
5385                 }
5386         }
5387         return 0;
5388
5389 }
5390
5391 uint32_t
5392 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
5393 {
5394         uint32_t new_val = orig_val;
5395
5396         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
5397                 new_val |= IXGBE_VMOLR_AUPE;
5398         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
5399                 new_val |= IXGBE_VMOLR_ROMPE;
5400         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
5401                 new_val |= IXGBE_VMOLR_ROPE;
5402         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
5403                 new_val |= IXGBE_VMOLR_BAM;
5404         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
5405                 new_val |= IXGBE_VMOLR_MPE;
5406
5407         return new_val;
5408 }
5409
5410 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
5411 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
5412 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
5413 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
5414 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5415         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5416         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5417
5418 static int
5419 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5420                       struct rte_eth_mirror_conf *mirror_conf,
5421                       uint8_t rule_id, uint8_t on)
5422 {
5423         uint32_t mr_ctl, vlvf;
5424         uint32_t mp_lsb = 0;
5425         uint32_t mv_msb = 0;
5426         uint32_t mv_lsb = 0;
5427         uint32_t mp_msb = 0;
5428         uint8_t i = 0;
5429         int reg_index = 0;
5430         uint64_t vlan_mask = 0;
5431
5432         const uint8_t pool_mask_offset = 32;
5433         const uint8_t vlan_mask_offset = 32;
5434         const uint8_t dst_pool_offset = 8;
5435         const uint8_t rule_mr_offset  = 4;
5436         const uint8_t mirror_rule_mask = 0x0F;
5437
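             /*
              * The 64-bit pool and VLAN mirror masks are each split across two
              * 32-bit registers; the upper half is written at rule_id + rule_mr_offset.
              */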
5438         struct ixgbe_mirror_info *mr_info =
5439                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5440         struct ixgbe_hw *hw =
5441                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5442         uint8_t mirror_type = 0;
5443
5444         if (ixgbe_vt_check(hw) < 0)
5445                 return -ENOTSUP;
5446
5447         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5448                 return -EINVAL;
5449
5450         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5451                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5452                             mirror_conf->rule_type);
5453                 return -EINVAL;
5454         }
5455
5456         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5457                 mirror_type |= IXGBE_MRCTL_VLME;
5458                 /* Check if the VLAN ID is valid and find the corresponding VLAN ID
5459                  * index in VLVF
5460                  */
5461                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5462                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5463                                 /* search for the pool VLAN filter index
5464                                  * related to this VLAN ID
5465                                  */
5466                                 reg_index = ixgbe_find_vlvf_slot(
5467                                                 hw,
5468                                                 mirror_conf->vlan.vlan_id[i],
5469                                                 false);
5470                                 if (reg_index < 0)
5471                                         return -EINVAL;
5472                                 vlvf = IXGBE_READ_REG(hw,
5473                                                       IXGBE_VLVF(reg_index));
5474                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
5475                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5476                                       mirror_conf->vlan.vlan_id[i]))
5477                                         vlan_mask |= (1ULL << reg_index);
5478                                 else
5479                                         return -EINVAL;
5480                         }
5481                 }
5482
5483                 if (on) {
5484                         mv_lsb = vlan_mask & 0xFFFFFFFF;
5485                         mv_msb = vlan_mask >> vlan_mask_offset;
5486
5487                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
5488                                                 mirror_conf->vlan.vlan_mask;
5489                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5490                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5491                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5492                                                 mirror_conf->vlan.vlan_id[i];
5493                         }
5494                 } else {
5495                         mv_lsb = 0;
5496                         mv_msb = 0;
5497                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5498                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5499                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5500                 }
5501         }
5502
5503         /**
5504          * If pool mirroring is enabled, write the related pool mask registers;
5505          * if it is disabled, clear the PFMRVM registers.
5506          */
5507         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5508                 mirror_type |= IXGBE_MRCTL_VPME;
5509                 if (on) {
5510                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5511                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5512                         mr_info->mr_conf[rule_id].pool_mask =
5513                                         mirror_conf->pool_mask;
5514
5515                 } else {
5516                         mp_lsb = 0;
5517                         mp_msb = 0;
5518                         mr_info->mr_conf[rule_id].pool_mask = 0;
5519                 }
5520         }
5521         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5522                 mirror_type |= IXGBE_MRCTL_UPME;
5523         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5524                 mirror_type |= IXGBE_MRCTL_DPME;
5525
5526         /* read the mirror control register and recalculate it */
5527         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5528
5529         if (on) {
5530                 mr_ctl |= mirror_type;
5531                 mr_ctl &= mirror_rule_mask;
5532                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
5533         } else {
5534                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
5535         }
5536
5537         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5538         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5539
5540         /* write the mirror control register */
5541         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5542
5543         /* write the pool mirror control register */
5544         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5545                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5546                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5547                                 mp_msb);
5548         }
5549         /* write the VLAN mirror control register */
5550         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5551                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5552                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5553                                 mv_msb);
5554         }
5555
5556         return 0;
5557 }
5558
5559 static int
5560 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5561 {
5562         int mr_ctl = 0;
5563         uint32_t lsb_val = 0;
5564         uint32_t msb_val = 0;
5565         const uint8_t rule_mr_offset = 4;
5566
5567         struct ixgbe_hw *hw =
5568                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5569         struct ixgbe_mirror_info *mr_info =
5570                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5571
5572         if (ixgbe_vt_check(hw) < 0)
5573                 return -ENOTSUP;
5574
5575         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5576                 return -EINVAL;
5577
5578         memset(&mr_info->mr_conf[rule_id], 0,
5579                sizeof(struct rte_eth_mirror_conf));
5580
5581         /* clear PFVMCTL register */
5582         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5583
5584         /* clear pool mask register */
5585         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5586         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5587
5588         /* clear vlan mask register */
5589         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5590         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
5591
5592         return 0;
5593 }
5594
5595 static int
5596 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5597 {
5598         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5599         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5600         uint32_t mask;
5601         struct ixgbe_hw *hw =
5602                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5603         uint32_t vec = IXGBE_MISC_VEC_ID;
5604
5605         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5606         if (rte_intr_allow_others(intr_handle))
5607                 vec = IXGBE_RX_VEC_START;
5608         mask |= (1 << vec);
5609         RTE_SET_USED(queue_id);
5610         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5611
5612         rte_intr_enable(intr_handle);
5613
5614         return 0;
5615 }
5616
5617 static int
5618 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5619 {
5620         uint32_t mask;
5621         struct ixgbe_hw *hw =
5622                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5623         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5624         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5625         uint32_t vec = IXGBE_MISC_VEC_ID;
5626
5627         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5628         if (rte_intr_allow_others(intr_handle))
5629                 vec = IXGBE_RX_VEC_START;
5630         mask &= ~(1 << vec);
5631         RTE_SET_USED(queue_id);
5632         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5633
5634         return 0;
5635 }
5636
5637 static int
5638 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5639 {
5640         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5641         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5642         uint32_t mask;
5643         struct ixgbe_hw *hw =
5644                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5645         struct ixgbe_interrupt *intr =
5646                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5647
5648         if (queue_id < 16) {
5649                 ixgbe_disable_intr(hw);
5650                 intr->mask |= (1 << queue_id);
5651                 ixgbe_enable_intr(dev);
5652         } else if (queue_id < 32) {
5653                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5654                 mask &= (1 << queue_id);
5655                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5656         } else if (queue_id < 64) {
5657                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5658                 mask &= (1 << (queue_id - 32));
5659                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5660         }
5661         rte_intr_enable(intr_handle);
5662
5663         return 0;
5664 }
5665
5666 static int
5667 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5668 {
5669         uint32_t mask;
5670         struct ixgbe_hw *hw =
5671                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5672         struct ixgbe_interrupt *intr =
5673                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5674
5675         if (queue_id < 16) {
5676                 ixgbe_disable_intr(hw);
5677                 intr->mask &= ~(1 << queue_id);
5678                 ixgbe_enable_intr(dev);
5679         } else if (queue_id < 32) {
5680                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5681                 mask &= ~(1 << queue_id);
5682                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5683         } else if (queue_id < 64) {
5684                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5685                 mask &= ~(1 << (queue_id - 32));
5686                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5687         }
5688
5689         return 0;
5690 }
5691
5692 static void
5693 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5694                      uint8_t queue, uint8_t msix_vector)
5695 {
5696         uint32_t tmp, idx;
5697
5698         if (direction == -1) {
5699                 /* other causes */
5700                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5701                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
5702                 tmp &= ~0xFF;
5703                 tmp |= msix_vector;
5704                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
5705         } else {
5706                 /* rx or tx cause */
5707                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
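                     /*
                      * Each VTIVAR register covers two queues with an 8-bit entry per
                      * cause: Rx in the low byte and Tx in the high byte of each 16-bit half.
                      */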
5708                 idx = ((16 * (queue & 1)) + (8 * direction));
5709                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
5710                 tmp &= ~(0xFF << idx);
5711                 tmp |= (msix_vector << idx);
5712                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
5713         }
5714 }
5715
5716 /**
5717  * set the IVAR registers, mapping interrupt causes to vectors
5718  * @param hw
5719  *  pointer to ixgbe_hw struct
5720  * @direction
5721  *  0 for Rx, 1 for Tx, -1 for other causes
5722  * @queue
5723  *  queue to map the corresponding interrupt to
5724  * @msix_vector
5725  *  the vector to map to the corresponding queue
5726  */
5727 static void
5728 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5729                    uint8_t queue, uint8_t msix_vector)
5730 {
5731         uint32_t tmp, idx;
5732
5733         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5734         if (hw->mac.type == ixgbe_mac_82598EB) {
5735                 if (direction == -1)
5736                         direction = 0;
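                     /*
                      * 82598: each IVAR register holds four 8-bit entries; Tx causes
                      * are offset by 64 entries from the Rx causes.
                      */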
5737                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
5738                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
5739                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
5740                 tmp |= (msix_vector << (8 * (queue & 0x3)));
5741                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
5742         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
5743                         (hw->mac.type == ixgbe_mac_X540) ||
5744                         (hw->mac.type == ixgbe_mac_X550)) {
5745                 if (direction == -1) {
5746                         /* other causes */
5747                         idx = ((queue & 1) * 8);
5748                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5749                         tmp &= ~(0xFF << idx);
5750                         tmp |= (msix_vector << idx);
5751                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
5752                 } else {
5753                         /* rx or tx causes */
5754                         idx = ((16 * (queue & 1)) + (8 * direction));
5755                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
5756                         tmp &= ~(0xFF << idx);
5757                         tmp |= (msix_vector << idx);
5758                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
5759                 }
5760         }
5761 }
5762
5763 static void
5764 ixgbevf_configure_msix(struct rte_eth_dev *dev)
5765 {
5766         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5767         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5768         struct ixgbe_hw *hw =
5769                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5770         uint32_t q_idx;
5771         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
5772         uint32_t base = IXGBE_MISC_VEC_ID;
5773
5774         /* Configure VF other cause ivar */
5775         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
5776
5777         /* Don't configure the MSI-X registers if no mapping has been done
5778          * between interrupt vectors and event fds.
5779          */
5780         if (!rte_intr_dp_is_en(intr_handle))
5781                 return;
5782
5783         if (rte_intr_allow_others(intr_handle)) {
5784                 base = IXGBE_RX_VEC_START;
5785                 vector_idx = IXGBE_RX_VEC_START;
5786         }
5787
5788         /* Configure all RX queues of VF */
5789         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
5790                 /* Force all queues to use vector 0,
5791                  * as IXGBE_VF_MAXMSIVECOTR = 1
5792                  */
5793                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
5794                 intr_handle->intr_vec[q_idx] = vector_idx;
5795                 if (vector_idx < base + intr_handle->nb_efd - 1)
5796                         vector_idx++;
5797         }
5798 }
5799
5800 /**
5801  * Sets up the hardware to properly generate MSI-X interrupts
5802  * @hw
5803  *  board private structure
5804  */
5805 static void
5806 ixgbe_configure_msix(struct rte_eth_dev *dev)
5807 {
5808         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5809         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5810         struct ixgbe_hw *hw =
5811                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5812         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
5813         uint32_t vec = IXGBE_MISC_VEC_ID;
5814         uint32_t mask;
5815         uint32_t gpie;
5816
5817         /* Don't configure the MSI-X registers if no mapping has been done
5818          * between interrupt vectors and event fds.
5819          */
5820         if (!rte_intr_dp_is_en(intr_handle))
5821                 return;
5822
5823         if (rte_intr_allow_others(intr_handle))
5824                 vec = base = IXGBE_RX_VEC_START;
5825
5826         /* setup GPIE for MSI-x mode */
5827         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5828         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5829                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
5830         /* auto clearing and auto setting corresponding bits in EIMS
5831          * when MSI-X interrupt is triggered
5832          */
5833         if (hw->mac.type == ixgbe_mac_82598EB) {
5834                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5835         } else {
5836                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5837                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5838         }
5839         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5840
5841         /* Populate the IVAR table and set the ITR values to the
5842          * corresponding register.
5843          */
5844         for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
5845              queue_id++) {
5846                 /* by default, 1:1 mapping */
5847                 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
5848                 intr_handle->intr_vec[queue_id] = vec;
5849                 if (vec < base + intr_handle->nb_efd - 1)
5850                         vec++;
5851         }
5852
5853         switch (hw->mac.type) {
5854         case ixgbe_mac_82598EB:
5855                 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
5856                                    IXGBE_MISC_VEC_ID);
5857                 break;
5858         case ixgbe_mac_82599EB:
5859         case ixgbe_mac_X540:
5860         case ixgbe_mac_X550:
5861                 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
5862                 break;
5863         default:
5864                 break;
5865         }
5866         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
5867                         IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
5868
5869         /* set up to auto-clear the timer and the vectors */
5870         mask = IXGBE_EIMS_ENABLE_MASK;
5871         mask &= ~(IXGBE_EIMS_OTHER |
5872                   IXGBE_EIMS_MAILBOX |
5873                   IXGBE_EIMS_LSC);
5874
5875         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5876 }
5877
5878 int
5879 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
5880                            uint16_t queue_idx, uint16_t tx_rate)
5881 {
5882         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5883         uint32_t rf_dec, rf_int;
5884         uint32_t bcnrc_val;
5885         uint16_t link_speed = dev->data->dev_link.link_speed;
5886
5887         if (queue_idx >= hw->mac.max_tx_queues)
5888                 return -EINVAL;
5889
5890         if (tx_rate != 0) {
5891                 /* Calculate the rate factor values to set */
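                     /*
                      * The rate factor is link_speed / tx_rate in fixed point: rf_int
                      * holds the integer part, rf_dec the scaled remainder. For example,
                      * a 10000 Mb/s link limited to 1000 Mb/s gives rf_int = 10, rf_dec = 0.
                      */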
5892                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
5893                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
5894                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
5895
5896                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
5897                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
5898                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
5899                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
5900         } else {
5901                 bcnrc_val = 0;
5902         }
5903
5904         /*
5905          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
5906          * register. MMW_SIZE=0x014 if 9728-byte jumbo frames are supported,
5907          * otherwise set it to 0x4.
5908          */
5909         if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
5910                 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
5911                                 IXGBE_MAX_JUMBO_FRAME_SIZE))
5912                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5913                         IXGBE_MMW_SIZE_JUMBO_FRAME);
5914         else
5915                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5916                         IXGBE_MMW_SIZE_DEFAULT);
5917
5918         /* Set RTTBCNRC of queue X */
5919         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
5920         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
5921         IXGBE_WRITE_FLUSH(hw);
5922
5923         return 0;
5924 }
5925
5926 static int
5927 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5928                      __attribute__((unused)) uint32_t index,
5929                      __attribute__((unused)) uint32_t pool)
5930 {
5931         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5932         int diag;
5933
5934         /*
5935          * On an 82599 VF, adding the same MAC address again is not an idempotent
5936          * operation. Trap this case to avoid exhausting the [very limited]
5937          * set of PF resources used to store VF MAC addresses.
5938          */
5939         if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5940                 return -1;
5941         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5942         if (diag != 0)
5943                 PMD_DRV_LOG(ERR, "Unable to add MAC address "
5944                             "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
5945                             mac_addr->addr_bytes[0],
5946                             mac_addr->addr_bytes[1],
5947                             mac_addr->addr_bytes[2],
5948                             mac_addr->addr_bytes[3],
5949                             mac_addr->addr_bytes[4],
5950                             mac_addr->addr_bytes[5],
5951                             diag);
5952         return diag;
5953 }
5954
5955 static void
5956 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
5957 {
5958         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5959         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
5960         struct ether_addr *mac_addr;
5961         uint32_t i;
5962         int diag;
5963
5964         /*
5965          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
5966          * not support the deletion of a given MAC address.
5967          * Instead, it requires deleting all MAC addresses, then adding back
5968          * all of them except the one to be deleted.
5969          */
5970         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
5971
5972         /*
5973          * Add back all MAC addresses, except the deleted one and the
5974          * permanent MAC address.
5975          */
5976         for (i = 0, mac_addr = dev->data->mac_addrs;
5977              i < hw->mac.num_rar_entries; i++, mac_addr++) {
5978                 /* Skip the deleted MAC address */
5979                 if (i == index)
5980                         continue;
5981                 /* Skip NULL MAC addresses */
5982                 if (is_zero_ether_addr(mac_addr))
5983                         continue;
5984                 /* Skip the permanent MAC address */
5985                 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5986                         continue;
5987                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5988                 if (diag != 0)
5989                         PMD_DRV_LOG(ERR,
5990                                     "Adding again MAC address "
5991                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
5992                                     "diag=%d",
5993                                     mac_addr->addr_bytes[0],
5994                                     mac_addr->addr_bytes[1],
5995                                     mac_addr->addr_bytes[2],
5996                                     mac_addr->addr_bytes[3],
5997                                     mac_addr->addr_bytes[4],
5998                                     mac_addr->addr_bytes[5],
5999                                     diag);
6000         }
6001 }
6002
6003 static void
6004 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
6005 {
6006         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6007
6008         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
6009 }
6010
6011 int
6012 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
6013                         struct rte_eth_syn_filter *filter,
6014                         bool add)
6015 {
6016         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6017         struct ixgbe_filter_info *filter_info =
6018                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6019         uint32_t syn_info;
6020         uint32_t synqf;
6021
6022         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6023                 return -EINVAL;
6024
6025         syn_info = filter_info->syn_info;
6026
6027         if (add) {
6028                 if (syn_info & IXGBE_SYN_FILTER_ENABLE)
6029                         return -EINVAL;
6030                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
6031                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
6032
6033                 if (filter->hig_pri)
6034                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
6035                 else
6036                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
6037         } else {
6038                 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6039                 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
6040                         return -ENOENT;
6041                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
6042         }
6043
6044         filter_info->syn_info = synqf;
6045         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
6046         IXGBE_WRITE_FLUSH(hw);
6047         return 0;
6048 }
6049
6050 static int
6051 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
6052                         struct rte_eth_syn_filter *filter)
6053 {
6054         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6055         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6056
6057         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
6058                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
6059                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
6060                 return 0;
6061         }
6062         return -ENOENT;
6063 }
6064
6065 static int
6066 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
6067                         enum rte_filter_op filter_op,
6068                         void *arg)
6069 {
6070         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6071         int ret;
6072
6073         MAC_TYPE_FILTER_SUP(hw->mac.type);
6074
6075         if (filter_op == RTE_ETH_FILTER_NOP)
6076                 return 0;
6077
6078         if (arg == NULL) {
6079                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
6080                             filter_op);
6081                 return -EINVAL;
6082         }
6083
6084         switch (filter_op) {
6085         case RTE_ETH_FILTER_ADD:
6086                 ret = ixgbe_syn_filter_set(dev,
6087                                 (struct rte_eth_syn_filter *)arg,
6088                                 TRUE);
6089                 break;
6090         case RTE_ETH_FILTER_DELETE:
6091                 ret = ixgbe_syn_filter_set(dev,
6092                                 (struct rte_eth_syn_filter *)arg,
6093                                 FALSE);
6094                 break;
6095         case RTE_ETH_FILTER_GET:
6096                 ret = ixgbe_syn_filter_get(dev,
6097                                 (struct rte_eth_syn_filter *)arg);
6098                 break;
6099         default:
6100                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
6101                 ret = -EINVAL;
6102                 break;
6103         }
6104
6105         return ret;
6106 }
6107
6108
6109 static inline enum ixgbe_5tuple_protocol
6110 convert_protocol_type(uint8_t protocol_value)
6111 {
6112         if (protocol_value == IPPROTO_TCP)
6113                 return IXGBE_FILTER_PROTOCOL_TCP;
6114         else if (protocol_value == IPPROTO_UDP)
6115                 return IXGBE_FILTER_PROTOCOL_UDP;
6116         else if (protocol_value == IPPROTO_SCTP)
6117                 return IXGBE_FILTER_PROTOCOL_SCTP;
6118         else
6119                 return IXGBE_FILTER_PROTOCOL_NONE;
6120 }
6121
6122 /* inject a 5-tuple filter to HW */
6123 static inline void
6124 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
6125                            struct ixgbe_5tuple_filter *filter)
6126 {
6127         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6128         int i;
6129         uint32_t ftqf, sdpqf;
6130         uint32_t l34timir = 0;
6131         uint8_t mask = 0xff;
6132
6133         i = filter->index;
6134
6135         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
6136                                 IXGBE_SDPQF_DSTPORT_SHIFT);
6137         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
6138
6139         ftqf = (uint32_t)(filter->filter_info.proto &
6140                 IXGBE_FTQF_PROTOCOL_MASK);
6141         ftqf |= (uint32_t)((filter->filter_info.priority &
6142                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
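             /*
              * FTQF mask bits start out all set (field ignored); clearing a bit
              * enables comparison of the corresponding 5-tuple field.
              */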
6143         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
6144                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
6145         if (filter->filter_info.dst_ip_mask == 0)
6146                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
6147         if (filter->filter_info.src_port_mask == 0)
6148                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
6149         if (filter->filter_info.dst_port_mask == 0)
6150                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
6151         if (filter->filter_info.proto_mask == 0)
6152                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
6153         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
6154         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
6155         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
6156
6157         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
6158         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
6159         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
6160         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
6161
6162         l34timir |= IXGBE_L34T_IMIR_RESERVE;
6163         l34timir |= (uint32_t)(filter->queue <<
6164                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
6165         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
6166 }
6167
6168 /*
6169  * add a 5tuple filter
6170  *
6171  * @param
6172  * dev: Pointer to struct rte_eth_dev.
6173  * filter: pointer to the filter that will be added; on success its
6174  *         index field is set to the allocated hardware filter slot.
6176  *
6177  * @return
6178  *    - On success, zero.
6179  *    - On failure, a negative value.
6180  */
6181 static int
6182 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
6183                         struct ixgbe_5tuple_filter *filter)
6184 {
6185         struct ixgbe_filter_info *filter_info =
6186                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6187         int i, idx, shift;
6188
6189         /*
6190          * look for an unused 5tuple filter index,
6191          * and insert the filter into the list.
6192          */
6193         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
6194                 idx = i / (sizeof(uint32_t) * NBBY);
6195                 shift = i % (sizeof(uint32_t) * NBBY);
6196                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
6197                         filter_info->fivetuple_mask[idx] |= 1 << shift;
6198                         filter->index = i;
6199                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
6200                                           filter,
6201                                           entries);
6202                         break;
6203                 }
6204         }
6205         if (i >= IXGBE_MAX_FTQF_FILTERS) {
6206                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
6207                 return -ENOSYS;
6208         }
6209
6210         ixgbe_inject_5tuple_filter(dev, filter);
6211
6212         return 0;
6213 }
6214
6215 /*
6216  * remove a 5tuple filter
6217  *
6218  * @param
6219  * dev: Pointer to struct rte_eth_dev.
6220  * filter: pointer to the filter that will be removed.
6221  */
6222 static void
6223 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
6224                         struct ixgbe_5tuple_filter *filter)
6225 {
6226         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6227         struct ixgbe_filter_info *filter_info =
6228                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6229         uint16_t index = filter->index;
6230
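             /* Release the software allocation bit and list entry, then clear the HW filter registers */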
6231         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
6232                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
6233         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
6234         rte_free(filter);
6235
6236         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
6237         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
6238         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
6239         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
6240         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
6241 }
6242
6243 static int
6244 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
6245 {
6246         struct ixgbe_hw *hw;
6247         uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
6248         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
6249
6250         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6251
6252         if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
6253                 return -EINVAL;
6254
6255         /* Refuse an MTU that requires scattered packet support when this
6256          * feature has not been enabled beforehand.
6257          */
6258         if (!rx_conf->enable_scatter &&
6259             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
6260              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
6261                 return -EINVAL;
6262
6263         /*
6264          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
6265          * request of the version 2.0 of the mailbox API.
6266          * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
6267          * of the mailbox API.
6268          * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
6269          * prior to 3.11.33 which contains the following change:
6270          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
6271          */
6272         ixgbevf_rlpml_set_vf(hw, max_frame);
6273
6274         /* update max frame size */
6275         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
6276         return 0;
6277 }
6278
6279 static inline struct ixgbe_5tuple_filter *
6280 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
6281                         struct ixgbe_5tuple_filter_info *key)
6282 {
6283         struct ixgbe_5tuple_filter *it;
6284
6285         TAILQ_FOREACH(it, filter_list, entries) {
6286                 if (memcmp(key, &it->filter_info,
6287                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
6288                         return it;
6289                 }
6290         }
6291         return NULL;
6292 }
6293
6294 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
6295 static inline int
6296 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
6297                         struct ixgbe_5tuple_filter_info *filter_info)
6298 {
6299         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
6300                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
6301                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
6302                 return -EINVAL;
6303
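             /*
              * Note the inverted mask convention: an all-ones field mask in the
              * rte_eth_ntuple_filter means "compare this field", which becomes a
              * 0 mask flag in ixgbe_5tuple_filter_info, and vice versa.
              */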
6304         switch (filter->dst_ip_mask) {
6305         case UINT32_MAX:
6306                 filter_info->dst_ip_mask = 0;
6307                 filter_info->dst_ip = filter->dst_ip;
6308                 break;
6309         case 0:
6310                 filter_info->dst_ip_mask = 1;
6311                 break;
6312         default:
6313                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
6314                 return -EINVAL;
6315         }
6316
6317         switch (filter->src_ip_mask) {
6318         case UINT32_MAX:
6319                 filter_info->src_ip_mask = 0;
6320                 filter_info->src_ip = filter->src_ip;
6321                 break;
6322         case 0:
6323                 filter_info->src_ip_mask = 1;
6324                 break;
6325         default:
6326                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
6327                 return -EINVAL;
6328         }
6329
6330         switch (filter->dst_port_mask) {
6331         case UINT16_MAX:
6332                 filter_info->dst_port_mask = 0;
6333                 filter_info->dst_port = filter->dst_port;
6334                 break;
6335         case 0:
6336                 filter_info->dst_port_mask = 1;
6337                 break;
6338         default:
6339                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
6340                 return -EINVAL;
6341         }
6342
6343         switch (filter->src_port_mask) {
6344         case UINT16_MAX:
6345                 filter_info->src_port_mask = 0;
6346                 filter_info->src_port = filter->src_port;
6347                 break;
6348         case 0:
6349                 filter_info->src_port_mask = 1;
6350                 break;
6351         default:
6352                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
6353                 return -EINVAL;
6354         }
6355
6356         switch (filter->proto_mask) {
6357         case UINT8_MAX:
6358                 filter_info->proto_mask = 0;
6359                 filter_info->proto =
6360                         convert_protocol_type(filter->proto);
6361                 break;
6362         case 0:
6363                 filter_info->proto_mask = 1;
6364                 break;
6365         default:
6366                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
6367                 return -EINVAL;
6368         }
6369
6370         filter_info->priority = (uint8_t)filter->priority;
6371         return 0;
6372 }
6373
6374 /*
6375  * add or delete a ntuple filter
6376  *
6377  * @param
6378  * dev: Pointer to struct rte_eth_dev.
6379  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6380  * add: if true, add the filter; if false, remove it.
6381  *
6382  * @return
6383  *    - On success, zero.
6384  *    - On failure, a negative value.
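      *
      * A minimal usage sketch (illustrative only; dst_ip_be and rx_queue_id
      * are placeholder variables supplied by the application, with dst_ip_be
      * assumed to be an IPv4 address in network byte order):
      *
      *     struct rte_eth_ntuple_filter f = {
      *             .flags = RTE_5TUPLE_FLAGS,
      *             .dst_ip = dst_ip_be,
      *             .dst_ip_mask = UINT32_MAX,   /* compare dst_ip */
      *             .proto = IPPROTO_TCP,
      *             .proto_mask = UINT8_MAX,     /* compare proto */
      *             .priority = 1,
      *             .queue = rx_queue_id,
      *     };
      *     ixgbe_add_del_ntuple_filter(dev, &f, TRUE);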
6385  */
6386 int
6387 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
6388                         struct rte_eth_ntuple_filter *ntuple_filter,
6389                         bool add)
6390 {
6391         struct ixgbe_filter_info *filter_info =
6392                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6393         struct ixgbe_5tuple_filter_info filter_5tuple;
6394         struct ixgbe_5tuple_filter *filter;
6395         int ret;
6396
6397         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6398                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6399                 return -EINVAL;
6400         }
6401
6402         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6403         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6404         if (ret < 0)
6405                 return ret;
6406
6407         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6408                                          &filter_5tuple);
6409         if (filter != NULL && add) {
6410                 PMD_DRV_LOG(ERR, "filter exists.");
6411                 return -EEXIST;
6412         }
6413         if (filter == NULL && !add) {
6414                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6415                 return -ENOENT;
6416         }
6417
6418         if (add) {
6419                 filter = rte_zmalloc("ixgbe_5tuple_filter",
6420                                 sizeof(struct ixgbe_5tuple_filter), 0);
6421                 if (filter == NULL)
6422                         return -ENOMEM;
6423                 rte_memcpy(&filter->filter_info,
6424                                  &filter_5tuple,
6425                                  sizeof(struct ixgbe_5tuple_filter_info));
6426                 filter->queue = ntuple_filter->queue;
6427                 ret = ixgbe_add_5tuple_filter(dev, filter);
6428                 if (ret < 0) {
6429                         rte_free(filter);
6430                         return ret;
6431                 }
6432         } else
6433                 ixgbe_remove_5tuple_filter(dev, filter);
6434
6435         return 0;
6436 }
6437
6438 /*
6439  * get a ntuple filter
6440  *
6441  * @param
6442  * dev: Pointer to struct rte_eth_dev.
6443  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6444  *
6445  * @return
6446  *    - On success, zero.
6447  *    - On failure, a negative value.
6448  */
6449 static int
6450 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
6451                         struct rte_eth_ntuple_filter *ntuple_filter)
6452 {
6453         struct ixgbe_filter_info *filter_info =
6454                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6455         struct ixgbe_5tuple_filter_info filter_5tuple;
6456         struct ixgbe_5tuple_filter *filter;
6457         int ret;
6458
6459         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6460                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6461                 return -EINVAL;
6462         }
6463
6464         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6465         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6466         if (ret < 0)
6467                 return ret;
6468
6469         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6470                                          &filter_5tuple);
6471         if (filter == NULL) {
6472                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6473                 return -ENOENT;
6474         }
6475         ntuple_filter->queue = filter->queue;
6476         return 0;
6477 }
6478
6479 /*
6480  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
6481  * @dev: pointer to rte_eth_dev structure
6482  * @filter_op: the operation to be taken.
6483  * @arg: a pointer to specific structure corresponding to the filter_op
6484  *
6485  * @return
6486  *    - On success, zero.
6487  *    - On failure, a negative value.
6488  */
6489 static int
6490 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
6491                                 enum rte_filter_op filter_op,
6492                                 void *arg)
6493 {
6494         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6495         int ret;
6496
6497         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
6498
6499         if (filter_op == RTE_ETH_FILTER_NOP)
6500                 return 0;
6501
6502         if (arg == NULL) {
6503                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6504                             filter_op);
6505                 return -EINVAL;
6506         }
6507
6508         switch (filter_op) {
6509         case RTE_ETH_FILTER_ADD:
6510                 ret = ixgbe_add_del_ntuple_filter(dev,
6511                         (struct rte_eth_ntuple_filter *)arg,
6512                         TRUE);
6513                 break;
6514         case RTE_ETH_FILTER_DELETE:
6515                 ret = ixgbe_add_del_ntuple_filter(dev,
6516                         (struct rte_eth_ntuple_filter *)arg,
6517                         FALSE);
6518                 break;
6519         case RTE_ETH_FILTER_GET:
6520                 ret = ixgbe_get_ntuple_filter(dev,
6521                         (struct rte_eth_ntuple_filter *)arg);
6522                 break;
6523         default:
6524                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6525                 ret = -EINVAL;
6526                 break;
6527         }
6528         return ret;
6529 }
6530
6531 int
6532 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6533                         struct rte_eth_ethertype_filter *filter,
6534                         bool add)
6535 {
6536         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6537         struct ixgbe_filter_info *filter_info =
6538                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6539         uint32_t etqf = 0;
6540         uint32_t etqs = 0;
6541         int ret;
6542         struct ixgbe_ethertype_filter ethertype_filter;
6543
6544         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6545                 return -EINVAL;
6546
6547         if (filter->ether_type == ETHER_TYPE_IPv4 ||
6548                 filter->ether_type == ETHER_TYPE_IPv6) {
6549                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
6550                         " ethertype filter.", filter->ether_type);
6551                 return -EINVAL;
6552         }
6553
6554         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6555                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6556                 return -EINVAL;
6557         }
6558         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6559                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
6560                 return -EINVAL;
6561         }
6562
6563         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6564         if (ret >= 0 && add) {
6565                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6566                             filter->ether_type);
6567                 return -EEXIST;
6568         }
6569         if (ret < 0 && !add) {
6570                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6571                             filter->ether_type);
6572                 return -ENOENT;
6573         }
6574
6575         if (add) {
6576                 etqf = IXGBE_ETQF_FILTER_EN;
6577                 etqf |= (uint32_t)filter->ether_type;
6578                 etqs |= (uint32_t)((filter->queue <<
6579                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
6580                                     IXGBE_ETQS_RX_QUEUE);
6581                 etqs |= IXGBE_ETQS_QUEUE_EN;
6582
6583                 ethertype_filter.ethertype = filter->ether_type;
6584                 ethertype_filter.etqf = etqf;
6585                 ethertype_filter.etqs = etqs;
6586                 ethertype_filter.conf = FALSE;
6587                 ret = ixgbe_ethertype_filter_insert(filter_info,
6588                                                     &ethertype_filter);
6589                 if (ret < 0) {
6590                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
6591                         return -ENOSPC;
6592                 }
6593         } else {
6594                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6595                 if (ret < 0)
6596                         return -ENOSYS;
6597         }
6598         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6599         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6600         IXGBE_WRITE_FLUSH(hw);
6601
6602         return 0;
6603 }
6604
6605 static int
6606 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
6607                         struct rte_eth_ethertype_filter *filter)
6608 {
6609         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6610         struct ixgbe_filter_info *filter_info =
6611                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6612         uint32_t etqf, etqs;
6613         int ret;
6614
6615         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6616         if (ret < 0) {
6617                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6618                             filter->ether_type);
6619                 return -ENOENT;
6620         }
6621
6622         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
6623         if (etqf & IXGBE_ETQF_FILTER_EN) {
6624                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
6625                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
6626                 filter->flags = 0;
6627                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
6628                                IXGBE_ETQS_RX_QUEUE_SHIFT;
6629                 return 0;
6630         }
6631         return -ENOENT;
6632 }
6633
6634 /*
6635  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
6636  * @dev: pointer to rte_eth_dev structure
6637  * @filter_op: the operation to be taken.
6638  * @arg: a pointer to specific structure corresponding to the filter_op
6639  */
6640 static int
6641 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
6642                                 enum rte_filter_op filter_op,
6643                                 void *arg)
6644 {
6645         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6646         int ret;
6647
6648         MAC_TYPE_FILTER_SUP(hw->mac.type);
6649
6650         if (filter_op == RTE_ETH_FILTER_NOP)
6651                 return 0;
6652
6653         if (arg == NULL) {
6654                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6655                             filter_op);
6656                 return -EINVAL;
6657         }
6658
6659         switch (filter_op) {
6660         case RTE_ETH_FILTER_ADD:
6661                 ret = ixgbe_add_del_ethertype_filter(dev,
6662                         (struct rte_eth_ethertype_filter *)arg,
6663                         TRUE);
6664                 break;
6665         case RTE_ETH_FILTER_DELETE:
6666                 ret = ixgbe_add_del_ethertype_filter(dev,
6667                         (struct rte_eth_ethertype_filter *)arg,
6668                         FALSE);
6669                 break;
6670         case RTE_ETH_FILTER_GET:
6671                 ret = ixgbe_get_ethertype_filter(dev,
6672                         (struct rte_eth_ethertype_filter *)arg);
6673                 break;
6674         default:
6675                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6676                 ret = -EINVAL;
6677                 break;
6678         }
6679         return ret;
6680 }
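
/*
 * Editor's note -- illustrative sketch, not part of the upstream driver:
 * an application normally reaches the ADD/DELETE/GET cases above through
 * the generic rte_eth_dev_filter_ctrl() API. port_id, the queue index and
 * handle_error() are hypothetical.
 *
 *     struct rte_eth_ethertype_filter f = {
 *             .ether_type = 0x88CC,   // e.g. LLDP frames
 *             .flags = 0,
 *             .queue = 3,             // steer matching frames to queue 3
 *     };
 *     int rc = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                                      RTE_ETH_FILTER_ADD, &f);
 *     if (rc != 0)
 *             handle_error(rc);       // e.g. -ENOSPC when all ETQF slots are used
 */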
6681
6682 static int
6683 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
6684                      enum rte_filter_type filter_type,
6685                      enum rte_filter_op filter_op,
6686                      void *arg)
6687 {
6688         int ret = 0;
6689
6690         switch (filter_type) {
6691         case RTE_ETH_FILTER_NTUPLE:
6692                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
6693                 break;
6694         case RTE_ETH_FILTER_ETHERTYPE:
6695                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
6696                 break;
6697         case RTE_ETH_FILTER_SYN:
6698                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
6699                 break;
6700         case RTE_ETH_FILTER_FDIR:
6701                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
6702                 break;
6703         case RTE_ETH_FILTER_L2_TUNNEL:
6704                 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
6705                 break;
6706         case RTE_ETH_FILTER_GENERIC:
6707                 if (filter_op != RTE_ETH_FILTER_GET)
6708                         return -EINVAL;
6709                 *(const void **)arg = &ixgbe_flow_ops;
6710                 break;
6711         default:
6712                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
6713                                                         filter_type);
6714                 ret = -EINVAL;
6715                 break;
6716         }
6717
6718         return ret;
6719 }
6720
6721 static u8 *
6722 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
6723                         u8 **mc_addr_ptr, u32 *vmdq)
6724 {
6725         u8 *mc_addr;
6726
6727         *vmdq = 0;
6728         mc_addr = *mc_addr_ptr;
6729         *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
6730         return mc_addr;
6731 }
6732
6733 static int
6734 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
6735                           struct ether_addr *mc_addr_set,
6736                           uint32_t nb_mc_addr)
6737 {
6738         struct ixgbe_hw *hw;
6739         u8 *mc_addr_list;
6740
6741         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6742         mc_addr_list = (u8 *)mc_addr_set;
6743         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
6744                                          ixgbe_dev_addr_list_itr, TRUE);
6745 }
6746
6747 static uint64_t
6748 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
6749 {
6750         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6751         uint64_t systime_cycles;
6752
6753         switch (hw->mac.type) {
6754         case ixgbe_mac_X550:
6755         case ixgbe_mac_X550EM_x:
6756         case ixgbe_mac_X550EM_a:
6757                 /* SYSTIML stores ns and SYSTIMH stores seconds. */
6758                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6759                 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6760                                 * NSEC_PER_SEC;
6761                 break;
6762         default:
6763                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6764                 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6765                                 << 32;
6766         }
6767
6768         return systime_cycles;
6769 }
6770
6771 static uint64_t
6772 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6773 {
6774         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6775         uint64_t rx_tstamp_cycles;
6776
6777         switch (hw->mac.type) {
6778         case ixgbe_mac_X550:
6779         case ixgbe_mac_X550EM_x:
6780         case ixgbe_mac_X550EM_a:
6781                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
6782                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6783                 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6784                                 * NSEC_PER_SEC;
6785                 break;
6786         default:
6787                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
6788                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6789                 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6790                                 << 32;
6791         }
6792
6793         return rx_tstamp_cycles;
6794 }
6795
6796 static uint64_t
6797 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6798 {
6799         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6800         uint64_t tx_tstamp_cycles;
6801
6802         switch (hw->mac.type) {
6803         case ixgbe_mac_X550:
6804         case ixgbe_mac_X550EM_x:
6805         case ixgbe_mac_X550EM_a:
6806                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
6807                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6808                 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6809                                 * NSEC_PER_SEC;
6810                 break;
6811         default:
6812                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
6813                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6814                 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6815                                 << 32;
6816         }
6817
6818         return tx_tstamp_cycles;
6819 }
6820
6821 static void
6822 ixgbe_start_timecounters(struct rte_eth_dev *dev)
6823 {
6824         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6825         struct ixgbe_adapter *adapter =
6826                 (struct ixgbe_adapter *)dev->data->dev_private;
6827         struct rte_eth_link link;
6828         uint32_t incval = 0;
6829         uint32_t shift = 0;
6830
6831         /* Get current link speed. */
6832         memset(&link, 0, sizeof(link));
6833         ixgbe_dev_link_update(dev, 1);
6834         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
6835
6836         switch (link.link_speed) {
6837         case ETH_SPEED_NUM_100M:
6838                 incval = IXGBE_INCVAL_100;
6839                 shift = IXGBE_INCVAL_SHIFT_100;
6840                 break;
6841         case ETH_SPEED_NUM_1G:
6842                 incval = IXGBE_INCVAL_1GB;
6843                 shift = IXGBE_INCVAL_SHIFT_1GB;
6844                 break;
6845         case ETH_SPEED_NUM_10G:
6846         default:
6847                 incval = IXGBE_INCVAL_10GB;
6848                 shift = IXGBE_INCVAL_SHIFT_10GB;
6849                 break;
6850         }
6851
6852         switch (hw->mac.type) {
6853         case ixgbe_mac_X550:
6854         case ixgbe_mac_X550EM_x:
6855         case ixgbe_mac_X550EM_a:
6856                 /* Independent of link speed. */
6857                 incval = 1;
6858                 /* Cycles read will be interpreted as ns. */
6859                 shift = 0;
6860                 /* Fall-through */
6861         case ixgbe_mac_X540:
6862                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
6863                 break;
6864         case ixgbe_mac_82599EB:
6865                 incval >>= IXGBE_INCVAL_SHIFT_82599;
6866                 shift -= IXGBE_INCVAL_SHIFT_82599;
6867                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
6868                                 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
6869                 break;
6870         default:
6871                 /* Not supported. */
6872                 return;
6873         }
6874
6875         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
6876         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6877         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6878
6879         adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6880         adapter->systime_tc.cc_shift = shift;
6881         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
6882
6883         adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6884         adapter->rx_tstamp_tc.cc_shift = shift;
6885         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6886
6887         adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6888         adapter->tx_tstamp_tc.cc_shift = shift;
6889         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6890 }
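
/*
 * Editor's note -- a minimal, hedged sketch of the arithmetic implied by the
 * timecounters configured above. On X550 the counter already ticks in
 * nanoseconds (shift == 0); on 82599/X540 the raw SYSTIM value is scaled by
 * cc_shift when rte_timecounter_update() later converts it:
 *
 *     uint64_t cycles   = ixgbe_read_systime_cyclecounter(dev);
 *     uint64_t ns_whole = cycles >> adapter->systime_tc.cc_shift;
 *     uint64_t ns_frac  = cycles & adapter->systime_tc.nsec_mask;
 *     // rte_timecounter_update() accumulates ns_whole and carries ns_frac
 *     // across calls; this sketch drops the fractional carry.
 */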
6891
6892 static int
6893 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
6894 {
6895         struct ixgbe_adapter *adapter =
6896                         (struct ixgbe_adapter *)dev->data->dev_private;
6897
6898         adapter->systime_tc.nsec += delta;
6899         adapter->rx_tstamp_tc.nsec += delta;
6900         adapter->tx_tstamp_tc.nsec += delta;
6901
6902         return 0;
6903 }
6904
6905 static int
6906 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
6907 {
6908         uint64_t ns;
6909         struct ixgbe_adapter *adapter =
6910                         (struct ixgbe_adapter *)dev->data->dev_private;
6911
6912         ns = rte_timespec_to_ns(ts);
6913         /* Set the timecounters to a new value. */
6914         adapter->systime_tc.nsec = ns;
6915         adapter->rx_tstamp_tc.nsec = ns;
6916         adapter->tx_tstamp_tc.nsec = ns;
6917
6918         return 0;
6919 }
6920
6921 static int
6922 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
6923 {
6924         uint64_t ns, systime_cycles;
6925         struct ixgbe_adapter *adapter =
6926                         (struct ixgbe_adapter *)dev->data->dev_private;
6927
6928         systime_cycles = ixgbe_read_systime_cyclecounter(dev);
6929         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
6930         *ts = rte_ns_to_timespec(ns);
6931
6932         return 0;
6933 }
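
/*
 * Editor's note -- illustrative sketch (not part of the driver) of how an
 * application exercises the three clock callbacks above through the generic
 * ethdev API; port_id is hypothetical:
 *
 *     struct timespec ts;
 *     rte_eth_timesync_enable(port_id);
 *     rte_eth_timesync_read_time(port_id, &ts);    // -> ixgbe_timesync_read_time()
 *     ts.tv_sec += 1;
 *     rte_eth_timesync_write_time(port_id, &ts);   // -> ixgbe_timesync_write_time()
 *     rte_eth_timesync_adjust_time(port_id, -500); // slew the clock back 500 ns
 */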
6934
6935 static int
6936 ixgbe_timesync_enable(struct rte_eth_dev *dev)
6937 {
6938         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6939         uint32_t tsync_ctl;
6940         uint32_t tsauxc;
6941
6942         /* Stop the timesync system time. */
6943         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
6944         /* Reset the timesync system time value. */
6945         IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
6946         IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
6947
6948         /* Enable system time for platforms where it isn't on by default. */
6949         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
6950         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
6951         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
6952
6953         ixgbe_start_timecounters(dev);
6954
6955         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6956         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
6957                         (ETHER_TYPE_1588 |
6958                          IXGBE_ETQF_FILTER_EN |
6959                          IXGBE_ETQF_1588));
6960
6961         /* Enable timestamping of received PTP packets. */
6962         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6963         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
6964         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6965
6966         /* Enable timestamping of transmitted PTP packets. */
6967         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6968         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
6969         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6970
6971         IXGBE_WRITE_FLUSH(hw);
6972
6973         return 0;
6974 }
6975
6976 static int
6977 ixgbe_timesync_disable(struct rte_eth_dev *dev)
6978 {
6979         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6980         uint32_t tsync_ctl;
6981
6982         /* Disable timestamping of transmitted PTP packets. */
6983         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6984         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
6985         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6986
6987         /* Disable timestamping of received PTP packets. */
6988         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6989         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
6990         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6991
6992         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6993         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
6994
6995         /* Stop incrementing the System Time registers. */
6996         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
6997
6998         return 0;
6999 }
7000
7001 static int
7002 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
7003                                  struct timespec *timestamp,
7004                                  uint32_t flags __rte_unused)
7005 {
7006         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7007         struct ixgbe_adapter *adapter =
7008                 (struct ixgbe_adapter *)dev->data->dev_private;
7009         uint32_t tsync_rxctl;
7010         uint64_t rx_tstamp_cycles;
7011         uint64_t ns;
7012
7013         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7014         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
7015                 return -EINVAL;
7016
7017         rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
7018         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
7019         *timestamp = rte_ns_to_timespec(ns);
7020
7021         return 0;
7022 }
7023
7024 static int
7025 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
7026                                  struct timespec *timestamp)
7027 {
7028         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7029         struct ixgbe_adapter *adapter =
7030                 (struct ixgbe_adapter *)dev->data->dev_private;
7031         uint32_t tsync_txctl;
7032         uint64_t tx_tstamp_cycles;
7033         uint64_t ns;
7034
7035         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7036         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
7037                 return -EINVAL;
7038
7039         tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
7040         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
7041         *timestamp = rte_ns_to_timespec(ns);
7042
7043         return 0;
7044 }
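
/*
 * Editor's note -- hedged sketch of the PTP timestamp retrieval flow served
 * by the two functions above, assuming the generic ethdev/mbuf API of this
 * release; port_id and the polling loop are hypothetical:
 *
 *     struct timespec ts;
 *     if (mbuf->ol_flags & PKT_RX_IEEE1588_TMST)
 *             rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 *     // after transmitting a packet with PKT_TX_IEEE1588_TMST set:
 *     while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == -EINVAL)
 *             ;   // TXSTMP not latched yet, poll again
 */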
7045
7046 static int
7047 ixgbe_get_reg_length(struct rte_eth_dev *dev)
7048 {
7049         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7050         int count = 0;
7051         int g_ind = 0;
7052         const struct reg_info *reg_group;
7053         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7054                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7055
7056         while ((reg_group = reg_set[g_ind++]))
7057                 count += ixgbe_regs_group_count(reg_group);
7058
7059         return count;
7060 }
7061
7062 static int
7063 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
7064 {
7065         int count = 0;
7066         int g_ind = 0;
7067         const struct reg_info *reg_group;
7068
7069         while ((reg_group = ixgbevf_regs[g_ind++]))
7070                 count += ixgbe_regs_group_count(reg_group);
7071
7072         return count;
7073 }
7074
7075 static int
7076 ixgbe_get_regs(struct rte_eth_dev *dev,
7077               struct rte_dev_reg_info *regs)
7078 {
7079         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7080         uint32_t *data = regs->data;
7081         int g_ind = 0;
7082         int count = 0;
7083         const struct reg_info *reg_group;
7084         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7085                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7086
7087         if (data == NULL) {
7088                 regs->length = ixgbe_get_reg_length(dev);
7089                 regs->width = sizeof(uint32_t);
7090                 return 0;
7091         }
7092
7093         /* Support only full register dump */
7094         if ((regs->length == 0) ||
7095             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
7096                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7097                         hw->device_id;
7098                 while ((reg_group = reg_set[g_ind++]))
7099                         count += ixgbe_read_regs_group(dev, &data[count],
7100                                 reg_group);
7101                 return 0;
7102         }
7103
7104         return -ENOTSUP;
7105 }
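
/*
 * Editor's note -- illustrative two-pass usage of the register-dump hook
 * above via the generic API (a first call with data == NULL only returns the
 * length); port_id and the malloc() handling are hypothetical:
 *
 *     struct rte_dev_reg_info info = { .data = NULL };
 *     rte_eth_dev_get_reg_info(port_id, &info);     // fills length and width
 *     info.data = malloc(info.length * info.width);
 *     info.length = 0;                              // 0 requests a full dump
 *     rte_eth_dev_get_reg_info(port_id, &info);     // fills data[]
 */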
7106
7107 static int
7108 ixgbevf_get_regs(struct rte_eth_dev *dev,
7109                 struct rte_dev_reg_info *regs)
7110 {
7111         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7112         uint32_t *data = regs->data;
7113         int g_ind = 0;
7114         int count = 0;
7115         const struct reg_info *reg_group;
7116
7117         if (data == NULL) {
7118                 regs->length = ixgbevf_get_reg_length(dev);
7119                 regs->width = sizeof(uint32_t);
7120                 return 0;
7121         }
7122
7123         /* Support only full register dump */
7124         if ((regs->length == 0) ||
7125             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
7126                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7127                         hw->device_id;
7128                 while ((reg_group = ixgbevf_regs[g_ind++]))
7129                         count += ixgbe_read_regs_group(dev, &data[count],
7130                                                       reg_group);
7131                 return 0;
7132         }
7133
7134         return -ENOTSUP;
7135 }
7136
7137 static int
7138 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
7139 {
7140         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7141
7142         /* word_size counts 16-bit words; return the EEPROM size in bytes. */
7143         return hw->eeprom.word_size * 2;
7144 }
7145
7146 static int
7147 ixgbe_get_eeprom(struct rte_eth_dev *dev,
7148                 struct rte_dev_eeprom_info *in_eeprom)
7149 {
7150         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7151         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7152         uint16_t *data = in_eeprom->data;
7153         int first, length;
7154
7155         first = in_eeprom->offset >> 1;
7156         length = in_eeprom->length >> 1;
7157         if ((first > hw->eeprom.word_size) ||
7158             ((first + length) > hw->eeprom.word_size))
7159                 return -EINVAL;
7160
7161         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7162
7163         return eeprom->ops.read_buffer(hw, first, length, data);
7164 }
7165
7166 static int
7167 ixgbe_set_eeprom(struct rte_eth_dev *dev,
7168                 struct rte_dev_eeprom_info *in_eeprom)
7169 {
7170         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7171         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7172         uint16_t *data = in_eeprom->data;
7173         int first, length;
7174
7175         first = in_eeprom->offset >> 1;
7176         length = in_eeprom->length >> 1;
7177         if ((first > hw->eeprom.word_size) ||
7178             ((first + length) > hw->eeprom.word_size))
7179                 return -EINVAL;
7180
7181         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7182
7183         return eeprom->ops.write_buffer(hw, first, length, data);
7184 }
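
/*
 * Editor's note -- sketch of reading the first 64 bytes of the EEPROM through
 * the generic API that lands in ixgbe_get_eeprom(). offset and length are in
 * bytes and are halved into 16-bit words by the driver; port_id and the
 * buffer are hypothetical:
 *
 *     uint16_t words[32];
 *     struct rte_dev_eeprom_info info = {
 *             .data = words,
 *             .offset = 0,
 *             .length = sizeof(words),    // 64 bytes == 32 EEPROM words
 *     };
 *     rte_eth_dev_get_eeprom(port_id, &info);
 */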
7185
7186 uint16_t
7187 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
7188         switch (mac_type) {
7189         case ixgbe_mac_X550:
7190         case ixgbe_mac_X550EM_x:
7191         case ixgbe_mac_X550EM_a:
7192                 return ETH_RSS_RETA_SIZE_512;
7193         case ixgbe_mac_X550_vf:
7194         case ixgbe_mac_X550EM_x_vf:
7195         case ixgbe_mac_X550EM_a_vf:
7196                 return ETH_RSS_RETA_SIZE_64;
7197         default:
7198                 return ETH_RSS_RETA_SIZE_128;
7199         }
7200 }
7201
7202 uint32_t
7203 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
7204         switch (mac_type) {
7205         case ixgbe_mac_X550:
7206         case ixgbe_mac_X550EM_x:
7207         case ixgbe_mac_X550EM_a:
7208                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
7209                         return IXGBE_RETA(reta_idx >> 2);
7210                 else
7211                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
7212         case ixgbe_mac_X550_vf:
7213         case ixgbe_mac_X550EM_x_vf:
7214         case ixgbe_mac_X550EM_a_vf:
7215                 return IXGBE_VFRETA(reta_idx >> 2);
7216         default:
7217                 return IXGBE_RETA(reta_idx >> 2);
7218         }
7219 }
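
/*
 * Editor's note -- worked example (hedged) of the indexing above: each 32-bit
 * RETA/ERETA register holds four one-byte redirection entries, hence the
 * ">> 2". For reta_idx == 130 on an X550 (512-entry table):
 *
 *     reg  = IXGBE_ERETA((130 - 128) >> 2);   // ERETA(0), first extended register
 *     lane = (130 & 3) * 8;                   // entry occupies bits 16..23 of that register
 */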
7220
7221 uint32_t
7222 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
7223         switch (mac_type) {
7224         case ixgbe_mac_X550_vf:
7225         case ixgbe_mac_X550EM_x_vf:
7226         case ixgbe_mac_X550EM_a_vf:
7227                 return IXGBE_VFMRQC;
7228         default:
7229                 return IXGBE_MRQC;
7230         }
7231 }
7232
7233 uint32_t
7234 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
7235         switch (mac_type) {
7236         case ixgbe_mac_X550_vf:
7237         case ixgbe_mac_X550EM_x_vf:
7238         case ixgbe_mac_X550EM_a_vf:
7239                 return IXGBE_VFRSSRK(i);
7240         default:
7241                 return IXGBE_RSSRK(i);
7242         }
7243 }
7244
7245 bool
7246 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
7247         switch (mac_type) {
7248         case ixgbe_mac_82599_vf:
7249         case ixgbe_mac_X540_vf:
7250                 return 0;
7251         default:
7252                 return 1;
7253         }
7254 }
7255
7256 static int
7257 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
7258                         struct rte_eth_dcb_info *dcb_info)
7259 {
7260         struct ixgbe_dcb_config *dcb_config =
7261                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
7262         struct ixgbe_dcb_tc_config *tc;
7263         struct rte_eth_dcb_tc_queue_mapping *tc_queue;
7264         uint8_t nb_tcs;
7265         uint8_t i, j;
7266
7267         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
7268                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
7269         else
7270                 dcb_info->nb_tcs = 1;
7271
7272         tc_queue = &dcb_info->tc_queue;
7273         nb_tcs = dcb_info->nb_tcs;
7274
7275         if (dcb_config->vt_mode) { /* vt is enabled*/
7276                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
7277                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
7278                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7279                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
7280                 if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
7281                         for (j = 0; j < nb_tcs; j++) {
7282                                 tc_queue->tc_rxq[0][j].base = j;
7283                                 tc_queue->tc_rxq[0][j].nb_queue = 1;
7284                                 tc_queue->tc_txq[0][j].base = j;
7285                                 tc_queue->tc_txq[0][j].nb_queue = 1;
7286                         }
7287                 } else {
7288                         for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
7289                                 for (j = 0; j < nb_tcs; j++) {
7290                                         tc_queue->tc_rxq[i][j].base =
7291                                                 i * nb_tcs + j;
7292                                         tc_queue->tc_rxq[i][j].nb_queue = 1;
7293                                         tc_queue->tc_txq[i][j].base =
7294                                                 i * nb_tcs + j;
7295                                         tc_queue->tc_txq[i][j].nb_queue = 1;
7296                                 }
7297                         }
7298                 }
7299         } else { /* vt is disabled*/
7300                 struct rte_eth_dcb_rx_conf *rx_conf =
7301                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
7302                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7303                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
7304                 if (dcb_info->nb_tcs == ETH_4_TCS) {
7305                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7306                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
7307                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7308                         }
7309                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7310                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
7311                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
7312                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
7313                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
7314                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7315                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7316                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7317                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
7318                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7319                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
7320                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7321                         }
7322                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7323                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
7324                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
7325                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
7326                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
7327                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
7328                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
7329                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
7330                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
7331                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7332                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7333                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7334                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
7335                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
7336                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
7337                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
7338                 }
7339         }
7340         for (i = 0; i < dcb_info->nb_tcs; i++) {
7341                 tc = &dcb_config->tc_config[i];
7342                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
7343         }
7344         return 0;
7345 }
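
/*
 * Editor's note -- illustrative sketch of consuming the TC-to-queue mapping
 * built above from an application through the generic API; port_id and the
 * printf() formatting are hypothetical:
 *
 *     struct rte_eth_dcb_info dcb;
 *     rte_eth_dev_get_dcb_info(port_id, &dcb);
 *     for (uint8_t tc = 0; tc < dcb.nb_tcs; tc++)
 *             printf("TC%u: rxq base %u, %u queues, bw %u%%\n", tc,
 *                    dcb.tc_queue.tc_rxq[0][tc].base,
 *                    dcb.tc_queue.tc_rxq[0][tc].nb_queue,
 *                    dcb.tc_bws[tc]);
 */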
7346
7347 /* Update e-tag ether type */
7348 static int
7349 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
7350                             uint16_t ether_type)
7351 {
7352         uint32_t etag_etype;
7353
7354         if (hw->mac.type != ixgbe_mac_X550 &&
7355             hw->mac.type != ixgbe_mac_X550EM_x &&
7356             hw->mac.type != ixgbe_mac_X550EM_a) {
7357                 return -ENOTSUP;
7358         }
7359
7360         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7361         etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
7362         etag_etype |= ether_type;
7363         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7364         IXGBE_WRITE_FLUSH(hw);
7365
7366         return 0;
7367 }
7368
7369 /* Config l2 tunnel ether type */
7370 static int
7371 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
7372                                   struct rte_eth_l2_tunnel_conf *l2_tunnel)
7373 {
7374         int ret = 0;
7375         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7376         struct ixgbe_l2_tn_info *l2_tn_info =
7377                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7378
7379         if (l2_tunnel == NULL)
7380                 return -EINVAL;
7381
7382         switch (l2_tunnel->l2_tunnel_type) {
7383         case RTE_L2_TUNNEL_TYPE_E_TAG:
7384                 l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
7385                 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
7386                 break;
7387         default:
7388                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7389                 ret = -EINVAL;
7390                 break;
7391         }
7392
7393         return ret;
7394 }
7395
7396 /* Enable e-tag tunnel */
7397 static int
7398 ixgbe_e_tag_enable(struct ixgbe_hw *hw)
7399 {
7400         uint32_t etag_etype;
7401
7402         if (hw->mac.type != ixgbe_mac_X550 &&
7403             hw->mac.type != ixgbe_mac_X550EM_x &&
7404             hw->mac.type != ixgbe_mac_X550EM_a) {
7405                 return -ENOTSUP;
7406         }
7407
7408         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7409         etag_etype |= IXGBE_ETAG_ETYPE_VALID;
7410         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7411         IXGBE_WRITE_FLUSH(hw);
7412
7413         return 0;
7414 }
7415
7416 /* Enable l2 tunnel */
7417 static int
7418 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
7419                            enum rte_eth_tunnel_type l2_tunnel_type)
7420 {
7421         int ret = 0;
7422         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7423         struct ixgbe_l2_tn_info *l2_tn_info =
7424                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7425
7426         switch (l2_tunnel_type) {
7427         case RTE_L2_TUNNEL_TYPE_E_TAG:
7428                 l2_tn_info->e_tag_en = TRUE;
7429                 ret = ixgbe_e_tag_enable(hw);
7430                 break;
7431         default:
7432                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7433                 ret = -EINVAL;
7434                 break;
7435         }
7436
7437         return ret;
7438 }
7439
7440 /* Disable e-tag tunnel */
7441 static int
7442 ixgbe_e_tag_disable(struct ixgbe_hw *hw)
7443 {
7444         uint32_t etag_etype;
7445
7446         if (hw->mac.type != ixgbe_mac_X550 &&
7447             hw->mac.type != ixgbe_mac_X550EM_x &&
7448             hw->mac.type != ixgbe_mac_X550EM_a) {
7449                 return -ENOTSUP;
7450         }
7451
7452         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7453         etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
7454         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7455         IXGBE_WRITE_FLUSH(hw);
7456
7457         return 0;
7458 }
7459
7460 /* Disable l2 tunnel */
7461 static int
7462 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
7463                             enum rte_eth_tunnel_type l2_tunnel_type)
7464 {
7465         int ret = 0;
7466         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7467         struct ixgbe_l2_tn_info *l2_tn_info =
7468                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7469
7470         switch (l2_tunnel_type) {
7471         case RTE_L2_TUNNEL_TYPE_E_TAG:
7472                 l2_tn_info->e_tag_en = FALSE;
7473                 ret = ixgbe_e_tag_disable(hw);
7474                 break;
7475         default:
7476                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7477                 ret = -EINVAL;
7478                 break;
7479         }
7480
7481         return ret;
7482 }
7483
7484 static int
7485 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
7486                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7487 {
7488         int ret = 0;
7489         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7490         uint32_t i, rar_entries;
7491         uint32_t rar_low, rar_high;
7492
7493         if (hw->mac.type != ixgbe_mac_X550 &&
7494             hw->mac.type != ixgbe_mac_X550EM_x &&
7495             hw->mac.type != ixgbe_mac_X550EM_a) {
7496                 return -ENOTSUP;
7497         }
7498
7499         rar_entries = ixgbe_get_num_rx_addrs(hw);
7500
7501         for (i = 1; i < rar_entries; i++) {
7502                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7503                 rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
7504                 if ((rar_high & IXGBE_RAH_AV) &&
7505                     (rar_high & IXGBE_RAH_ADTYPE) &&
7506                     ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
7507                      l2_tunnel->tunnel_id)) {
7508                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
7509                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
7510
7511                         ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
7512
7513                         return ret;
7514                 }
7515         }
7516
7517         return ret;
7518 }
7519
7520 static int
7521 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
7522                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7523 {
7524         int ret = 0;
7525         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7526         uint32_t i, rar_entries;
7527         uint32_t rar_low, rar_high;
7528
7529         if (hw->mac.type != ixgbe_mac_X550 &&
7530             hw->mac.type != ixgbe_mac_X550EM_x &&
7531             hw->mac.type != ixgbe_mac_X550EM_a) {
7532                 return -ENOTSUP;
7533         }
7534
7535         /* One RAR entry is used per tunnel; remove any existing entry for it first. */
7536         ixgbe_e_tag_filter_del(dev, l2_tunnel);
7537
7538         rar_entries = ixgbe_get_num_rx_addrs(hw);
7539
7540         for (i = 1; i < rar_entries; i++) {
7541                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7542                 if (rar_high & IXGBE_RAH_AV) {
7543                         continue;
7544                 } else {
7545                         ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
7546                         rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
7547                         rar_low = l2_tunnel->tunnel_id;
7548
7549                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
7550                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
7551
7552                         return ret;
7553                 }
7554         }
7555
7556         PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rules is full."
7557                      " Please remove a rule before adding a new one.");
7558         return -EINVAL;
7559 }
7560
7561 static inline struct ixgbe_l2_tn_filter *
7562 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
7563                           struct ixgbe_l2_tn_key *key)
7564 {
7565         int ret;
7566
7567         ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
7568         if (ret < 0)
7569                 return NULL;
7570
7571         return l2_tn_info->hash_map[ret];
7572 }
7573
7574 static inline int
7575 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7576                           struct ixgbe_l2_tn_filter *l2_tn_filter)
7577 {
7578         int ret;
7579
7580         ret = rte_hash_add_key(l2_tn_info->hash_handle,
7581                                &l2_tn_filter->key);
7582
7583         if (ret < 0) {
7584                 PMD_DRV_LOG(ERR,
7585                             "Failed to insert L2 tunnel filter"
7586                             " into hash table: %d!",
7587                             ret);
7588                 return ret;
7589         }
7590
7591         l2_tn_info->hash_map[ret] = l2_tn_filter;
7592
7593         TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7594
7595         return 0;
7596 }
7597
7598 static inline int
7599 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7600                           struct ixgbe_l2_tn_key *key)
7601 {
7602         int ret;
7603         struct ixgbe_l2_tn_filter *l2_tn_filter;
7604
7605         ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
7606
7607         if (ret < 0) {
7608                 PMD_DRV_LOG(ERR,
7609                             "No such L2 tunnel filter to delete: %d!",
7610                             ret);
7611                 return ret;
7612         }
7613
7614         l2_tn_filter = l2_tn_info->hash_map[ret];
7615         l2_tn_info->hash_map[ret] = NULL;
7616
7617         TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7618         rte_free(l2_tn_filter);
7619
7620         return 0;
7621 }
7622
7623 /* Add l2 tunnel filter */
7624 int
7625 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
7626                                struct rte_eth_l2_tunnel_conf *l2_tunnel,
7627                                bool restore)
7628 {
7629         int ret;
7630         struct ixgbe_l2_tn_info *l2_tn_info =
7631                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7632         struct ixgbe_l2_tn_key key;
7633         struct ixgbe_l2_tn_filter *node;
7634
7635         if (!restore) {
7636                 key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7637                 key.tn_id = l2_tunnel->tunnel_id;
7638
7639                 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
7640
7641                 if (node) {
7642                         PMD_DRV_LOG(ERR,
7643                                     "The L2 tunnel filter already exists!");
7644                         return -EINVAL;
7645                 }
7646
7647                 node = rte_zmalloc("ixgbe_l2_tn",
7648                                    sizeof(struct ixgbe_l2_tn_filter),
7649                                    0);
7650                 if (!node)
7651                         return -ENOMEM;
7652
7653                 rte_memcpy(&node->key,
7654                                  &key,
7655                                  sizeof(struct ixgbe_l2_tn_key));
7656                 node->pool = l2_tunnel->pool;
7657                 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
7658                 if (ret < 0) {
7659                         rte_free(node);
7660                         return ret;
7661                 }
7662         }
7663
7664         switch (l2_tunnel->l2_tunnel_type) {
7665         case RTE_L2_TUNNEL_TYPE_E_TAG:
7666                 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
7667                 break;
7668         default:
7669                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7670                 ret = -EINVAL;
7671                 break;
7672         }
7673
7674         if ((!restore) && (ret < 0))
7675                 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7676
7677         return ret;
7678 }
7679
7680 /* Delete l2 tunnel filter */
7681 int
7682 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
7683                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
7684 {
7685         int ret;
7686         struct ixgbe_l2_tn_info *l2_tn_info =
7687                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7688         struct ixgbe_l2_tn_key key;
7689
7690         key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7691         key.tn_id = l2_tunnel->tunnel_id;
7692         ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7693         if (ret < 0)
7694                 return ret;
7695
7696         switch (l2_tunnel->l2_tunnel_type) {
7697         case RTE_L2_TUNNEL_TYPE_E_TAG:
7698                 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
7699                 break;
7700         default:
7701                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7702                 ret = -EINVAL;
7703                 break;
7704         }
7705
7706         return ret;
7707 }
7708
7709 /**
7710  * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
7711  * @dev: pointer to rte_eth_dev structure
7712  * @filter_op: the operation to be taken.
7713  * @arg: a pointer to specific structure corresponding to the filter_op
7714  */
7715 static int
7716 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
7717                                   enum rte_filter_op filter_op,
7718                                   void *arg)
7719 {
7720         int ret;
7721
7722         if (filter_op == RTE_ETH_FILTER_NOP)
7723                 return 0;
7724
7725         if (arg == NULL) {
7726                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
7727                             filter_op);
7728                 return -EINVAL;
7729         }
7730
7731         switch (filter_op) {
7732         case RTE_ETH_FILTER_ADD:
7733                 ret = ixgbe_dev_l2_tunnel_filter_add
7734                         (dev,
7735                          (struct rte_eth_l2_tunnel_conf *)arg,
7736                          FALSE);
7737                 break;
7738         case RTE_ETH_FILTER_DELETE:
7739                 ret = ixgbe_dev_l2_tunnel_filter_del
7740                         (dev,
7741                          (struct rte_eth_l2_tunnel_conf *)arg);
7742                 break;
7743         default:
7744                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
7745                 ret = -EINVAL;
7746                 break;
7747         }
7748         return ret;
7749 }
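
/*
 * Editor's note -- hedged sketch of driving the handler above from an
 * application through the legacy filter API; tunnel_id, pool and port_id are
 * hypothetical:
 *
 *     struct rte_eth_l2_tunnel_conf conf = {
 *             .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *             .tunnel_id = 0x123,     // E-tag GRP + E-CID to match
 *             .pool = 1,              // destination pool/VF
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_L2_TUNNEL,
 *                             RTE_ETH_FILTER_ADD, &conf);
 */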
7750
7751 static int
7752 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
7753 {
7754         int ret = 0;
7755         uint32_t ctrl;
7756         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7757
7758         if (hw->mac.type != ixgbe_mac_X550 &&
7759             hw->mac.type != ixgbe_mac_X550EM_x &&
7760             hw->mac.type != ixgbe_mac_X550EM_a) {
7761                 return -ENOTSUP;
7762         }
7763
7764         ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
7765         ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
7766         if (en)
7767                 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
7768         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
7769
7770         return ret;
7771 }
7772
7773 /* Enable l2 tunnel forwarding */
7774 static int
7775 ixgbe_dev_l2_tunnel_forwarding_enable
7776         (struct rte_eth_dev *dev,
7777          enum rte_eth_tunnel_type l2_tunnel_type)
7778 {
7779         struct ixgbe_l2_tn_info *l2_tn_info =
7780                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7781         int ret = 0;
7782
7783         switch (l2_tunnel_type) {
7784         case RTE_L2_TUNNEL_TYPE_E_TAG:
7785                 l2_tn_info->e_tag_fwd_en = TRUE;
7786                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
7787                 break;
7788         default:
7789                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7790                 ret = -EINVAL;
7791                 break;
7792         }
7793
7794         return ret;
7795 }
7796
7797 /* Disable l2 tunnel forwarding */
7798 static int
7799 ixgbe_dev_l2_tunnel_forwarding_disable
7800         (struct rte_eth_dev *dev,
7801          enum rte_eth_tunnel_type l2_tunnel_type)
7802 {
7803         struct ixgbe_l2_tn_info *l2_tn_info =
7804                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7805         int ret = 0;
7806
7807         switch (l2_tunnel_type) {
7808         case RTE_L2_TUNNEL_TYPE_E_TAG:
7809                 l2_tn_info->e_tag_fwd_en = FALSE;
7810                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
7811                 break;
7812         default:
7813                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7814                 ret = -EINVAL;
7815                 break;
7816         }
7817
7818         return ret;
7819 }
7820
7821 static int
7822 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
7823                              struct rte_eth_l2_tunnel_conf *l2_tunnel,
7824                              bool en)
7825 {
7826         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
7827         int ret = 0;
7828         uint32_t vmtir, vmvir;
7829         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7830
7831         if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
7832                 PMD_DRV_LOG(ERR,
7833                             "VF id %u should be less than %u",
7834                             l2_tunnel->vf_id,
7835                             pci_dev->max_vfs);
7836                 return -EINVAL;
7837         }
7838
7839         if (hw->mac.type != ixgbe_mac_X550 &&
7840             hw->mac.type != ixgbe_mac_X550EM_x &&
7841             hw->mac.type != ixgbe_mac_X550EM_a) {
7842                 return -ENOTSUP;
7843         }
7844
7845         if (en)
7846                 vmtir = l2_tunnel->tunnel_id;
7847         else
7848                 vmtir = 0;
7849
7850         IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
7851
7852         vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
7853         vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
7854         if (en)
7855                 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
7856         IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
7857
7858         return ret;
7859 }
7860
7861 /* Enable l2 tunnel tag insertion */
7862 static int
7863 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
7864                                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
7865 {
7866         int ret = 0;
7867
7868         switch (l2_tunnel->l2_tunnel_type) {
7869         case RTE_L2_TUNNEL_TYPE_E_TAG:
7870                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
7871                 break;
7872         default:
7873                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7874                 ret = -EINVAL;
7875                 break;
7876         }
7877
7878         return ret;
7879 }
7880
7881 /* Disable l2 tunnel tag insertion */
7882 static int
7883 ixgbe_dev_l2_tunnel_insertion_disable
7884         (struct rte_eth_dev *dev,
7885          struct rte_eth_l2_tunnel_conf *l2_tunnel)
7886 {
7887         int ret = 0;
7888
7889         switch (l2_tunnel->l2_tunnel_type) {
7890         case RTE_L2_TUNNEL_TYPE_E_TAG:
7891                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
7892                 break;
7893         default:
7894                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7895                 ret = -EINVAL;
7896                 break;
7897         }
7898
7899         return ret;
7900 }
7901
7902 static int
7903 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
7904                              bool en)
7905 {
7906         int ret = 0;
7907         uint32_t qde;
7908         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7909
7910         if (hw->mac.type != ixgbe_mac_X550 &&
7911             hw->mac.type != ixgbe_mac_X550EM_x &&
7912             hw->mac.type != ixgbe_mac_X550EM_a) {
7913                 return -ENOTSUP;
7914         }
7915
7916         qde = IXGBE_READ_REG(hw, IXGBE_QDE);
7917         if (en)
7918                 qde |= IXGBE_QDE_STRIP_TAG;
7919         else
7920                 qde &= ~IXGBE_QDE_STRIP_TAG;
7921         qde &= ~IXGBE_QDE_READ;
7922         qde |= IXGBE_QDE_WRITE;
7923         IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
7924
7925         return ret;
7926 }
7927
7928 /* Enable l2 tunnel tag stripping */
7929 static int
7930 ixgbe_dev_l2_tunnel_stripping_enable
7931         (struct rte_eth_dev *dev,
7932          enum rte_eth_tunnel_type l2_tunnel_type)
7933 {
7934         int ret = 0;
7935
7936         switch (l2_tunnel_type) {
7937         case RTE_L2_TUNNEL_TYPE_E_TAG:
7938                 ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
7939                 break;
7940         default:
7941                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7942                 ret = -EINVAL;
7943                 break;
7944         }
7945
7946         return ret;
7947 }
7948
7949 /* Disable l2 tunnel tag stripping */
7950 static int
7951 ixgbe_dev_l2_tunnel_stripping_disable
7952         (struct rte_eth_dev *dev,
7953          enum rte_eth_tunnel_type l2_tunnel_type)
7954 {
7955         int ret = 0;
7956
7957         switch (l2_tunnel_type) {
7958         case RTE_L2_TUNNEL_TYPE_E_TAG:
7959                 ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
7960                 break;
7961         default:
7962                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7963                 ret = -EINVAL;
7964                 break;
7965         }
7966
7967         return ret;
7968 }
7969
7970 /* Enable/disable l2 tunnel offload functions */
7971 static int
7972 ixgbe_dev_l2_tunnel_offload_set
7973         (struct rte_eth_dev *dev,
7974          struct rte_eth_l2_tunnel_conf *l2_tunnel,
7975          uint32_t mask,
7976          uint8_t en)
7977 {
7978         int ret = 0;
7979
7980         if (l2_tunnel == NULL)
7981                 return -EINVAL;
7982
7983         ret = -EINVAL;
7984         if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
7985                 if (en)
7986                         ret = ixgbe_dev_l2_tunnel_enable(
7987                                 dev,
7988                                 l2_tunnel->l2_tunnel_type);
7989                 else
7990                         ret = ixgbe_dev_l2_tunnel_disable(
7991                                 dev,
7992                                 l2_tunnel->l2_tunnel_type);
7993         }
7994
7995         if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
7996                 if (en)
7997                         ret = ixgbe_dev_l2_tunnel_insertion_enable(
7998                                 dev,
7999                                 l2_tunnel);
8000                 else
8001                         ret = ixgbe_dev_l2_tunnel_insertion_disable(
8002                                 dev,
8003                                 l2_tunnel);
8004         }
8005
8006         if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
8007                 if (en)
8008                         ret = ixgbe_dev_l2_tunnel_stripping_enable(
8009                                 dev,
8010                                 l2_tunnel->l2_tunnel_type);
8011                 else
8012                         ret = ixgbe_dev_l2_tunnel_stripping_disable(
8013                                 dev,
8014                                 l2_tunnel->l2_tunnel_type);
8015         }
8016
8017         if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
8018                 if (en)
8019                         ret = ixgbe_dev_l2_tunnel_forwarding_enable(
8020                                 dev,
8021                                 l2_tunnel->l2_tunnel_type);
8022                 else
8023                         ret = ixgbe_dev_l2_tunnel_forwarding_disable(
8024                                 dev,
8025                                 l2_tunnel->l2_tunnel_type);
8026         }
8027
8028         return ret;
8029 }
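
/*
 * Editor's note -- illustrative call sequence for the mask-driven entry point
 * above, using the generic API of this release; all values are hypothetical:
 *
 *     struct rte_eth_l2_tunnel_conf conf = {
 *             .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *             .tunnel_id = 0x123,
 *             .vf_id = 0,
 *     };
 *     uint32_t mask = ETH_L2_TUNNEL_ENABLE_MASK |
 *                     ETH_L2_TUNNEL_INSERTION_MASK |
 *                     ETH_L2_TUNNEL_STRIPPING_MASK;
 *     rte_eth_dev_l2_tunnel_offload_set(port_id, &conf, mask, 1);  // 1 == enable
 */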
8030
8031 static int
8032 ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
8033                         uint16_t port)
8034 {
8035         IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
8036         IXGBE_WRITE_FLUSH(hw);
8037
8038         return 0;
8039 }
8040
8041 /* There is only one register for the VxLAN UDP port, so multiple ports
8042  * cannot be stored; adding a new port overwrites the current value.
8043  */
8044 static int
8045 ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
8046                      uint16_t port)
8047 {
8048         if (port == 0) {
8049                 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
8050                 return -EINVAL;
8051         }
8052
8053         return ixgbe_update_vxlan_port(hw, port);
8054 }
8055
8056 /* The VxLAN UDP port register always holds a value, so the port
8057  * cannot truly be deleted.
8058  * Instead it is reset to its default value of 0.
8059  */
8060 static int
8061 ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
8062                      uint16_t port)
8063 {
8064         uint16_t cur_port;
8065
8066         cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
8067
8068         if (cur_port != port) {
8069                 PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
8070                 return -EINVAL;
8071         }
8072
8073         return ixgbe_update_vxlan_port(hw, 0);
8074 }
8075
8076 /* Add UDP tunneling port */
8077 static int
8078 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8079                               struct rte_eth_udp_tunnel *udp_tunnel)
8080 {
8081         int ret = 0;
8082         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8083
8084         if (hw->mac.type != ixgbe_mac_X550 &&
8085             hw->mac.type != ixgbe_mac_X550EM_x &&
8086             hw->mac.type != ixgbe_mac_X550EM_a) {
8087                 return -ENOTSUP;
8088         }
8089
8090         if (udp_tunnel == NULL)
8091                 return -EINVAL;
8092
8093         switch (udp_tunnel->prot_type) {
8094         case RTE_TUNNEL_TYPE_VXLAN:
8095                 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
8096                 break;
8097
8098         case RTE_TUNNEL_TYPE_GENEVE:
8099         case RTE_TUNNEL_TYPE_TEREDO:
8100                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8101                 ret = -EINVAL;
8102                 break;
8103
8104         default:
8105                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8106                 ret = -EINVAL;
8107                 break;
8108         }
8109
8110         return ret;
8111 }
8112
8113 /* Remove UDP tunneling port */
8114 static int
8115 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8116                               struct rte_eth_udp_tunnel *udp_tunnel)
8117 {
8118         int ret = 0;
8119         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8120
8121         if (hw->mac.type != ixgbe_mac_X550 &&
8122             hw->mac.type != ixgbe_mac_X550EM_x &&
8123             hw->mac.type != ixgbe_mac_X550EM_a) {
8124                 return -ENOTSUP;
8125         }
8126
8127         if (udp_tunnel == NULL)
8128                 return -EINVAL;
8129
8130         switch (udp_tunnel->prot_type) {
8131         case RTE_TUNNEL_TYPE_VXLAN:
8132                 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
8133                 break;
8134         case RTE_TUNNEL_TYPE_GENEVE:
8135         case RTE_TUNNEL_TYPE_TEREDO:
8136                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8137                 ret = -EINVAL;
8138                 break;
8139         default:
8140                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8141                 ret = -EINVAL;
8142                 break;
8143         }
8144
8145         return ret;
8146 }
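
/*
 * Editor's note -- hedged usage sketch for the two functions above. Only a
 * single VxLAN port is supported, so a second add simply overwrites the
 * first; port_id is hypothetical:
 *
 *     struct rte_eth_udp_tunnel t = {
 *             .udp_port = 4789,                   // IANA-assigned VXLAN port
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *     rte_eth_dev_udp_tunnel_port_add(port_id, &t);
 *     // ...
 *     rte_eth_dev_udp_tunnel_port_delete(port_id, &t);  // resets VXLANCTRL to 0
 */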
8147
8148 static void
8149 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
8150 {
8151         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8152
8153         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
8154 }
8155
8156 static void
8157 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
8158 {
8159         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8160
8161         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
8162 }
8163
8164 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
8165 {
8166         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8167         u32 in_msg = 0;
8168
8169         if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
8170                 return;
8171
8172         /* PF reset VF event */
8173         if (in_msg == IXGBE_PF_CONTROL_MSG)
8174                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
8175                                               NULL, NULL);
8176 }
8177
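/*
 * Read the VF interrupt cause register (VTEICR) with interrupts masked and
 * record whether the mailbox (misc) vector fired.
 */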
8178 static int
8179 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
8180 {
8181         uint32_t eicr;
8182         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8183         struct ixgbe_interrupt *intr =
8184                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8185         ixgbevf_intr_disable(hw);
8186
8187         /* read the clear-on-read NIC registers here */
8188         eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
8189         intr->flags = 0;
8190
8191         /* only one misc vector supported - mailbox */
8192         eicr &= IXGBE_VTEICR_MASK;
8193         if (eicr == IXGBE_MISC_VEC_ID)
8194                 intr->flags |= IXGBE_FLAG_MAILBOX;
8195
8196         return 0;
8197 }
8198
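/* Handle any pending mailbox event recorded above, then re-enable VF interrupts */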
8199 static int
8200 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
8201 {
8202         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8203         struct ixgbe_interrupt *intr =
8204                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8205
8206         if (intr->flags & IXGBE_FLAG_MAILBOX) {
8207                 ixgbevf_mbx_process(dev);
8208                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
8209         }
8210
8211         ixgbevf_intr_enable(hw);
8212
8213         return 0;
8214 }
8215
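/* Interrupt service routine for the VF: collect the interrupt cause, then act on it */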
8216 static void
8217 ixgbevf_dev_interrupt_handler(void *param)
8218 {
8219         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
8220
8221         ixgbevf_dev_interrupt_get_status(dev);
8222         ixgbevf_dev_interrupt_action(dev);
8223 }
8224
8225 /**
8226  *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
8227  *  @hw: pointer to hardware structure
8228  *
8229  *  Stops the transmit data path and waits for the HW to internally empty
8230  *  the Tx security block
8231  **/
8232 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
8233 {
8234 #define IXGBE_MAX_SECTX_POLL 40
8235
8236         int i;
8237         uint32_t sectxreg;
8238
8239         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8240         sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
8241         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8242         for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
8243                 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
8244                 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
8245                         break;
8246                 /* Use interrupt-safe sleep just in case */
8247                 usec_delay(1000);
8248         }
8249
8250         /* For informational purposes only */
8251         if (i >= IXGBE_MAX_SECTX_POLL)
8252                 PMD_DRV_LOG(DEBUG, "Tx unit may be enabled before the security "
8253                             "path is fully disabled. Continuing with init.");
8254
8255         return IXGBE_SUCCESS;
8256 }
8257
8258 /**
8259  *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
8260  *  @hw: pointer to hardware structure
8261  *
8262  *  Enables the transmit data path.
8263  **/
8264 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
8265 {
8266         uint32_t sectxreg;
8267
8268         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8269         sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
8270         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8271         IXGBE_WRITE_FLUSH(hw);
8272
8273         return IXGBE_SUCCESS;
8274 }
8275
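/*
 * Illustrative pairing of the two helpers above (a sketch, not driver code):
 * callers that reconfigure the Tx security block are expected to quiesce the
 * data path first and re-enable it afterwards, e.g.:
 *
 *     ixgbe_disable_sec_tx_path_generic(hw);
 *     ... program the SECTX / LinkSec registers ...
 *     ixgbe_enable_sec_tx_path_generic(hw);
 */
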
8276 /* restore n-tuple filter */
8277 static inline void
8278 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
8279 {
8280         struct ixgbe_filter_info *filter_info =
8281                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8282         struct ixgbe_5tuple_filter *node;
8283
8284         TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
8285                 ixgbe_inject_5tuple_filter(dev, node);
8286         }
8287 }
8288
8289 /* restore ethernet type filter */
8290 static inline void
8291 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
8292 {
8293         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8294         struct ixgbe_filter_info *filter_info =
8295                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8296         int i;
8297
8298         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8299                 if (filter_info->ethertype_mask & (1 << i)) {
8300                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
8301                                         filter_info->ethertype_filters[i].etqf);
8302                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
8303                                         filter_info->ethertype_filters[i].etqs);
8304                         IXGBE_WRITE_FLUSH(hw);
8305                 }
8306         }
8307 }
8308
8309 /* restore SYN filter */
8310 static inline void
8311 ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
8312 {
8313         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8314         struct ixgbe_filter_info *filter_info =
8315                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8316         uint32_t synqf;
8317
8318         synqf = filter_info->syn_info;
8319
8320         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
8321                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
8322                 IXGBE_WRITE_FLUSH(hw);
8323         }
8324 }
8325
8326 /* restore L2 tunnel filter */
8327 static inline void
8328 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
8329 {
8330         struct ixgbe_l2_tn_info *l2_tn_info =
8331                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8332         struct ixgbe_l2_tn_filter *node;
8333         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8334
8335         TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
8336                 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
8337                 l2_tn_conf.tunnel_id      = node->key.tn_id;
8338                 l2_tn_conf.pool           = node->pool;
8339                 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
8340         }
8341 }
8342
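/*
 * Re-program every software-tracked filter into the hardware; typically
 * invoked from dev_start after the device has been reset.
 */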
8343 static int
8344 ixgbe_filter_restore(struct rte_eth_dev *dev)
8345 {
8346         ixgbe_ntuple_filter_restore(dev);
8347         ixgbe_ethertype_filter_restore(dev);
8348         ixgbe_syn_filter_restore(dev);
8349         ixgbe_fdir_filter_restore(dev);
8350         ixgbe_l2_tn_filter_restore(dev);
8351
8352         return 0;
8353 }
8354
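/*
 * Re-apply the cached E-tag / L2 tunnel configuration (enable state,
 * forwarding and ether type), typically after a device restart.
 */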
8355 static void
8356 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
8357 {
8358         struct ixgbe_l2_tn_info *l2_tn_info =
8359                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8360         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8361
8362         if (l2_tn_info->e_tag_en)
8363                 (void)ixgbe_e_tag_enable(hw);
8364
8365         if (l2_tn_info->e_tag_fwd_en)
8366                 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
8367
8368         (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
8369 }
8370
8371 /* remove all the n-tuple filters */
8372 void
8373 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
8374 {
8375         struct ixgbe_filter_info *filter_info =
8376                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8377         struct ixgbe_5tuple_filter *p_5tuple;
8378
8379         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
8380                 ixgbe_remove_5tuple_filter(dev, p_5tuple);
8381 }
8382
8383 /* remove all the ether type filters */
8384 void
8385 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
8386 {
8387         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8388         struct ixgbe_filter_info *filter_info =
8389                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8390         int i;
8391
8392         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8393                 if (filter_info->ethertype_mask & (1 << i) &&
8394                     !filter_info->ethertype_filters[i].conf) {
8395                         (void)ixgbe_ethertype_filter_remove(filter_info,
8396                                                             (uint8_t)i);
8397                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
8398                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
8399                         IXGBE_WRITE_FLUSH(hw);
8400                 }
8401         }
8402 }
8403
8404 /* remove the SYN filter */
8405 void
8406 ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
8407 {
8408         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8409         struct ixgbe_filter_info *filter_info =
8410                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8411
8412         if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
8413                 filter_info->syn_info = 0;
8414
8415                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
8416                 IXGBE_WRITE_FLUSH(hw);
8417         }
8418 }
8419
8420 /* remove all the L2 tunnel filters */
8421 int
8422 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
8423 {
8424         struct ixgbe_l2_tn_info *l2_tn_info =
8425                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8426         struct ixgbe_l2_tn_filter *l2_tn_filter;
8427         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8428         int ret = 0;
8429
8430         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
8431                 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
8432                 l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
8433                 l2_tn_conf.pool           = l2_tn_filter->pool;
8434                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
8435                 if (ret < 0)
8436                         return ret;
8437         }
8438
8439         return 0;
8440 }
8441
8442 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
8443 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
8444 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
8445 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
8446 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
8447 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");