net/ixgbe: improve link state check on VF
[dpdk.git] / drivers/net/ixgbe/ixgbe_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_ethdev_pci.h>
60 #include <rte_atomic.h>
61 #include <rte_malloc.h>
62 #include <rte_random.h>
63 #include <rte_dev.h>
64 #include <rte_hash_crc.h>
65
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "ixgbe_regs.h"
76
77 /*
78  * High threshold controlling when to start sending XOFF frames. Must be at
79  * least 8 bytes less than receive packet buffer size. This value is in units
80  * of 1024 bytes.
81  */
82 #define IXGBE_FC_HI    0x80
83
84 /*
85  * Low threshold controlling when to start sending XON frames. This value is
86  * in units of 1024 bytes.
87  */
88 #define IXGBE_FC_LO    0x40
89
90 /* Default minimum inter-interrupt interval for EITR configuration */
91 #define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E
92
93 /* Timer value included in XOFF frames. */
94 #define IXGBE_FC_PAUSE 0x680
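/*
 * Worked example of the units above (illustrative only): the watermarks are
 * expressed in units of 1024 bytes, so with the defaults
 *
 *     IXGBE_FC_HI = 0x80 -> 0x80 * 1024 = 128 KB high-water mark
 *     IXGBE_FC_LO = 0x40 -> 0x40 * 1024 =  64 KB low-water mark
 *
 * and the base driver converts them to bytes (a left shift by 10) when it
 * programs the receive threshold registers (FCRTL/FCRTH). IXGBE_FC_PAUSE
 * (0x680, i.e. 1664) is the pause-timer value carried in transmitted XOFF
 * frames.
 */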
95
96 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
97 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
98 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
99
100 #define IXGBE_MMW_SIZE_DEFAULT        0x4
101 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
102 #define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
103
104 /*
105  *  Default values for RX/TX configuration
106  */
107 #define IXGBE_DEFAULT_RX_FREE_THRESH  32
108 #define IXGBE_DEFAULT_RX_PTHRESH      8
109 #define IXGBE_DEFAULT_RX_HTHRESH      8
110 #define IXGBE_DEFAULT_RX_WTHRESH      0
111
112 #define IXGBE_DEFAULT_TX_FREE_THRESH  32
113 #define IXGBE_DEFAULT_TX_PTHRESH      32
114 #define IXGBE_DEFAULT_TX_HTHRESH      0
115 #define IXGBE_DEFAULT_TX_WTHRESH      0
116 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
117
118 /* Bit shift and mask */
119 #define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
120 #define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
121 #define IXGBE_8_BIT_WIDTH  CHAR_BIT
122 #define IXGBE_8_BIT_MASK   UINT8_MAX
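/*
 * For reference, with CHAR_BIT == 8 these expand to:
 *     IXGBE_4_BIT_WIDTH = 4,  IXGBE_4_BIT_MASK = 0x0f
 *     IXGBE_8_BIT_WIDTH = 8,  IXGBE_8_BIT_MASK = 0xff
 */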
123
124 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
125
126 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
127
128 #define IXGBE_HKEY_MAX_INDEX 10
129
130 /* Additional timesync values. */
131 #define NSEC_PER_SEC             1000000000L
132 #define IXGBE_INCVAL_10GB        0x66666666
133 #define IXGBE_INCVAL_1GB         0x40000000
134 #define IXGBE_INCVAL_100         0x50000000
135 #define IXGBE_INCVAL_SHIFT_10GB  28
136 #define IXGBE_INCVAL_SHIFT_1GB   24
137 #define IXGBE_INCVAL_SHIFT_100   21
138 #define IXGBE_INCVAL_SHIFT_82599 7
139 #define IXGBE_INCPER_SHIFT_82599 24
140
141 #define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
142
143 #define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
144 #define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
145 #define DEFAULT_ETAG_ETYPE                     0x893f
146 #define IXGBE_ETAG_ETYPE                       0x00005084
147 #define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
148 #define IXGBE_ETAG_ETYPE_VALID                 0x80000000
149 #define IXGBE_RAH_ADTYPE                       0x40000000
150 #define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
151 #define IXGBE_VMVIR_TAGA_MASK                  0x18000000
152 #define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
153 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
154 #define IXGBE_QDE_STRIP_TAG                    0x00000004
155 #define IXGBE_VTEICR_MASK                      0x07
156
157 #define IXGBE_EXVET_VET_EXT_SHIFT              16
158 #define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
159
160 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
161 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
162 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
163 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
164 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
165 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
166 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
167 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
168 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
169 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
170 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
171 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
172 static void ixgbe_dev_close(struct rte_eth_dev *dev);
173 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
174 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
175 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
176 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
177 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
178                                 int wait_to_complete);
179 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
180                                 struct rte_eth_stats *stats);
181 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
182                                 struct rte_eth_xstat *xstats, unsigned n);
183 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
184                                   struct rte_eth_xstat *xstats, unsigned n);
185 static int
186 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
187                 uint64_t *values, unsigned int n);
188 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
189 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
190 static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
191         struct rte_eth_xstat_name *xstats_names,
192         unsigned int size);
193 static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
194         struct rte_eth_xstat_name *xstats_names, unsigned limit);
195 static int ixgbe_dev_xstats_get_names_by_id(
196         struct rte_eth_dev *dev,
197         struct rte_eth_xstat_name *xstats_names,
198         const uint64_t *ids,
199         unsigned int limit);
200 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
201                                              uint16_t queue_id,
202                                              uint8_t stat_idx,
203                                              uint8_t is_rx);
204 static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
205                                  size_t fw_size);
206 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
207                                struct rte_eth_dev_info *dev_info);
208 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
209 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
210                                  struct rte_eth_dev_info *dev_info);
211 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
212
213 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
214                 uint16_t vlan_id, int on);
215 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
216                                enum rte_vlan_type vlan_type,
217                                uint16_t tpid_id);
218 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
219                 uint16_t queue, bool on);
220 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
221                 int on);
222 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
223 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
224 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
225 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
226 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
227
228 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
229 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
230 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
231                                struct rte_eth_fc_conf *fc_conf);
232 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
233                                struct rte_eth_fc_conf *fc_conf);
234 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
235                 struct rte_eth_pfc_conf *pfc_conf);
236 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
237                         struct rte_eth_rss_reta_entry64 *reta_conf,
238                         uint16_t reta_size);
239 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
240                         struct rte_eth_rss_reta_entry64 *reta_conf,
241                         uint16_t reta_size);
242 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
243 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
244 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
245 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
246 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
247 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
248                                       struct rte_intr_handle *handle);
249 static void ixgbe_dev_interrupt_handler(void *param);
250 static void ixgbe_dev_interrupt_delayed_handler(void *param);
251 static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
252                          uint32_t index, uint32_t pool);
253 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
254 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
255                                            struct ether_addr *mac_addr);
256 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
257 static bool is_device_supported(struct rte_eth_dev *dev,
258                                 struct rte_pci_driver *drv);
259
260 /* For Virtual Function support */
261 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
262 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
263 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
264 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
265 static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
266                                    int wait_to_complete);
267 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
268 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
269 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
270 static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
271 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
272                 struct rte_eth_stats *stats);
273 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
274 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
275                 uint16_t vlan_id, int on);
276 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
277                 uint16_t queue, int on);
278 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
279 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
280 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
281                                             uint16_t queue_id);
282 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
283                                              uint16_t queue_id);
284 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
285                                  uint8_t queue, uint8_t msix_vector);
286 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
287 static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
288 static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
289
290 /* For Eth VMDQ APIs support */
291 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
292                 ether_addr * mac_addr, uint8_t on);
293 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
294 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
295                 struct rte_eth_mirror_conf *mirror_conf,
296                 uint8_t rule_id, uint8_t on);
297 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
298                 uint8_t rule_id);
299 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
300                                           uint16_t queue_id);
301 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
302                                            uint16_t queue_id);
303 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
304                                uint8_t queue, uint8_t msix_vector);
305 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
306
307 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
308                 uint16_t queue_idx, uint16_t tx_rate);
309
310 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
311                                 struct ether_addr *mac_addr,
312                                 uint32_t index, uint32_t pool);
313 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
314 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
315                                              struct ether_addr *mac_addr);
316 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
317                         struct rte_eth_syn_filter *filter);
318 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
319                         enum rte_filter_op filter_op,
320                         void *arg);
321 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
322                         struct ixgbe_5tuple_filter *filter);
323 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
324                         struct ixgbe_5tuple_filter *filter);
325 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
326                                 enum rte_filter_op filter_op,
327                                 void *arg);
328 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
329                         struct rte_eth_ntuple_filter *filter);
330 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
331                                 enum rte_filter_op filter_op,
332                                 void *arg);
333 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
334                         struct rte_eth_ethertype_filter *filter);
335 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
336                      enum rte_filter_type filter_type,
337                      enum rte_filter_op filter_op,
338                      void *arg);
339 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
340
341 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
342                                       struct ether_addr *mc_addr_set,
343                                       uint32_t nb_mc_addr);
344 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
345                                    struct rte_eth_dcb_info *dcb_info);
346
347 static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
348 static int ixgbe_get_regs(struct rte_eth_dev *dev,
349                             struct rte_dev_reg_info *regs);
350 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
351 static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
352                                 struct rte_dev_eeprom_info *eeprom);
353 static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
354                                 struct rte_dev_eeprom_info *eeprom);
355
356 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
357 static int ixgbevf_get_regs(struct rte_eth_dev *dev,
358                                 struct rte_dev_reg_info *regs);
359
360 static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
361 static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
362 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
363                                             struct timespec *timestamp,
364                                             uint32_t flags);
365 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
366                                             struct timespec *timestamp);
367 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
368 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
369                                    struct timespec *timestamp);
370 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
371                                    const struct timespec *timestamp);
372 static void ixgbevf_dev_interrupt_handler(void *param);
373
374 static int ixgbe_dev_l2_tunnel_eth_type_conf
375         (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
376 static int ixgbe_dev_l2_tunnel_offload_set
377         (struct rte_eth_dev *dev,
378          struct rte_eth_l2_tunnel_conf *l2_tunnel,
379          uint32_t mask,
380          uint8_t en);
381 static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
382                                              enum rte_filter_op filter_op,
383                                              void *arg);
384
385 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
386                                          struct rte_eth_udp_tunnel *udp_tunnel);
387 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
388                                          struct rte_eth_udp_tunnel *udp_tunnel);
389 static int ixgbe_filter_restore(struct rte_eth_dev *dev);
390 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
391
392 /*
393  * Define VF stats macros for registers that are not cleared on read
394  */
395 #define UPDATE_VF_STAT(reg, last, cur)                          \
396 {                                                               \
397         uint32_t latest = IXGBE_READ_REG(hw, reg);              \
398         cur += (latest - last) & UINT_MAX;                      \
399         last = latest;                                          \
400 }
401
402 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
403 {                                                                \
404         u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
405         u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
406         u64 latest = ((new_msb << 32) | new_lsb);                \
407         cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
408         last = latest;                                           \
409 }
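/*
 * Example of the wrap-around handling in the macros above (illustrative
 * values only). 32-bit case: if the previous snapshot was last = 0xfffffff0
 * and the register now reads latest = 0x00000010, then
 *
 *     cur += (0x00000010 - 0xfffffff0) & UINT_MAX   ->  cur += 0x20
 *
 * so a single counter roll-over between reads is still accounted correctly.
 * 36-bit case: last = 0xffffffff0, latest = 0x000000010 gives
 *
 *     cur += (0x1000000000 + 0x10 - 0xffffffff0) & 0xfffffffff   ->  cur += 0x20
 *
 * Both macros assume a local "hw" pointer is in scope at the point of use,
 * as in the VF stats update code that uses them.
 */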
410
411 #define IXGBE_SET_HWSTRIP(h, q) do {\
412                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
413                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
414                 (h)->bitmap[idx] |= 1 << bit;\
415         } while (0)
416
417 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\
418                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
419                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
420                 (h)->bitmap[idx] &= ~(1 << bit);\
421         } while (0)
422
423 #define IXGBE_GET_HWSTRIP(h, q, r) do {\
424                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
425                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
426                 (r) = (h)->bitmap[idx] >> bit & 1;\
427         } while (0)
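/*
 * Example of the bitmap arithmetic used by the HWSTRIP macros (assuming
 * 32-bit bitmap words, i.e. sizeof(bitmap[0]) * NBBY == 32): for queue 37,
 *
 *     idx = 37 / 32 = 1,  bit = 37 % 32 = 5
 *
 * so IXGBE_SET_HWSTRIP(h, 37) sets bit 5 of bitmap[1], and
 * IXGBE_GET_HWSTRIP(h, 37, r) reads that same bit back into r.
 */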
428
429 /*
430  * The set of PCI devices this driver supports
431  */
432 static const struct rte_pci_id pci_id_ixgbe_map[] = {
433         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
434         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
435         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
436         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
437         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
438         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
439         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
440         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
441         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
442         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
443         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
444         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
445         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
446         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
447         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
448         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
449         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
450         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
451         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
452         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
453         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
454         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
455         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
456         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
457         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
458         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
459         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
460         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
461         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
462         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
463         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
464         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
465         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
466         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
467         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
468         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
469         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
470         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
471         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
472         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
473         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
474         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
475         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
476         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
477         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
478         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
479         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
480         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
481         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
482         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
483         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
484         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
485         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
486 #ifdef RTE_LIBRTE_IXGBE_BYPASS
487         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
488 #endif
489         { .vendor_id = 0, /* sentinel */ },
490 };
491
492 /*
493  * The set of PCI devices this driver supports (for virtual functions)
494  */
495 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
496         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
497         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
498         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
499         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
500         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
501         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
502         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
503         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
504         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
505         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
506         { .vendor_id = 0, /* sentinel */ },
507 };
508
509 static const struct rte_eth_desc_lim rx_desc_lim = {
510         .nb_max = IXGBE_MAX_RING_DESC,
511         .nb_min = IXGBE_MIN_RING_DESC,
512         .nb_align = IXGBE_RXD_ALIGN,
513 };
514
515 static const struct rte_eth_desc_lim tx_desc_lim = {
516         .nb_max = IXGBE_MAX_RING_DESC,
517         .nb_min = IXGBE_MIN_RING_DESC,
518         .nb_align = IXGBE_TXD_ALIGN,
519         .nb_seg_max = IXGBE_TX_MAX_SEG,
520         .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
521 };
522
523 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
524         .dev_configure        = ixgbe_dev_configure,
525         .dev_start            = ixgbe_dev_start,
526         .dev_stop             = ixgbe_dev_stop,
527         .dev_set_link_up    = ixgbe_dev_set_link_up,
528         .dev_set_link_down  = ixgbe_dev_set_link_down,
529         .dev_close            = ixgbe_dev_close,
530         .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
531         .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
532         .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
533         .allmulticast_disable = ixgbe_dev_allmulticast_disable,
534         .link_update          = ixgbe_dev_link_update,
535         .stats_get            = ixgbe_dev_stats_get,
536         .xstats_get           = ixgbe_dev_xstats_get,
537         .xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
538         .stats_reset          = ixgbe_dev_stats_reset,
539         .xstats_reset         = ixgbe_dev_xstats_reset,
540         .xstats_get_names     = ixgbe_dev_xstats_get_names,
541         .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
542         .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
543         .fw_version_get       = ixgbe_fw_version_get,
544         .dev_infos_get        = ixgbe_dev_info_get,
545         .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
546         .mtu_set              = ixgbe_dev_mtu_set,
547         .vlan_filter_set      = ixgbe_vlan_filter_set,
548         .vlan_tpid_set        = ixgbe_vlan_tpid_set,
549         .vlan_offload_set     = ixgbe_vlan_offload_set,
550         .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
551         .rx_queue_start       = ixgbe_dev_rx_queue_start,
552         .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
553         .tx_queue_start       = ixgbe_dev_tx_queue_start,
554         .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
555         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
556         .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
557         .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
558         .rx_queue_release     = ixgbe_dev_rx_queue_release,
559         .rx_queue_count       = ixgbe_dev_rx_queue_count,
560         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
561         .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
562         .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
563         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
564         .tx_queue_release     = ixgbe_dev_tx_queue_release,
565         .dev_led_on           = ixgbe_dev_led_on,
566         .dev_led_off          = ixgbe_dev_led_off,
567         .flow_ctrl_get        = ixgbe_flow_ctrl_get,
568         .flow_ctrl_set        = ixgbe_flow_ctrl_set,
569         .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
570         .mac_addr_add         = ixgbe_add_rar,
571         .mac_addr_remove      = ixgbe_remove_rar,
572         .mac_addr_set         = ixgbe_set_default_mac_addr,
573         .uc_hash_table_set    = ixgbe_uc_hash_table_set,
574         .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
575         .mirror_rule_set      = ixgbe_mirror_rule_set,
576         .mirror_rule_reset    = ixgbe_mirror_rule_reset,
577         .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
578         .reta_update          = ixgbe_dev_rss_reta_update,
579         .reta_query           = ixgbe_dev_rss_reta_query,
580         .rss_hash_update      = ixgbe_dev_rss_hash_update,
581         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
582         .filter_ctrl          = ixgbe_dev_filter_ctrl,
583         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
584         .rxq_info_get         = ixgbe_rxq_info_get,
585         .txq_info_get         = ixgbe_txq_info_get,
586         .timesync_enable      = ixgbe_timesync_enable,
587         .timesync_disable     = ixgbe_timesync_disable,
588         .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
589         .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
590         .get_reg              = ixgbe_get_regs,
591         .get_eeprom_length    = ixgbe_get_eeprom_length,
592         .get_eeprom           = ixgbe_get_eeprom,
593         .set_eeprom           = ixgbe_set_eeprom,
594         .get_dcb_info         = ixgbe_dev_get_dcb_info,
595         .timesync_adjust_time = ixgbe_timesync_adjust_time,
596         .timesync_read_time   = ixgbe_timesync_read_time,
597         .timesync_write_time  = ixgbe_timesync_write_time,
598         .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
599         .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
600         .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
601         .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
602 };
603
604 /*
605  * dev_ops for the virtual function; only the bare necessities for basic
606  * VF operation are implemented
607  */
608 static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
609         .dev_configure        = ixgbevf_dev_configure,
610         .dev_start            = ixgbevf_dev_start,
611         .dev_stop             = ixgbevf_dev_stop,
612         .link_update          = ixgbevf_dev_link_update,
613         .stats_get            = ixgbevf_dev_stats_get,
614         .xstats_get           = ixgbevf_dev_xstats_get,
615         .stats_reset          = ixgbevf_dev_stats_reset,
616         .xstats_reset         = ixgbevf_dev_stats_reset,
617         .xstats_get_names     = ixgbevf_dev_xstats_get_names,
618         .dev_close            = ixgbevf_dev_close,
619         .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
620         .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
621         .dev_infos_get        = ixgbevf_dev_info_get,
622         .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
623         .mtu_set              = ixgbevf_dev_set_mtu,
624         .vlan_filter_set      = ixgbevf_vlan_filter_set,
625         .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
626         .vlan_offload_set     = ixgbevf_vlan_offload_set,
627         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
628         .rx_queue_release     = ixgbe_dev_rx_queue_release,
629         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
630         .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
631         .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
632         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
633         .tx_queue_release     = ixgbe_dev_tx_queue_release,
634         .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
635         .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
636         .mac_addr_add         = ixgbevf_add_mac_addr,
637         .mac_addr_remove      = ixgbevf_remove_mac_addr,
638         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
639         .rxq_info_get         = ixgbe_rxq_info_get,
640         .txq_info_get         = ixgbe_txq_info_get,
641         .mac_addr_set         = ixgbevf_set_default_mac_addr,
642         .get_reg              = ixgbevf_get_regs,
643         .reta_update          = ixgbe_dev_rss_reta_update,
644         .reta_query           = ixgbe_dev_rss_reta_query,
645         .rss_hash_update      = ixgbe_dev_rss_hash_update,
646         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
647 };
648
649 /* store statistics names and their offsets in the stats structure */
650 struct rte_ixgbe_xstats_name_off {
651         char name[RTE_ETH_XSTATS_NAME_SIZE];
652         unsigned offset;
653 };
654
655 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
656         {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
657         {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
658         {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
659         {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
660         {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
661         {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
662         {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
663         {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
664         {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
665         {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
666         {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
667         {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
668         {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
669         {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
670         {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
671                 prc1023)},
672         {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
673                 prc1522)},
674         {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
675         {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
676         {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
677         {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
678         {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
679         {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
680         {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
681         {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
682         {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
683         {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
684         {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
685         {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
686         {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
687         {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
688         {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
689         {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
690         {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
691                 ptc1023)},
692         {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
693                 ptc1522)},
694         {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
695         {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
696         {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
697         {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
698
699         {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
700                 fdirustat_add)},
701         {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
702                 fdirustat_remove)},
703         {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
704                 fdirfstat_fadd)},
705         {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
706                 fdirfstat_fremove)},
707         {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
708                 fdirmatch)},
709         {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
710                 fdirmiss)},
711
712         {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
713         {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
714         {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
715                 fclast)},
716         {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
717         {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
718         {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
719         {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
720         {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
721                 fcoe_noddp)},
722         {"rx_fcoe_no_direct_data_placement_ext_buff",
723                 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
724
725         {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
726                 lxontxc)},
727         {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
728                 lxonrxc)},
729         {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
730                 lxofftxc)},
731         {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
732                 lxoffrxc)},
733         {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
734 };
735
736 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
737                            sizeof(rte_ixgbe_stats_strings[0]))
738
739 /* MACsec statistics */
740 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
741         {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
742                 out_pkts_untagged)},
743         {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
744                 out_pkts_encrypted)},
745         {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
746                 out_pkts_protected)},
747         {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
748                 out_octets_encrypted)},
749         {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
750                 out_octets_protected)},
751         {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
752                 in_pkts_untagged)},
753         {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
754                 in_pkts_badtag)},
755         {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
756                 in_pkts_nosci)},
757         {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
758                 in_pkts_unknownsci)},
759         {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
760                 in_octets_decrypted)},
761         {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
762                 in_octets_validated)},
763         {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
764                 in_pkts_unchecked)},
765         {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
766                 in_pkts_delayed)},
767         {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
768                 in_pkts_late)},
769         {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
770                 in_pkts_ok)},
771         {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
772                 in_pkts_invalid)},
773         {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
774                 in_pkts_notvalid)},
775         {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
776                 in_pkts_unusedsa)},
777         {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
778                 in_pkts_notusingsa)},
779 };
780
781 #define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
782                            sizeof(rte_ixgbe_macsec_strings[0]))
783
784 /* Per-queue statistics */
785 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
786         {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
787         {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
788         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
789         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
790 };
791
792 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
793                            sizeof(rte_ixgbe_rxq_strings[0]))
794 #define IXGBE_NB_RXQ_PRIO_VALUES 8
795
796 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
797         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
798         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
799         {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
800                 pxon2offc)},
801 };
802
803 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
804                            sizeof(rte_ixgbe_txq_strings[0]))
805 #define IXGBE_NB_TXQ_PRIO_VALUES 8
806
807 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
808         {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
809 };
810
811 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
812                 sizeof(rte_ixgbevf_stats_strings[0]))
813
814 /**
815  * Atomically reads the link status information from global
816  * structure rte_eth_dev.
817  *
818  * @param dev
819  *   - Pointer to the structure rte_eth_dev to read from.
820  *   - Pointer to the buffer to be filled with the link status.
821  *
822  * @return
823  *   - On success, zero.
824  *   - On failure, negative value.
825  */
826 static inline int
827 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
828                                 struct rte_eth_link *link)
829 {
830         struct rte_eth_link *dst = link;
831         struct rte_eth_link *src = &(dev->data->dev_link);
832
833         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
834                                         *(uint64_t *)src) == 0)
835                 return -1;
836
837         return 0;
838 }
839
840 /**
841  * Atomically writes the link status information into global
842  * structure rte_eth_dev.
843  *
844  * @param dev
845  *   - Pointer to the structure rte_eth_dev to write to.
846  *   - Pointer to the buffer holding the link status to be written.
847  *
848  * @return
849  *   - On success, zero.
850  *   - On failure, negative value.
851  */
852 static inline int
853 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
854                                 struct rte_eth_link *link)
855 {
856         struct rte_eth_link *dst = &(dev->data->dev_link);
857         struct rte_eth_link *src = link;
858
859         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
860                                         *(uint64_t *)src) == 0)
861                 return -1;
862
863         return 0;
864 }
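/*
 * Typical usage of the two helpers above from a link_update callback
 * (a sketch; the field values are illustrative):
 *
 *     struct rte_eth_link old, link;
 *
 *     memset(&old, 0, sizeof(old));
 *     rte_ixgbe_dev_atomic_read_link_status(dev, &old);
 *     ... query the hardware ...
 *     link.link_status = ETH_LINK_UP;
 *     link.link_speed = ETH_SPEED_NUM_10G;
 *     rte_ixgbe_dev_atomic_write_link_status(dev, &link);
 *
 * The compare-and-set treats struct rte_eth_link as a single 64-bit word,
 * which is why both helpers cast the pointers to uint64_t *.
 */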
865
866 /*
867  * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
868  */
869 static inline int
870 ixgbe_is_sfp(struct ixgbe_hw *hw)
871 {
872         switch (hw->phy.type) {
873         case ixgbe_phy_sfp_avago:
874         case ixgbe_phy_sfp_ftl:
875         case ixgbe_phy_sfp_intel:
876         case ixgbe_phy_sfp_unknown:
877         case ixgbe_phy_sfp_passive_tyco:
878         case ixgbe_phy_sfp_passive_unknown:
879                 return 1;
880         default:
881                 return 0;
882         }
883 }
884
885 static inline int32_t
886 ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
887 {
888         uint32_t ctrl_ext;
889         int32_t status;
890
891         status = ixgbe_reset_hw(hw);
892
893         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
894         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
895         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
896         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
897         IXGBE_WRITE_FLUSH(hw);
898
899         if (status == IXGBE_ERR_SFP_NOT_PRESENT)
900                 status = IXGBE_SUCCESS;
901         return status;
902 }
903
904 static inline void
905 ixgbe_enable_intr(struct rte_eth_dev *dev)
906 {
907         struct ixgbe_interrupt *intr =
908                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
909         struct ixgbe_hw *hw =
910                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
911
912         IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
913         IXGBE_WRITE_FLUSH(hw);
914 }
915
916 /*
917  * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
918  */
919 static void
920 ixgbe_disable_intr(struct ixgbe_hw *hw)
921 {
922         PMD_INIT_FUNC_TRACE();
923
924         if (hw->mac.type == ixgbe_mac_82598EB) {
925                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
926         } else {
927                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
928                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
929                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
930         }
931         IXGBE_WRITE_FLUSH(hw);
932 }
933
934 /*
935  * This function resets queue statistics mapping registers.
936  * From Niantic datasheet, Initialization of Statistics section:
937  * "...if software requires the queue counters, the RQSMR and TQSM registers
938  * must be re-programmed following a device reset."
939  */
940 static void
941 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
942 {
943         uint32_t i;
944
945         for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
946                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
947                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
948         }
949 }
950
951
952 static int
953 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
954                                   uint16_t queue_id,
955                                   uint8_t stat_idx,
956                                   uint8_t is_rx)
957 {
958 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
959 #define NB_QMAP_FIELDS_PER_QSM_REG 4
960 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
961
962         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
963         struct ixgbe_stat_mapping_registers *stat_mappings =
964                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
965         uint32_t qsmr_mask = 0;
966         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
967         uint32_t q_map;
968         uint8_t n, offset;
969
970         if ((hw->mac.type != ixgbe_mac_82599EB) &&
971                 (hw->mac.type != ixgbe_mac_X540) &&
972                 (hw->mac.type != ixgbe_mac_X550) &&
973                 (hw->mac.type != ixgbe_mac_X550EM_x) &&
974                 (hw->mac.type != ixgbe_mac_X550EM_a))
975                 return -ENOSYS;
976
977         PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
978                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
979                      queue_id, stat_idx);
980
981         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
982         if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
983                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
984                 return -EIO;
985         }
986         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
987
988         /* Now clear any previous stat_idx set */
989         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
990         if (!is_rx)
991                 stat_mappings->tqsm[n] &= ~clearing_mask;
992         else
993                 stat_mappings->rqsmr[n] &= ~clearing_mask;
994
995         q_map = (uint32_t)stat_idx;
996         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
997         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
998         if (!is_rx)
999                 stat_mappings->tqsm[n] |= qsmr_mask;
1000         else
1001                 stat_mappings->rqsmr[n] |= qsmr_mask;
1002
1003         PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
1004                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
1005                      queue_id, stat_idx);
1006         PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
1007                      is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
1008
1009         /* Now write the mapping in the appropriate register */
1010         if (is_rx) {
1011                 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
1012                              stat_mappings->rqsmr[n], n);
1013                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
1014         } else {
1015                 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
1016                              stat_mappings->tqsm[n], n);
1017                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
1018         }
1019         return 0;
1020 }
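/*
 * Example of the mapping computed above: each 32-bit RQSMR/TQSM register
 * holds NB_QMAP_FIELDS_PER_QSM_REG (4) fields of
 * QSM_REG_NB_BITS_PER_QMAP_FIELD (8) bits each, so for queue_id = 13
 *
 *     n = 13 / 4 = 3,  offset = 13 % 4 = 1
 *
 * and the stat_idx value is written into bits 8..15 of RQSMR(3) for RX or
 * TQSM(3) for TX.
 */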
1021
1022 static void
1023 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
1024 {
1025         struct ixgbe_stat_mapping_registers *stat_mappings =
1026                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
1027         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1028         int i;
1029
1030         /* write whatever was in stat mapping table to the NIC */
1031         for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
1032                 /* rx */
1033                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
1034
1035                 /* tx */
1036                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
1037         }
1038 }
1039
1040 static void
1041 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
1042 {
1043         uint8_t i;
1044         struct ixgbe_dcb_tc_config *tc;
1045         uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
1046
1047         dcb_config->num_tcs.pg_tcs = dcb_max_tc;
1048         dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
1049         for (i = 0; i < dcb_max_tc; i++) {
1050                 tc = &dcb_config->tc_config[i];
1051                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
1052                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
1053                                  (uint8_t)(100/dcb_max_tc + (i & 1));
1054                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
1055                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
1056                                  (uint8_t)(100/dcb_max_tc + (i & 1));
1057                 tc->pfc = ixgbe_dcb_pfc_disabled;
1058         }
1059
1060         /* Initialize default user to priority mapping, UPx->TC0 */
1061         tc = &dcb_config->tc_config[0];
1062         tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
1063         tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
1064         for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
1065                 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
1066                 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
1067         }
1068         dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
1069         dcb_config->pfc_mode_enable = false;
1070         dcb_config->vt_mode = true;
1071         dcb_config->round_robin_enable = false;
1072         /* support all DCB capabilities in 82599 */
1073         dcb_config->support.capabilities = 0xFF;
1074
1075         /* We only support 4 TCs for X540 and X550 */
1076         if (hw->mac.type == ixgbe_mac_X540 ||
1077                 hw->mac.type == ixgbe_mac_X550 ||
1078                 hw->mac.type == ixgbe_mac_X550EM_x ||
1079                 hw->mac.type == ixgbe_mac_X550EM_a) {
1080                 dcb_config->num_tcs.pg_tcs = 4;
1081                 dcb_config->num_tcs.pfc_tcs = 4;
1082         }
1083 }
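/*
 * Example of the default bandwidth split computed above: with the usual
 * IXGBE_DCB_MAX_TRAFFIC_CLASS of 8, bwg_percent is 100/8 + (i & 1),
 * i.e. 12% for even TCs and 13% for odd TCs, which sums to
 * 4 * 12 + 4 * 13 = 100 across the eight traffic classes.
 */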
1084
1085 /*
1086  * Ensure that all locks are released before first NVM or PHY access
1087  */
1088 static void
1089 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
1090 {
1091         uint16_t mask;
1092
1093         /*
1094          * The PHY lock should not fail at this early stage. If it does, it
1095          * is due to an improper exit of the application.
1096          * So force the release of the faulty lock. Release of the common lock
1097          * is done automatically by the swfw_sync function.
1098          */
1099         mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
1100         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1101                 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
1102         }
1103         ixgbe_release_swfw_semaphore(hw, mask);
1104
1105         /*
1106          * These locks are trickier since they are common to all ports; but
1107          * swfw_sync retries for long enough (1s) to be almost sure that if a
1108          * lock cannot be taken, it is due to an improper lock of the
1109          * semaphore.
1110          */
1111         mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
1112         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1113                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
1114         }
1115         ixgbe_release_swfw_semaphore(hw, mask);
1116 }
1117
1118 /*
1119  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
1120  * It returns 0 on success.
1121  */
1122 static int
1123 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
1124 {
1125         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1126         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1127         struct ixgbe_hw *hw =
1128                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1129         struct ixgbe_vfta *shadow_vfta =
1130                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1131         struct ixgbe_hwstrip *hwstrip =
1132                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1133         struct ixgbe_dcb_config *dcb_config =
1134                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
1135         struct ixgbe_filter_info *filter_info =
1136                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1137         struct ixgbe_bw_conf *bw_conf =
1138                 IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
1139         uint32_t ctrl_ext;
1140         uint16_t csum;
1141         int diag, i;
1142
1143         PMD_INIT_FUNC_TRACE();
1144
1145         eth_dev->dev_ops = &ixgbe_eth_dev_ops;
1146         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1147         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1148         eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
1149
1150         /*
1151          * For secondary processes, we don't initialise any further as primary
1152          * has already done this work. Only check we don't need a different
1153          * RX and TX function.
1154          */
1155         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1156                 struct ixgbe_tx_queue *txq;
1157                 /* TX queue function in primary, set by the last queue initialized;
1158                  * TX queues may not have been initialized by the primary process yet
1159                  */
1160                 if (eth_dev->data->tx_queues) {
1161                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1162                         ixgbe_set_tx_function(eth_dev, txq);
1163                 } else {
1164                         /* Use default TX function if we get here */
1165                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1166                                      "Using default TX function.");
1167                 }
1168
1169                 ixgbe_set_rx_function(eth_dev);
1170
1171                 return 0;
1172         }
1173
1174         rte_eth_copy_pci_info(eth_dev, pci_dev);
1175         eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
1176
1177         /* Vendor and Device ID need to be set before init of shared code */
1178         hw->device_id = pci_dev->id.device_id;
1179         hw->vendor_id = pci_dev->id.vendor_id;
1180         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1181         hw->allow_unsupported_sfp = 1;
1182
1183         /* Initialize the shared code (base driver) */
1184 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1185         diag = ixgbe_bypass_init_shared_code(hw);
1186 #else
1187         diag = ixgbe_init_shared_code(hw);
1188 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1189
1190         if (diag != IXGBE_SUCCESS) {
1191                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1192                 return -EIO;
1193         }
1194
1195         /* pick up the PCI bus settings for reporting later */
1196         ixgbe_get_bus_info(hw);
1197
1198         /* Unlock any pending hardware semaphore */
1199         ixgbe_swfw_lock_reset(hw);
1200
1201         /* Initialize DCB configuration */
1202         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1203         ixgbe_dcb_init(hw, dcb_config);
1204         /* Get Hardware Flow Control setting */
1205         hw->fc.requested_mode = ixgbe_fc_full;
1206         hw->fc.current_mode = ixgbe_fc_full;
1207         hw->fc.pause_time = IXGBE_FC_PAUSE;
1208         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1209                 hw->fc.low_water[i] = IXGBE_FC_LO;
1210                 hw->fc.high_water[i] = IXGBE_FC_HI;
1211         }
1212         hw->fc.send_xon = 1;
1213
1214         /* Make sure we have a good EEPROM before we read from it */
1215         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1216         if (diag != IXGBE_SUCCESS) {
1217                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1218                 return -EIO;
1219         }
1220
1221 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1222         diag = ixgbe_bypass_init_hw(hw);
1223 #else
1224         diag = ixgbe_init_hw(hw);
1225 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1226
1227         /*
1228          * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1229          * is called too soon after the kernel driver unbinding/binding occurs.
1230          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1231          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1232          * also called. See ixgbe_identify_phy_82599(). The reason for the
1233          * failure is not known, and only occurs when virtualisation features
1234          * are disabled in the BIOS. A delay of 100ms was found to be enough by
1235          * trial-and-error, and is doubled to be safe.
1236          */
1237         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1238                 rte_delay_ms(200);
1239                 diag = ixgbe_init_hw(hw);
1240         }
1241
1242         if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
1243                 diag = IXGBE_SUCCESS;
1244
1245         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1246                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1247                              "LOM.  Please be aware there may be issues associated "
1248                              "with your hardware.");
1249                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1250                              "please contact your Intel or hardware representative "
1251                              "who provided you with this hardware.");
1252         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1253                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1254         if (diag) {
1255                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1256                 return -EIO;
1257         }
1258
1259         /* Reset the hw statistics */
1260         ixgbe_dev_stats_reset(eth_dev);
1261
1262         /* disable interrupt */
1263         ixgbe_disable_intr(hw);
1264
1265         /* reset mappings for queue statistics hw counters */
1266         ixgbe_reset_qstat_mappings(hw);
1267
1268         /* Allocate memory for storing MAC addresses */
1269         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1270                                                hw->mac.num_rar_entries, 0);
1271         if (eth_dev->data->mac_addrs == NULL) {
1272                 PMD_INIT_LOG(ERR,
1273                              "Failed to allocate %u bytes needed to store "
1274                              "MAC addresses",
1275                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1276                 return -ENOMEM;
1277         }
1278         /* Copy the permanent MAC address */
1279         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1280                         &eth_dev->data->mac_addrs[0]);
1281
1282         /* Allocate memory for storing hash filter MAC addresses */
1283         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1284                                                     IXGBE_VMDQ_NUM_UC_MAC, 0);
1285         if (eth_dev->data->hash_mac_addrs == NULL) {
1286                 PMD_INIT_LOG(ERR,
1287                              "Failed to allocate %d bytes needed to store MAC addresses",
1288                              ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1289                 return -ENOMEM;
1290         }
1291
1292         /* initialize the vfta */
1293         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1294
1295         /* initialize the hw strip bitmap */
1296         memset(hwstrip, 0, sizeof(*hwstrip));
1297
1298         /* initialize PF if max_vfs not zero */
1299         ixgbe_pf_host_init(eth_dev);
1300
1301         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1302         /* let hardware know driver is loaded */
1303         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1304         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1305         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1306         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1307         IXGBE_WRITE_FLUSH(hw);
1308
1309         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1310                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1311                              (int) hw->mac.type, (int) hw->phy.type,
1312                              (int) hw->phy.sfp_type);
1313         else
1314                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1315                              (int) hw->mac.type, (int) hw->phy.type);
1316
1317         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1318                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1319                      pci_dev->id.device_id);
1320
1321         rte_intr_callback_register(intr_handle,
1322                                    ixgbe_dev_interrupt_handler, eth_dev);
1323
1324         /* enable uio/vfio intr/eventfd mapping */
1325         rte_intr_enable(intr_handle);
1326
1327         /* enable support intr */
1328         ixgbe_enable_intr(eth_dev);
1329
1330         /* initialize filter info */
1331         memset(filter_info, 0,
1332                sizeof(struct ixgbe_filter_info));
1333
1334         /* initialize 5tuple filter list */
1335         TAILQ_INIT(&filter_info->fivetuple_list);
1336
1337         /* initialize flow director filter list & hash */
1338         ixgbe_fdir_filter_init(eth_dev);
1339
1340         /* initialize l2 tunnel filter list & hash */
1341         ixgbe_l2_tn_filter_init(eth_dev);
1342
1343         TAILQ_INIT(&filter_ntuple_list);
1344         TAILQ_INIT(&filter_ethertype_list);
1345         TAILQ_INIT(&filter_syn_list);
1346         TAILQ_INIT(&filter_fdir_list);
1347         TAILQ_INIT(&filter_l2_tunnel_list);
1348         TAILQ_INIT(&ixgbe_flow_list);
1349
1350         /* initialize bandwidth configuration info */
1351         memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
1352
1353         return 0;
1354 }
1355
1356 static int
1357 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1358 {
1359         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1360         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1361         struct ixgbe_hw *hw;
1362
1363         PMD_INIT_FUNC_TRACE();
1364
1365         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1366                 return -EPERM;
1367
1368         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1369
1370         if (hw->adapter_stopped == 0)
1371                 ixgbe_dev_close(eth_dev);
1372
1373         eth_dev->dev_ops = NULL;
1374         eth_dev->rx_pkt_burst = NULL;
1375         eth_dev->tx_pkt_burst = NULL;
1376
1377         /* Unlock any pending hardware semaphore */
1378         ixgbe_swfw_lock_reset(hw);
1379
1380         /* disable uio intr before callback unregister */
1381         rte_intr_disable(intr_handle);
1382         rte_intr_callback_unregister(intr_handle,
1383                                      ixgbe_dev_interrupt_handler, eth_dev);
1384
1385         /* uninitialize PF if max_vfs not zero */
1386         ixgbe_pf_host_uninit(eth_dev);
1387
1388         rte_free(eth_dev->data->mac_addrs);
1389         eth_dev->data->mac_addrs = NULL;
1390
1391         rte_free(eth_dev->data->hash_mac_addrs);
1392         eth_dev->data->hash_mac_addrs = NULL;
1393
1394         /* remove all the fdir filters & hash */
1395         ixgbe_fdir_filter_uninit(eth_dev);
1396
1397         /* remove all the L2 tunnel filters & hash */
1398         ixgbe_l2_tn_filter_uninit(eth_dev);
1399
1400         /* Remove all ntuple filters of the device */
1401         ixgbe_ntuple_filter_uninit(eth_dev);
1402
1403         /* clear all the filters list */
1404         ixgbe_filterlist_flush();
1405
1406         return 0;
1407 }
1408
1409 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
1410 {
1411         struct ixgbe_filter_info *filter_info =
1412                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1413         struct ixgbe_5tuple_filter *p_5tuple;
1414
1415         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
1416                 TAILQ_REMOVE(&filter_info->fivetuple_list,
1417                              p_5tuple,
1418                              entries);
1419                 rte_free(p_5tuple);
1420         }
1421         memset(filter_info->fivetuple_mask, 0,
1422                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1423
1424         return 0;
1425 }
1426
1427 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
1428 {
1429         struct ixgbe_hw_fdir_info *fdir_info =
1430                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1431         struct ixgbe_fdir_filter *fdir_filter;
1432
1433         if (fdir_info->hash_map)
1434                 rte_free(fdir_info->hash_map);
1435         if (fdir_info->hash_handle)
1436                 rte_hash_free(fdir_info->hash_handle);
1437
1438         while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1439                 TAILQ_REMOVE(&fdir_info->fdir_list,
1440                              fdir_filter,
1441                              entries);
1442                 rte_free(fdir_filter);
1443         }
1444
1445         return 0;
1446 }
1447
1448 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
1449 {
1450         struct ixgbe_l2_tn_info *l2_tn_info =
1451                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1452         struct ixgbe_l2_tn_filter *l2_tn_filter;
1453
1454         if (l2_tn_info->hash_map)
1455                 rte_free(l2_tn_info->hash_map);
1456         if (l2_tn_info->hash_handle)
1457                 rte_hash_free(l2_tn_info->hash_handle);
1458
1459         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
1460                 TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
1461                              l2_tn_filter,
1462                              entries);
1463                 rte_free(l2_tn_filter);
1464         }
1465
1466         return 0;
1467 }
1468
1469 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
1470 {
1471         struct ixgbe_hw_fdir_info *fdir_info =
1472                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1473         char fdir_hash_name[RTE_HASH_NAMESIZE];
1474         struct rte_hash_parameters fdir_hash_params = {
1475                 .name = fdir_hash_name,
1476                 .entries = IXGBE_MAX_FDIR_FILTER_NUM,
1477                 .key_len = sizeof(union ixgbe_atr_input),
1478                 .hash_func = rte_hash_crc,
1479                 .hash_func_init_val = 0,
1480                 .socket_id = rte_socket_id(),
1481         };
1482
1483         TAILQ_INIT(&fdir_info->fdir_list);
1484         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1485                  "fdir_%s", eth_dev->device->name);
1486         fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
1487         if (!fdir_info->hash_handle) {
1488                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1489                 return -EINVAL;
1490         }
1491         fdir_info->hash_map = rte_zmalloc("ixgbe",
1492                                           sizeof(struct ixgbe_fdir_filter *) *
1493                                           IXGBE_MAX_FDIR_FILTER_NUM,
1494                                           0);
1495         if (!fdir_info->hash_map) {
1496                 PMD_INIT_LOG(ERR,
1497                              "Failed to allocate memory for fdir hash map!");
1498                 return -ENOMEM;
1499         }
1500         fdir_info->mask_added = FALSE;
1501
1502         return 0;
1503 }
1504
1505 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
1506 {
1507         struct ixgbe_l2_tn_info *l2_tn_info =
1508                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1509         char l2_tn_hash_name[RTE_HASH_NAMESIZE];
1510         struct rte_hash_parameters l2_tn_hash_params = {
1511                 .name = l2_tn_hash_name,
1512                 .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
1513                 .key_len = sizeof(struct ixgbe_l2_tn_key),
1514                 .hash_func = rte_hash_crc,
1515                 .hash_func_init_val = 0,
1516                 .socket_id = rte_socket_id(),
1517         };
1518
1519         TAILQ_INIT(&l2_tn_info->l2_tn_list);
1520         snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
1521                  "l2_tn_%s", eth_dev->device->name);
1522         l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
1523         if (!l2_tn_info->hash_handle) {
1524                 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
1525                 return -EINVAL;
1526         }
1527         l2_tn_info->hash_map = rte_zmalloc("ixgbe",
1528                                    sizeof(struct ixgbe_l2_tn_filter *) *
1529                                    IXGBE_MAX_L2_TN_FILTER_NUM,
1530                                    0);
1531         if (!l2_tn_info->hash_map) {
1532                 PMD_INIT_LOG(ERR,
1533                         "Failed to allocate memory for L2 TN hash map!");
1534                 return -ENOMEM;
1535         }
1536         l2_tn_info->e_tag_en = FALSE;
1537         l2_tn_info->e_tag_fwd_en = FALSE;
1538         l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
1539
1540         return 0;
1541 }
1542 /*
1543  * Negotiate mailbox API version with the PF.
1544  * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
1545  * Then we try to negotiate starting with the most recent one.
1546  * If all negotiation attempts fail, then we will proceed with
1547  * the default one (ixgbe_mbox_api_10).
1548  */
1549 static void
1550 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1551 {
1552         int32_t i;
1553
1554         /* start with highest supported, proceed down */
1555         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1556                 ixgbe_mbox_api_12,
1557                 ixgbe_mbox_api_11,
1558                 ixgbe_mbox_api_10,
1559         };
1560
1561         for (i = 0;
1562                         i != RTE_DIM(sup_ver) &&
1563                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1564                         i++)
1565                 ;
1566 }
1567
1568 static void
1569 generate_random_mac_addr(struct ether_addr *mac_addr)
1570 {
1571         uint64_t random;
1572
1573         /* Set Organizationally Unique Identifier (OUI) prefix. */
1574         mac_addr->addr_bytes[0] = 0x00;
1575         mac_addr->addr_bytes[1] = 0x09;
1576         mac_addr->addr_bytes[2] = 0xC0;
1577         /* Force indication of locally assigned MAC address. */
1578         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1579         /* Generate the last 3 bytes of the MAC address with a random number. */
1580         random = rte_rand();
1581         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1582 }
1583
1584 /*
1585  * Virtual Function device init
1586  */
1587 static int
1588 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1589 {
1590         int diag;
1591         uint32_t tc, tcs;
1592         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1593         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1594         struct ixgbe_hw *hw =
1595                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1596         struct ixgbe_vfta *shadow_vfta =
1597                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1598         struct ixgbe_hwstrip *hwstrip =
1599                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1600         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1601
1602         PMD_INIT_FUNC_TRACE();
1603
1604         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1605         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1606         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1607
1608         /* for secondary processes, we don't initialise any further as primary
1609          * has already done this work. Only check we don't need a different
1610          * RX function
1611          */
1612         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1613                 struct ixgbe_tx_queue *txq;
1614                 /* TX queue function in primary, set by the last queue initialized;
1615                  * TX queues may not have been initialized by the primary process yet
1616                  */
1617                 if (eth_dev->data->tx_queues) {
1618                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1619                         ixgbe_set_tx_function(eth_dev, txq);
1620                 } else {
1621                         /* Use default TX function if we get here */
1622                         PMD_INIT_LOG(NOTICE,
1623                                      "No TX queues configured yet. Using default TX function.");
1624                 }
1625
1626                 ixgbe_set_rx_function(eth_dev);
1627
1628                 return 0;
1629         }
1630
1631         rte_eth_copy_pci_info(eth_dev, pci_dev);
1632         eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
1633
1634         hw->device_id = pci_dev->id.device_id;
1635         hw->vendor_id = pci_dev->id.vendor_id;
1636         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1637
1638         /* initialize the vfta */
1639         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1640
1641         /* initialize the hw strip bitmap */
1642         memset(hwstrip, 0, sizeof(*hwstrip));
1643
1644         /* Initialize the shared code (base driver) */
1645         diag = ixgbe_init_shared_code(hw);
1646         if (diag != IXGBE_SUCCESS) {
1647                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1648                 return -EIO;
1649         }
1650
1651         /* init_mailbox_params */
1652         hw->mbx.ops.init_params(hw);
1653
1654         /* Reset the hw statistics */
1655         ixgbevf_dev_stats_reset(eth_dev);
1656
1657         /* Disable the interrupts for VF */
1658         ixgbevf_intr_disable(hw);
1659
1660         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1661         diag = hw->mac.ops.reset_hw(hw);
1662
1663         /*
1664          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1665          * the underlying PF driver has not assigned a MAC address to the VF.
1666          * In this case, assign a random MAC address.
1667          */
1668         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1669                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1670                 return diag;
1671         }
1672
1673         /* negotiate mailbox API version to use with the PF. */
1674         ixgbevf_negotiate_api(hw);
1675
1676         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1677         ixgbevf_get_queues(hw, &tcs, &tc);
1678
1679         /* Allocate memory for storing MAC addresses */
1680         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1681                                                hw->mac.num_rar_entries, 0);
1682         if (eth_dev->data->mac_addrs == NULL) {
1683                 PMD_INIT_LOG(ERR,
1684                              "Failed to allocate %u bytes needed to store "
1685                              "MAC addresses",
1686                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1687                 return -ENOMEM;
1688         }
1689
1690         /* Generate a random MAC address, if none was assigned by PF. */
1691         if (is_zero_ether_addr(perm_addr)) {
1692                 generate_random_mac_addr(perm_addr);
1693                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1694                 if (diag) {
1695                         rte_free(eth_dev->data->mac_addrs);
1696                         eth_dev->data->mac_addrs = NULL;
1697                         return diag;
1698                 }
1699                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1700                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1701                              "%02x:%02x:%02x:%02x:%02x:%02x",
1702                              perm_addr->addr_bytes[0],
1703                              perm_addr->addr_bytes[1],
1704                              perm_addr->addr_bytes[2],
1705                              perm_addr->addr_bytes[3],
1706                              perm_addr->addr_bytes[4],
1707                              perm_addr->addr_bytes[5]);
1708         }
1709
1710         /* Copy the permanent MAC address */
1711         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1712
1713         /* reset the hardware with the new settings */
1714         diag = hw->mac.ops.start_hw(hw);
1715         switch (diag) {
1716         case  0:
1717                 break;
1718
1719         default:
1720                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1721                 return -EIO;
1722         }
1723
1724         rte_intr_callback_register(intr_handle,
1725                                    ixgbevf_dev_interrupt_handler, eth_dev);
1726         rte_intr_enable(intr_handle);
1727         ixgbevf_intr_enable(hw);
1728
1729         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1730                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1731                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1732
1733         return 0;
1734 }
1735
1736 /* Virtual Function device uninit */
1737
1738 static int
1739 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1740 {
1741         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1742         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1743         struct ixgbe_hw *hw;
1744
1745         PMD_INIT_FUNC_TRACE();
1746
1747         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1748                 return -EPERM;
1749
1750         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1751
1752         if (hw->adapter_stopped == 0)
1753                 ixgbevf_dev_close(eth_dev);
1754
1755         eth_dev->dev_ops = NULL;
1756         eth_dev->rx_pkt_burst = NULL;
1757         eth_dev->tx_pkt_burst = NULL;
1758
1759         /* Disable the interrupts for VF */
1760         ixgbevf_intr_disable(hw);
1761
1762         rte_free(eth_dev->data->mac_addrs);
1763         eth_dev->data->mac_addrs = NULL;
1764
1765         rte_intr_disable(intr_handle);
1766         rte_intr_callback_unregister(intr_handle,
1767                                      ixgbevf_dev_interrupt_handler, eth_dev);
1768
1769         return 0;
1770 }
1771
1772 static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1773         struct rte_pci_device *pci_dev)
1774 {
1775         return rte_eth_dev_pci_generic_probe(pci_dev,
1776                 sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
1777 }
1778
1779 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
1780 {
1781         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
1782 }
1783
1784 static struct rte_pci_driver rte_ixgbe_pmd = {
1785         .id_table = pci_id_ixgbe_map,
1786         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1787         .probe = eth_ixgbe_pci_probe,
1788         .remove = eth_ixgbe_pci_remove,
1789 };
1790
1791 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1792         struct rte_pci_device *pci_dev)
1793 {
1794         return rte_eth_dev_pci_generic_probe(pci_dev,
1795                 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
1796 }
1797
1798 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
1799 {
1800         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
1801 }
1802
1803 /*
1804  * virtual function driver struct
1805  */
1806 static struct rte_pci_driver rte_ixgbevf_pmd = {
1807         .id_table = pci_id_ixgbevf_map,
1808         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1809         .probe = eth_ixgbevf_pci_probe,
1810         .remove = eth_ixgbevf_pci_remove,
1811 };
1812
1813 static int
1814 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1815 {
1816         struct ixgbe_hw *hw =
1817                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1818         struct ixgbe_vfta *shadow_vfta =
1819                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1820         uint32_t vfta;
1821         uint32_t vid_idx;
1822         uint32_t vid_bit;
1823
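             /*
              * The 4096-entry VLAN filter table is held in 128 32-bit VFTA
              * registers: bits 11:5 of the VLAN ID select the register and
              * bits 4:0 select the bit within it.
              */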
1824         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1825         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1826         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1827         if (on)
1828                 vfta |= vid_bit;
1829         else
1830                 vfta &= ~vid_bit;
1831         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1832
1833         /* update local VFTA copy */
1834         shadow_vfta->vfta[vid_idx] = vfta;
1835
1836         return 0;
1837 }
1838
1839 static void
1840 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1841 {
1842         if (on)
1843                 ixgbe_vlan_hw_strip_enable(dev, queue);
1844         else
1845                 ixgbe_vlan_hw_strip_disable(dev, queue);
1846 }
1847
1848 static int
1849 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1850                     enum rte_vlan_type vlan_type,
1851                     uint16_t tpid)
1852 {
1853         struct ixgbe_hw *hw =
1854                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1855         int ret = 0;
1856         uint32_t reg;
1857         uint32_t qinq;
1858
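             /* The DMATXCTL.GDV bit indicates whether double VLAN (QinQ) is enabled. */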
1859         qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1860         qinq &= IXGBE_DMATXCTL_GDV;
1861
1862         switch (vlan_type) {
1863         case ETH_VLAN_TYPE_INNER:
1864                 if (qinq) {
1865                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1866                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1867                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1868                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1869                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1870                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1871                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1872                 } else {
1873                         ret = -ENOTSUP;
1874                         PMD_DRV_LOG(ERR, "Inner type is not supported"
1875                                     " by single VLAN");
1876                 }
1877                 break;
1878         case ETH_VLAN_TYPE_OUTER:
1879                 if (qinq) {
1880                         /* Only the high 16 bits are valid */
1881                         IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1882                                         IXGBE_EXVET_VET_EXT_SHIFT);
1883                 } else {
1884                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1885                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1886                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1887                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1888                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1889                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1890                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1891                 }
1892
1893                 break;
1894         default:
1895                 ret = -EINVAL;
1896                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1897                 break;
1898         }
1899
1900         return ret;
1901 }
1902
1903 void
1904 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1905 {
1906         struct ixgbe_hw *hw =
1907                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1908         uint32_t vlnctrl;
1909
1910         PMD_INIT_FUNC_TRACE();
1911
1912         /* Filter Table Disable */
1913         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1914         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1915
1916         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1917 }
1918
1919 void
1920 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1921 {
1922         struct ixgbe_hw *hw =
1923                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1924         struct ixgbe_vfta *shadow_vfta =
1925                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1926         uint32_t vlnctrl;
1927         uint16_t i;
1928
1929         PMD_INIT_FUNC_TRACE();
1930
1931         /* Filter Table Enable */
1932         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1933         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1934         vlnctrl |= IXGBE_VLNCTRL_VFE;
1935
1936         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1937
1938         /* write whatever is in local vfta copy */
1939         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1940                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1941 }
1942
1943 static void
1944 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1945 {
1946         struct ixgbe_hwstrip *hwstrip =
1947                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1948         struct ixgbe_rx_queue *rxq;
1949
1950         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1951                 return;
1952
1953         if (on)
1954                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1955         else
1956                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1957
1958         if (queue >= dev->data->nb_rx_queues)
1959                 return;
1960
1961         rxq = dev->data->rx_queues[queue];
1962
1963         if (on)
1964                 rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
1965         else
1966                 rxq->vlan_flags = PKT_RX_VLAN_PKT;
1967 }
1968
1969 static void
1970 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1971 {
1972         struct ixgbe_hw *hw =
1973                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1974         uint32_t ctrl;
1975
1976         PMD_INIT_FUNC_TRACE();
1977
1978         if (hw->mac.type == ixgbe_mac_82598EB) {
1979                 /* No queue level support */
1980                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
1981                 return;
1982         }
1983
1984         /* Other 10G NICs: the VLAN strip can be set up per queue in RXDCTL */
1985         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1986         ctrl &= ~IXGBE_RXDCTL_VME;
1987         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1988
1989         /* record this setting for HW strip per queue */
1990         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1991 }
1992
1993 static void
1994 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1995 {
1996         struct ixgbe_hw *hw =
1997                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1998         uint32_t ctrl;
1999
2000         PMD_INIT_FUNC_TRACE();
2001
2002         if (hw->mac.type == ixgbe_mac_82598EB) {
2003                 /* No queue level supported */
2004                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
2005                 return;
2006         }
2007
2008         /* Other 10G NICs: the VLAN strip can be set up per queue in RXDCTL */
2009         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2010         ctrl |= IXGBE_RXDCTL_VME;
2011         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2012
2013         /* record this setting for HW strip per queue */
2014         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2015 }
2016
2017 void
2018 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
2019 {
2020         struct ixgbe_hw *hw =
2021                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2022         uint32_t ctrl;
2023         uint16_t i;
2024         struct ixgbe_rx_queue *rxq;
2025
2026         PMD_INIT_FUNC_TRACE();
2027
2028         if (hw->mac.type == ixgbe_mac_82598EB) {
2029                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2030                 ctrl &= ~IXGBE_VLNCTRL_VME;
2031                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2032         } else {
2033                 /* Other 10G NICs: the VLAN strip can be set up per queue in RXDCTL */
2034                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2035                         rxq = dev->data->rx_queues[i];
2036                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2037                         ctrl &= ~IXGBE_RXDCTL_VME;
2038                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2039
2040                         /* record this setting for HW strip per queue */
2041                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
2042                 }
2043         }
2044 }
2045
2046 void
2047 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
2048 {
2049         struct ixgbe_hw *hw =
2050                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2051         uint32_t ctrl;
2052         uint16_t i;
2053         struct ixgbe_rx_queue *rxq;
2054
2055         PMD_INIT_FUNC_TRACE();
2056
2057         if (hw->mac.type == ixgbe_mac_82598EB) {
2058                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2059                 ctrl |= IXGBE_VLNCTRL_VME;
2060                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2061         } else {
2062                 /* Other 10G NICs: the VLAN strip can be set up per queue in RXDCTL */
2063                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2064                         rxq = dev->data->rx_queues[i];
2065                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2066                         ctrl |= IXGBE_RXDCTL_VME;
2067                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2068
2069                         /* record this setting for HW strip per queue */
2070                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
2071                 }
2072         }
2073 }
2074
2075 static void
2076 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2077 {
2078         struct ixgbe_hw *hw =
2079                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2080         uint32_t ctrl;
2081
2082         PMD_INIT_FUNC_TRACE();
2083
2084         /* DMATXCTRL: Global Double VLAN Disable */
2085         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2086         ctrl &= ~IXGBE_DMATXCTL_GDV;
2087         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2088
2089         /* CTRL_EXT: Global Double VLAN Disable */
2090         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2091         ctrl &= ~IXGBE_EXTENDED_VLAN;
2092         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2093
2094 }
2095
2096 static void
2097 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2098 {
2099         struct ixgbe_hw *hw =
2100                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2101         uint32_t ctrl;
2102
2103         PMD_INIT_FUNC_TRACE();
2104
2105         /* DMATXCTRL: Global Double VLAN Enable */
2106         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2107         ctrl |= IXGBE_DMATXCTL_GDV;
2108         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2109
2110         /* CTRL_EXT: Global Double VLAN Enable */
2111         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2112         ctrl |= IXGBE_EXTENDED_VLAN;
2113         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2114
2115         /* Clear pooling mode of PFVTCTL. It's required by X550. */
2116         if (hw->mac.type == ixgbe_mac_X550 ||
2117             hw->mac.type == ixgbe_mac_X550EM_x ||
2118             hw->mac.type == ixgbe_mac_X550EM_a) {
2119                 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2120                 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2121                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2122         }
2123
2124         /*
2125          * The VET EXT field in the EXVET register is 0x8100 by default, so
2126          * no need to change it. The same applies to the VT field of DMATXCTL.
2127          */
2128 }
2129
2130 static void
2131 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2132 {
2133         if (mask & ETH_VLAN_STRIP_MASK) {
2134                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2135                         ixgbe_vlan_hw_strip_enable_all(dev);
2136                 else
2137                         ixgbe_vlan_hw_strip_disable_all(dev);
2138         }
2139
2140         if (mask & ETH_VLAN_FILTER_MASK) {
2141                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2142                         ixgbe_vlan_hw_filter_enable(dev);
2143                 else
2144                         ixgbe_vlan_hw_filter_disable(dev);
2145         }
2146
2147         if (mask & ETH_VLAN_EXTEND_MASK) {
2148                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2149                         ixgbe_vlan_hw_extend_enable(dev);
2150                 else
2151                         ixgbe_vlan_hw_extend_disable(dev);
2152         }
2153 }
2154
2155 static void
2156 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2157 {
2158         struct ixgbe_hw *hw =
2159                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2160         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2161         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2162
2163         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2164         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2165 }
2166
2167 static int
2168 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2169 {
2170         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2171
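             /*
              * The number of Rx queues per VF pool determines how many pools
              * fit in the 128 hardware queues: 1 or 2 queues allow 64 pools,
              * while 4 queues allow only 32 pools.
              */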
2172         switch (nb_rx_q) {
2173         case 1:
2174         case 2:
2175                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2176                 break;
2177         case 4:
2178                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2179                 break;
2180         default:
2181                 return -EINVAL;
2182         }
2183
2184         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
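             /* The PF's default pool queues start right after all the VF queues. */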
2185         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
2186
2187         return 0;
2188 }
2189
2190 static int
2191 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2192 {
2193         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2194         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2195         uint16_t nb_rx_q = dev->data->nb_rx_queues;
2196         uint16_t nb_tx_q = dev->data->nb_tx_queues;
2197
2198         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2199                 /* check multi-queue mode */
2200                 switch (dev_conf->rxmode.mq_mode) {
2201                 case ETH_MQ_RX_VMDQ_DCB:
2202                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2203                         break;
2204                 case ETH_MQ_RX_VMDQ_DCB_RSS:
2205                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
2206                         PMD_INIT_LOG(ERR, "SRIOV active,"
2207                                         " unsupported mq_mode rx %d.",
2208                                         dev_conf->rxmode.mq_mode);
2209                         return -EINVAL;
2210                 case ETH_MQ_RX_RSS:
2211                 case ETH_MQ_RX_VMDQ_RSS:
2212                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2213                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2214                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2215                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2216                                                 " invalid queue number"
2217                                                 " for VMDQ RSS, allowed"
2218                                                 " value are 1, 2 or 4.");
2219                                         return -EINVAL;
2220                                 }
2221                         break;
2222                 case ETH_MQ_RX_VMDQ_ONLY:
2223                 case ETH_MQ_RX_NONE:
2224                         /* if no mq mode is configured, use the default scheme */
2225                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2226                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
2227                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
2228                         break;
2229                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
2230                         /* SRIOV only works in VMDq enable mode */
2231                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2232                                         " wrong mq_mode rx %d.",
2233                                         dev_conf->rxmode.mq_mode);
2234                         return -EINVAL;
2235                 }
2236
2237                 switch (dev_conf->txmode.mq_mode) {
2238                 case ETH_MQ_TX_VMDQ_DCB:
2239                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2240                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2241                         break;
2242                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2243                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2244                         break;
2245                 }
2246
2247                 /* check valid queue number */
2248                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2249                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2250                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2251                                         " nb_rx_q=%d nb_tx_q=%d queue number"
2252                                         " must be less than or equal to %d.",
2253                                         nb_rx_q, nb_tx_q,
2254                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2255                         return -EINVAL;
2256                 }
2257         } else {
2258                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2259                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2260                                           " not supported.");
2261                         return -EINVAL;
2262                 }
2263                 /* check configuration for vmdq+dcb mode */
2264                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2265                         const struct rte_eth_vmdq_dcb_conf *conf;
2266
2267                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2268                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2269                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
2270                                 return -EINVAL;
2271                         }
2272                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2273                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2274                                conf->nb_queue_pools == ETH_32_POOLS)) {
2275                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2276                                                 " nb_queue_pools must be %d or %d.",
2277                                                 ETH_16_POOLS, ETH_32_POOLS);
2278                                 return -EINVAL;
2279                         }
2280                 }
2281                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2282                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
2283
2284                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2285                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2286                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
2287                                 return -EINVAL;
2288                         }
2289                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2290                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2291                                conf->nb_queue_pools == ETH_32_POOLS)) {
2292                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2293                                                 " nb_queue_pools != %d and"
2294                                                 " nb_queue_pools != %d.",
2295                                                 ETH_16_POOLS, ETH_32_POOLS);
2296                                 return -EINVAL;
2297                         }
2298                 }
2299
2300                 /* For DCB mode check our configuration before we go further */
2301                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2302                         const struct rte_eth_dcb_rx_conf *conf;
2303
2304                         if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
2305                                 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
2306                                                  IXGBE_DCB_NB_QUEUES);
2307                                 return -EINVAL;
2308                         }
2309                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2310                         if (!(conf->nb_tcs == ETH_4_TCS ||
2311                                conf->nb_tcs == ETH_8_TCS)) {
2312                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2313                                                 " and nb_tcs != %d.",
2314                                                 ETH_4_TCS, ETH_8_TCS);
2315                                 return -EINVAL;
2316                         }
2317                 }
2318
2319                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2320                         const struct rte_eth_dcb_tx_conf *conf;
2321
2322                         if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
2323                                 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
2324                                                  IXGBE_DCB_NB_QUEUES);
2325                                 return -EINVAL;
2326                         }
2327                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2328                         if (!(conf->nb_tcs == ETH_4_TCS ||
2329                                conf->nb_tcs == ETH_8_TCS)) {
2330                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2331                                                 " and nb_tcs != %d.",
2332                                                 ETH_4_TCS, ETH_8_TCS);
2333                                 return -EINVAL;
2334                         }
2335                 }
2336
2337                 /*
2338                  * When DCB/VT is off, maximum number of queues changes,
2339                  * except for 82598EB, which remains constant.
2340                  */
2341                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2342                                 hw->mac.type != ixgbe_mac_82598EB) {
2343                         if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2344                                 PMD_INIT_LOG(ERR,
2345                                              "Neither VT nor DCB are enabled, "
2346                                              "nb_tx_q > %d.",
2347                                              IXGBE_NONE_MODE_TX_NB_QUEUES);
2348                                 return -EINVAL;
2349                         }
2350                 }
2351         }
2352         return 0;
2353 }
2354
2355 static int
2356 ixgbe_dev_configure(struct rte_eth_dev *dev)
2357 {
2358         struct ixgbe_interrupt *intr =
2359                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2360         struct ixgbe_adapter *adapter =
2361                 (struct ixgbe_adapter *)dev->data->dev_private;
2362         int ret;
2363
2364         PMD_INIT_FUNC_TRACE();
2365         /* multiple queue mode checking */
2366         ret  = ixgbe_check_mq_mode(dev);
2367         if (ret != 0) {
2368                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2369                             ret);
2370                 return ret;
2371         }
2372
2373         /* set flag to update link status after init */
2374         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2375
2376         /*
2377          * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
2378          * allocation or vector Rx preconditions, we will reset it.
2379          */
2380         adapter->rx_bulk_alloc_allowed = true;
2381         adapter->rx_vec_allowed = true;
2382
2383         return 0;
2384 }
2385
2386 static void
2387 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2388 {
2389         struct ixgbe_hw *hw =
2390                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2391         struct ixgbe_interrupt *intr =
2392                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2393         uint32_t gpie;
2394
2395         /* only set it up on X550EM_X */
2396         if (hw->mac.type == ixgbe_mac_X550EM_x) {
2397                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2398                 gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2399                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2400                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2401                         intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2402         }
2403 }
2404
2405 int
2406 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
2407                         uint16_t tx_rate, uint64_t q_msk)
2408 {
2409         struct ixgbe_hw *hw;
2410         struct ixgbe_vf_info *vfinfo;
2411         struct rte_eth_link link;
2412         uint8_t  nb_q_per_pool;
2413         uint32_t queue_stride;
2414         uint32_t queue_idx, idx = 0, vf_idx;
2415         uint32_t queue_end;
2416         uint16_t total_rate = 0;
2417         struct rte_pci_device *pci_dev;
2418
2419         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2420         rte_eth_link_get_nowait(dev->data->port_id, &link);
2421
2422         if (vf >= pci_dev->max_vfs)
2423                 return -EINVAL;
2424
2425         if (tx_rate > link.link_speed)
2426                 return -EINVAL;
2427
2428         if (q_msk == 0)
2429                 return 0;
2430
2431         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2432         vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
2433         nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
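             /*
              * Each VF owns a contiguous block of nb_q_per_pool queues; the
              * stride between blocks is the total queue count divided by the
              * number of active pools (e.g. 128 / 64 = 2 in 64-pool mode).
              */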
2434         queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2435         queue_idx = vf * queue_stride;
2436         queue_end = queue_idx + nb_q_per_pool - 1;
2437         if (queue_end >= hw->mac.max_tx_queues)
2438                 return -EINVAL;
2439
2440         if (vfinfo) {
2441                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
2442                         if (vf_idx == vf)
2443                                 continue;
2444                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
2445                                 idx++)
2446                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
2447                 }
2448         } else {
2449                 return -EINVAL;
2450         }
2451
2452         /* Store tx_rate for this vf. */
2453         for (idx = 0; idx < nb_q_per_pool; idx++) {
2454                 if (((uint64_t)0x1 << idx) & q_msk) {
2455                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
2456                                 vfinfo[vf].tx_rate[idx] = tx_rate;
2457                         total_rate += tx_rate;
2458                 }
2459         }
2460
2461         if (total_rate > dev->data->dev_link.link_speed) {
2462                 /* Reset the stored TX rate of the VF if the total would
2463                  * exceed the link speed.
2464                  */
2465                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
2466                 return -EINVAL;
2467         }
2468
2469         /* Set RTTBCNRC of each queue/pool for vf X  */
2470         for (; queue_idx <= queue_end; queue_idx++) {
2471                 if (0x1 & q_msk)
2472                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
2473                 q_msk = q_msk >> 1;
2474         }
2475
2476         return 0;
2477 }
2478
2479 /*
2480  * Configure device link speed and setup link.
2481  * It returns 0 on success.
2482  */
2483 static int
2484 ixgbe_dev_start(struct rte_eth_dev *dev)
2485 {
2486         struct ixgbe_hw *hw =
2487                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2488         struct ixgbe_vf_info *vfinfo =
2489                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2490         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2491         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2492         uint32_t intr_vector = 0;
2493         int err, link_up = 0, negotiate = 0;
2494         uint32_t speed = 0;
2495         int mask = 0;
2496         int status;
2497         uint16_t vf, idx;
2498         uint32_t *link_speeds;
2499
2500         PMD_INIT_FUNC_TRACE();
2501
2502         /* IXGBE devices don't support:
2503          *    - half duplex (checked afterwards for valid speeds)
2504          *    - fixed speed: TODO implement
2505          */
2506         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2507                 PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported",
2508                              dev->data->port_id);
2509                 return -EINVAL;
2510         }
2511
2512         /* disable uio/vfio intr/eventfd mapping */
2513         rte_intr_disable(intr_handle);
2514
2515         /* stop adapter */
2516         hw->adapter_stopped = 0;
2517         ixgbe_stop_adapter(hw);
2518
2519         /* reinitialize adapter
2520          * this calls reset and start
2521          */
2522         status = ixgbe_pf_reset_hw(hw);
2523         if (status != 0)
2524                 return -1;
2525         hw->mac.ops.start_hw(hw);
2526         hw->mac.get_link_status = true;
2527
2528         /* configure PF module if SRIOV enabled */
2529         ixgbe_pf_host_configure(dev);
2530
2531         ixgbe_dev_phy_intr_setup(dev);
2532
2533         /* check and configure queue intr-vector mapping */
2534         if ((rte_intr_cap_multiple(intr_handle) ||
2535              !RTE_ETH_DEV_SRIOV(dev).active) &&
2536             dev->data->dev_conf.intr_conf.rxq != 0) {
2537                 intr_vector = dev->data->nb_rx_queues;
2538                 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2539                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2540                                         IXGBE_MAX_INTR_QUEUE_NUM);
2541                         return -ENOTSUP;
2542                 }
2543                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2544                         return -1;
2545         }
2546
2547         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2548                 intr_handle->intr_vec =
2549                         rte_zmalloc("intr_vec",
2550                                     dev->data->nb_rx_queues * sizeof(int), 0);
2551                 if (intr_handle->intr_vec == NULL) {
2552                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2553                                      " intr_vec", dev->data->nb_rx_queues);
2554                         return -ENOMEM;
2555                 }
2556         }
2557
2558         /* configure MSI-X for sleep until Rx interrupt */
2559         ixgbe_configure_msix(dev);
2560
2561         /* initialize transmission unit */
2562         ixgbe_dev_tx_init(dev);
2563
2564         /* This can fail when allocating mbufs for descriptor rings */
2565         err = ixgbe_dev_rx_init(dev);
2566         if (err) {
2567                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2568                 goto error;
2569         }
2570
2571         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2572                 ETH_VLAN_EXTEND_MASK;
2573         ixgbe_vlan_offload_set(dev, mask);
2574
2575         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2576                 /* Enable vlan filtering for VMDq */
2577                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2578         }
2579
2580         /* Configure DCB hw */
2581         ixgbe_configure_dcb(dev);
2582
2583         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2584                 err = ixgbe_fdir_configure(dev);
2585                 if (err)
2586                         goto error;
2587         }
2588
2589         /* Restore vf rate limit */
2590         if (vfinfo != NULL) {
2591                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
2592                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2593                                 if (vfinfo[vf].tx_rate[idx] != 0)
2594                                         ixgbe_set_vf_rate_limit(
2595                                                 dev, vf,
2596                                                 vfinfo[vf].tx_rate[idx],
2597                                                 1 << idx);
2598         }
2599
2600         ixgbe_restore_statistics_mapping(dev);
2601
2602         err = ixgbe_dev_rxtx_start(dev);
2603         if (err < 0) {
2604                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2605                 goto error;
2606         }
2607
2608         /* Skip link setup if loopback mode is enabled for 82599. */
2609         if (hw->mac.type == ixgbe_mac_82599EB &&
2610                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2611                 goto skip_link_setup;
2612
2613         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2614                 err = hw->mac.ops.setup_sfp(hw);
2615                 if (err)
2616                         goto error;
2617         }
2618
2619         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2620                 /* Turn on the copper */
2621                 ixgbe_set_phy_power(hw, true);
2622         } else {
2623                 /* Turn on the laser */
2624                 ixgbe_enable_tx_laser(hw);
2625         }
2626
2627         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2628         if (err)
2629                 goto error;
2630         dev->data->dev_link.link_status = link_up;
2631
2632         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2633         if (err)
2634                 goto error;
2635
2636         link_speeds = &dev->data->dev_conf.link_speeds;
2637         if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2638                         ETH_LINK_SPEED_10G)) {
2639                 PMD_INIT_LOG(ERR, "Invalid link setting");
2640                 goto error;
2641         }
2642
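             /* Translate the requested ETH_LINK_SPEED_* flags into the
              * IXGBE_LINK_SPEED_* mask passed to ixgbe_setup_link().
              */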
2643         speed = 0x0;
2644         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2645                 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
2646                                 IXGBE_LINK_SPEED_82599_AUTONEG :
2647                                 IXGBE_LINK_SPEED_82598_AUTONEG;
2648         } else {
2649                 if (*link_speeds & ETH_LINK_SPEED_10G)
2650                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2651                 if (*link_speeds & ETH_LINK_SPEED_1G)
2652                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2653                 if (*link_speeds & ETH_LINK_SPEED_100M)
2654                         speed |= IXGBE_LINK_SPEED_100_FULL;
2655         }
2656
2657         err = ixgbe_setup_link(hw, speed, link_up);
2658         if (err)
2659                 goto error;
2660
2661 skip_link_setup:
2662
2663         if (rte_intr_allow_others(intr_handle)) {
2664                 /* check if lsc interrupt is enabled */
2665                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2666                         ixgbe_dev_lsc_interrupt_setup(dev);
2667                 ixgbe_dev_macsec_interrupt_setup(dev);
2668         } else {
2669                 rte_intr_callback_unregister(intr_handle,
2670                                              ixgbe_dev_interrupt_handler, dev);
2671                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2672                         PMD_INIT_LOG(INFO, "lsc won't enable because of"
2673                                      " no intr multiplex");
2674         }
2675
2676         /* check if rxq interrupt is enabled */
2677         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2678             rte_intr_dp_is_en(intr_handle))
2679                 ixgbe_dev_rxq_interrupt_setup(dev);
2680
2681         /* enable uio/vfio intr/eventfd mapping */
2682         rte_intr_enable(intr_handle);
2683
2684         /* resume enabled intr since hw reset */
2685         ixgbe_enable_intr(dev);
2686         ixgbe_l2_tunnel_conf(dev);
2687         ixgbe_filter_restore(dev);
2688
2689         return 0;
2690
2691 error:
2692         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2693         ixgbe_dev_clear_queues(dev);
2694         return -EIO;
2695 }
2696
2697 /*
2698  * Stop device: disable rx and tx functions to allow for reconfiguring.
2699  */
2700 static void
2701 ixgbe_dev_stop(struct rte_eth_dev *dev)
2702 {
2703         struct rte_eth_link link;
2704         struct ixgbe_hw *hw =
2705                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2706         struct ixgbe_vf_info *vfinfo =
2707                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2708         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2709         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2710         int vf;
2711
2712         PMD_INIT_FUNC_TRACE();
2713
2714         /* disable interrupts */
2715         ixgbe_disable_intr(hw);
2716
2717         /* reset the NIC */
2718         ixgbe_pf_reset_hw(hw);
2719         hw->adapter_stopped = 0;
2720
2721         /* stop adapter */
2722         ixgbe_stop_adapter(hw);
2723
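             /* Revoke clear-to-send for every VF while the port is stopped. */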
2724         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2725                 vfinfo[vf].clear_to_send = false;
2726
2727         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2728                 /* Turn off the copper */
2729                 ixgbe_set_phy_power(hw, false);
2730         } else {
2731                 /* Turn off the laser */
2732                 ixgbe_disable_tx_laser(hw);
2733         }
2734
2735         ixgbe_dev_clear_queues(dev);
2736
2737         /* Clear stored conf */
2738         dev->data->scattered_rx = 0;
2739         dev->data->lro = 0;
2740
2741         /* Clear recorded link status */
2742         memset(&link, 0, sizeof(link));
2743         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2744
2745         if (!rte_intr_allow_others(intr_handle))
2746                 /* resume to the default handler */
2747                 rte_intr_callback_register(intr_handle,
2748                                            ixgbe_dev_interrupt_handler,
2749                                            (void *)dev);
2750
2751         /* Clean datapath event and queue/vec mapping */
2752         rte_intr_efd_disable(intr_handle);
2753         if (intr_handle->intr_vec != NULL) {
2754                 rte_free(intr_handle->intr_vec);
2755                 intr_handle->intr_vec = NULL;
2756         }
2757 }
2758
2759 /*
2760  * Set device link up: enable tx.
2761  */
2762 static int
2763 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2764 {
2765         struct ixgbe_hw *hw =
2766                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2767         if (hw->mac.type == ixgbe_mac_82599EB) {
2768 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2769                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2770                         /* Not supported in bypass mode */
2771                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2772                                      "by device id 0x%x", hw->device_id);
2773                         return -ENOTSUP;
2774                 }
2775 #endif
2776         }
2777
2778         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2779                 /* Turn on the copper */
2780                 ixgbe_set_phy_power(hw, true);
2781         } else {
2782                 /* Turn on the laser */
2783                 ixgbe_enable_tx_laser(hw);
2784         }
2785
2786         return 0;
2787 }
2788
2789 /*
2790  * Set device link down: disable tx.
2791  */
2792 static int
2793 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2794 {
2795         struct ixgbe_hw *hw =
2796                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2797         if (hw->mac.type == ixgbe_mac_82599EB) {
2798 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2799                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2800                         /* Not supported in bypass mode */
2801                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2802                                      "by device id 0x%x", hw->device_id);
2803                         return -ENOTSUP;
2804                 }
2805 #endif
2806         }
2807
2808         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2809                 /* Turn off the copper */
2810                 ixgbe_set_phy_power(hw, false);
2811         } else {
2812                 /* Turn off the laser */
2813                 ixgbe_disable_tx_laser(hw);
2814         }
2815
2816         return 0;
2817 }
2818
2819 /*
2820  * Reset and stop device.
2821  */
2822 static void
2823 ixgbe_dev_close(struct rte_eth_dev *dev)
2824 {
2825         struct ixgbe_hw *hw =
2826                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2827
2828         PMD_INIT_FUNC_TRACE();
2829
2830         ixgbe_pf_reset_hw(hw);
2831
2832         ixgbe_dev_stop(dev);
2833         hw->adapter_stopped = 1;
2834
2835         ixgbe_dev_free_queues(dev);
2836
2837         ixgbe_disable_pcie_master(hw);
2838
2839         /* reprogram the RAR[0] in case user changed it. */
2840         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2841 }
2842
2843 static void
2844 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2845                            struct ixgbe_hw_stats *hw_stats,
2846                            struct ixgbe_macsec_stats *macsec_stats,
2847                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
2848                            uint64_t *total_qprc, uint64_t *total_qprdc)
2849 {
2850         uint32_t bprc, lxon, lxoff, total;
2851         uint32_t delta_gprc = 0;
2852         unsigned i;
2853         /* Workaround for RX byte count not including CRC bytes when CRC
2854          * strip is enabled. CRC bytes are removed from counters when crc_strip
2855          * is disabled.
2856          */
2857         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
2858                         IXGBE_HLREG0_RXCRCSTRP);
2859
2860         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2861         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2862         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2863         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2864
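             /* The following counters have eight per-packet-buffer register
              * instances; accumulate each of them.
              */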
2865         for (i = 0; i < 8; i++) {
2866                 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2867
2868                 /* global total per queue */
2869                 hw_stats->mpc[i] += mp;
2870                 /* Running comprehensive total for stats display */
2871                 *total_missed_rx += hw_stats->mpc[i];
2872                 if (hw->mac.type == ixgbe_mac_82598EB) {
2873                         hw_stats->rnbc[i] +=
2874                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2875                         hw_stats->pxonrxc[i] +=
2876                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2877                         hw_stats->pxoffrxc[i] +=
2878                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2879                 } else {
2880                         hw_stats->pxonrxc[i] +=
2881                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2882                         hw_stats->pxoffrxc[i] +=
2883                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2884                         hw_stats->pxon2offc[i] +=
2885                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2886                 }
2887                 hw_stats->pxontxc[i] +=
2888                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2889                 hw_stats->pxofftxc[i] +=
2890                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2891         }
2892         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2893                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2894                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2895                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2896
2897                 delta_gprc += delta_qprc;
2898
2899                 hw_stats->qprc[i] += delta_qprc;
2900                 hw_stats->qptc[i] += delta_qptc;
2901
2902                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2903                 hw_stats->qbrc[i] +=
2904                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2905                 if (crc_strip == 0)
2906                         hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
2907
2908                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2909                 hw_stats->qbtc[i] +=
2910                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2911
2912                 hw_stats->qprdc[i] += delta_qprdc;
2913                 *total_qprdc += hw_stats->qprdc[i];
2914
2915                 *total_qprc += hw_stats->qprc[i];
2916                 *total_qbrc += hw_stats->qbrc[i];
2917         }
2918         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2919         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2920         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2921
2922         /*
2923          * An erratum states that gprc actually counts good + missed packets;
2924          * as a workaround, set gprc to the sum of the per-queue packet receives.
2925          */
2926         hw_stats->gprc = *total_qprc;
2927
2928         if (hw->mac.type != ixgbe_mac_82598EB) {
2929                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
2930                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
2931                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
2932                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
2933                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
2934                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
2935                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
2936                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
2937         } else {
2938                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2939                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2940                 /* 82598 only has a counter in the high register */
2941                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2942                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2943                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2944         }
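             /* Remember the previous total-packets-received count so that the
              * CRC length adjustment below is applied only to this interval.
              */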
2945         uint64_t old_tpr = hw_stats->tpr;
2946
2947         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2948         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
2949
2950         if (crc_strip == 0)
2951                 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
2952
2953         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
2954         hw_stats->gptc += delta_gptc;
2955         hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
2956         hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
2957
2958         /*
2959          * Workaround: mprc hardware is incorrectly counting
2960          * broadcasts, so for now we subtract those.
2961          */
2962         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2963         hw_stats->bprc += bprc;
2964         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2965         if (hw->mac.type == ixgbe_mac_82598EB)
2966                 hw_stats->mprc -= bprc;
2967
2968         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2969         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2970         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2971         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2972         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2973         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2974
2975         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2976         hw_stats->lxontxc += lxon;
2977         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2978         hw_stats->lxofftxc += lxoff;
2979         total = lxon + lxoff;
2980
2981         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2982         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2983         hw_stats->gptc -= total;
2984         hw_stats->mptc -= total;
2985         hw_stats->ptc64 -= total;
2986         hw_stats->gotc -= total * ETHER_MIN_LEN;
2987
2988         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2989         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2990         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2991         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2992         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
2993         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
2994         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
2995         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2996         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2997         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2998         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2999         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3000         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3001         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3002         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3003         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3004         /* Only read FCOE on 82599 */
3005         if (hw->mac.type != ixgbe_mac_82598EB) {
3006                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3007                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3008                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3009                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3010                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3011         }
3012
3013         /* Flow Director Stats registers */
3014         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3015         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3016
3017         /* MACsec Stats registers */
3018         macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
3019         macsec_stats->out_pkts_encrypted +=
3020                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
3021         macsec_stats->out_pkts_protected +=
3022                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
3023         macsec_stats->out_octets_encrypted +=
3024                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
3025         macsec_stats->out_octets_protected +=
3026                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
3027         macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
3028         macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
3029         macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
3030         macsec_stats->in_pkts_unknownsci +=
3031                 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
3032         macsec_stats->in_octets_decrypted +=
3033                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
3034         macsec_stats->in_octets_validated +=
3035                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
3036         macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
3037         macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
3038         macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
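             /* These Rx counters have two register instances; accumulate both. */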
3039         for (i = 0; i < 2; i++) {
3040                 macsec_stats->in_pkts_ok +=
3041                         IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
3042                 macsec_stats->in_pkts_invalid +=
3043                         IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
3044                 macsec_stats->in_pkts_notvalid +=
3045                         IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
3046         }
3047         macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
3048         macsec_stats->in_pkts_notusingsa +=
3049                 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3050 }
3051
3052 /*
3053  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3054  */
3055 static void
3056 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3057 {
3058         struct ixgbe_hw *hw =
3059                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3060         struct ixgbe_hw_stats *hw_stats =
3061                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3062         struct ixgbe_macsec_stats *macsec_stats =
3063                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3064                                 dev->data->dev_private);
3065         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3066         unsigned i;
3067
3068         total_missed_rx = 0;
3069         total_qbrc = 0;
3070         total_qprc = 0;
3071         total_qprdc = 0;
3072
3073         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3074                         &total_qbrc, &total_qprc, &total_qprdc);
3075
3076         if (stats == NULL)
3077                 return;
3078
3079         /* Fill out the rte_eth_stats statistics structure */
3080         stats->ipackets = total_qprc;
3081         stats->ibytes = total_qbrc;
3082         stats->opackets = hw_stats->gptc;
3083         stats->obytes = hw_stats->gotc;
3084
3085         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3086                 stats->q_ipackets[i] = hw_stats->qprc[i];
3087                 stats->q_opackets[i] = hw_stats->qptc[i];
3088                 stats->q_ibytes[i] = hw_stats->qbrc[i];
3089                 stats->q_obytes[i] = hw_stats->qbtc[i];
3090                 stats->q_errors[i] = hw_stats->qprdc[i];
3091         }
3092
3093         /* Rx Errors */
3094         stats->imissed  = total_missed_rx;
3095         stats->ierrors  = hw_stats->crcerrs +
3096                           hw_stats->mspdc +
3097                           hw_stats->rlec +
3098                           hw_stats->ruc +
3099                           hw_stats->roc +
3100                           hw_stats->illerrc +
3101                           hw_stats->errbc +
3102                           hw_stats->rfc +
3103                           hw_stats->fccrc +
3104                           hw_stats->fclast;
3105
3106         /* Tx Errors */
3107         stats->oerrors  = 0;
3108 }
3109
3110 static void
3111 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3112 {
3113         struct ixgbe_hw_stats *stats =
3114                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3115
3116         /* HW registers are cleared on read */
3117         ixgbe_dev_stats_get(dev, NULL);
3118
3119         /* Reset software totals */
3120         memset(stats, 0, sizeof(*stats));
3121 }
3122
3123 /* This function calculates the number of xstats based on the current config */
3124 static unsigned
3125 ixgbe_xstats_calc_num(void) {
3126         return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3127                 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3128                 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3129 }
3130
3131 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3132         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3133 {
3134         const unsigned cnt_stats = ixgbe_xstats_calc_num();
3135         unsigned stat, i, count;
3136
3137         if (xstats_names != NULL) {
3138                 count = 0;
3139
3140                 /* Note: limit >= cnt_stats checked upstream
3141                  * in rte_eth_xstats_names()
3142                  */
3143
3144                 /* Extended stats from ixgbe_hw_stats */
3145                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3146                         snprintf(xstats_names[count].name,
3147                                 sizeof(xstats_names[count].name),
3148                                 "%s",
3149                                 rte_ixgbe_stats_strings[i].name);
3150                         count++;
3151                 }
3152
3153                 /* MACsec Stats */
3154                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3155                         snprintf(xstats_names[count].name,
3156                                 sizeof(xstats_names[count].name),
3157                                 "%s",
3158                                 rte_ixgbe_macsec_strings[i].name);
3159                         count++;
3160                 }
3161
3162                 /* RX Priority Stats */
3163                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3164                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3165                                 snprintf(xstats_names[count].name,
3166                                         sizeof(xstats_names[count].name),
3167                                         "rx_priority%u_%s", i,
3168                                         rte_ixgbe_rxq_strings[stat].name);
3169                                 count++;
3170                         }
3171                 }
3172
3173                 /* TX Priority Stats */
3174                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3175                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3176                                 snprintf(xstats_names[count].name,
3177                                         sizeof(xstats_names[count].name),
3178                                         "tx_priority%u_%s", i,
3179                                         rte_ixgbe_txq_strings[stat].name);
3180                                 count++;
3181                         }
3182                 }
3183         }
3184         return cnt_stats;
3185 }
3186
3187 static int ixgbe_dev_xstats_get_names_by_id(
3188         struct rte_eth_dev *dev,
3189         struct rte_eth_xstat_name *xstats_names,
3190         const uint64_t *ids,
3191         unsigned int limit)
3192 {
3193         if (!ids) {
3194                 const unsigned int cnt_stats = ixgbe_xstats_calc_num();
3195                 unsigned int stat, i, count;
3196
3197                 if (xstats_names != NULL) {
3198                         count = 0;
3199
3200                         /* Note: limit >= cnt_stats checked upstream
3201                          * in rte_eth_xstats_names()
3202                          */
3203
3204                         /* Extended stats from ixgbe_hw_stats */
3205                         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3206                                 snprintf(xstats_names[count].name,
3207                                         sizeof(xstats_names[count].name),
3208                                         "%s",
3209                                         rte_ixgbe_stats_strings[i].name);
3210                                 count++;
3211                         }
3212
3213                         /* MACsec Stats */
3214                         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3215                                 snprintf(xstats_names[count].name,
3216                                         sizeof(xstats_names[count].name),
3217                                         "%s",
3218                                         rte_ixgbe_macsec_strings[i].name);
3219                                 count++;
3220                         }
3221
3222                         /* RX Priority Stats */
3223                         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3224                                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3225                                         snprintf(xstats_names[count].name,
3226                                             sizeof(xstats_names[count].name),
3227                                             "rx_priority%u_%s", i,
3228                                             rte_ixgbe_rxq_strings[stat].name);
3229                                         count++;
3230                                 }
3231                         }
3232
3233                         /* TX Priority Stats */
3234                         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3235                                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3236                                         snprintf(xstats_names[count].name,
3237                                             sizeof(xstats_names[count].name),
3238                                             "tx_priority%u_%s", i,
3239                                             rte_ixgbe_txq_strings[stat].name);
3240                                         count++;
3241                                 }
3242                         }
3243                 }
3244                 return cnt_stats;
3245         }
3246
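             /* Specific ids were requested: build the full name table first,
              * then copy out only the requested entries.
              */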
3247         uint16_t i;
3248         uint16_t size = ixgbe_xstats_calc_num();
3249         struct rte_eth_xstat_name xstats_names_copy[size];
3250
3251         ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
3252                         size);
3253
3254         for (i = 0; i < limit; i++) {
3255                 if (ids[i] >= size) {
3256                         PMD_INIT_LOG(ERR, "id value isn't valid");
3257                         return -1;
3258                 }
3259                 strcpy(xstats_names[i].name,
3260                                 xstats_names_copy[ids[i]].name);
3261         }
3262         return limit;
3263 }
3264
3265 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3266         struct rte_eth_xstat_name *xstats_names, unsigned limit)
3267 {
3268         unsigned i;
3269
3270         if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3271                 return -ENOMEM;
3272
3273         if (xstats_names != NULL)
3274                 for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
3275                         snprintf(xstats_names[i].name,
3276                                 sizeof(xstats_names[i].name),
3277                                 "%s", rte_ixgbevf_stats_strings[i].name);
3278         return IXGBEVF_NB_XSTATS;
3279 }
3280
3281 static int
3282 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3283                                          unsigned n)
3284 {
3285         struct ixgbe_hw *hw =
3286                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3287         struct ixgbe_hw_stats *hw_stats =
3288                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3289         struct ixgbe_macsec_stats *macsec_stats =
3290                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3291                                 dev->data->dev_private);
3292         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3293         unsigned i, stat, count = 0;
3294
3295         count = ixgbe_xstats_calc_num();
3296
3297         if (n < count)
3298                 return count;
3299
3300         total_missed_rx = 0;
3301         total_qbrc = 0;
3302         total_qprc = 0;
3303         total_qprdc = 0;
3304
3305         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3306                         &total_qbrc, &total_qprc, &total_qprdc);
3307
3308         /* If this is a reset, xstats is NULL and we have already cleared
3309          * the registers by reading them.
3310          */
3311         if (!xstats)
3312                 return 0;
3313
3314         /* Extended stats from ixgbe_hw_stats */
3315         count = 0;
3316         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3317                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3318                                 rte_ixgbe_stats_strings[i].offset);
3319                 xstats[count].id = count;
3320                 count++;
3321         }
3322
3323         /* MACsec Stats */
3324         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3325                 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3326                                 rte_ixgbe_macsec_strings[i].offset);
3327                 xstats[count].id = count;
3328                 count++;
3329         }
3330
3331         /* RX Priority Stats */
3332         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3333                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3334                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3335                                         rte_ixgbe_rxq_strings[stat].offset +
3336                                         (sizeof(uint64_t) * i));
3337                         xstats[count].id = count;
3338                         count++;
3339                 }
3340         }
3341
3342         /* TX Priority Stats */
3343         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3344                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3345                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3346                                         rte_ixgbe_txq_strings[stat].offset +
3347                                         (sizeof(uint64_t) * i));
3348                         xstats[count].id = count;
3349                         count++;
3350                 }
3351         }
3352         return count;
3353 }
3354
3355 static int
3356 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3357                 uint64_t *values, unsigned int n)
3358 {
3359         if (!ids) {
3360                 struct ixgbe_hw *hw =
3361                                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3362                 struct ixgbe_hw_stats *hw_stats =
3363                                 IXGBE_DEV_PRIVATE_TO_STATS(
3364                                                 dev->data->dev_private);
3365                 struct ixgbe_macsec_stats *macsec_stats =
3366                                 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3367                                         dev->data->dev_private);
3368                 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3369                 unsigned int i, stat, count = 0;
3370
3371                 count = ixgbe_xstats_calc_num();
3372
3373                 if (!ids && n < count)
3374                         return count;
3375
3376                 total_missed_rx = 0;
3377                 total_qbrc = 0;
3378                 total_qprc = 0;
3379                 total_qprdc = 0;
3380
3381                 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
3382                                 &total_missed_rx, &total_qbrc, &total_qprc,
3383                                 &total_qprdc);
3384
3385                 /* If this is a reset, values is NULL and we have already
3386                  * cleared the registers by reading them.
3387                  */
3388                 if (!ids && !values)
3389                         return 0;
3390
3391                 /* Extended stats from ixgbe_hw_stats */
3392                 count = 0;
3393                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3394                         values[count] = *(uint64_t *)(((char *)hw_stats) +
3395                                         rte_ixgbe_stats_strings[i].offset);
3396                         count++;
3397                 }
3398
3399                 /* MACsec Stats */
3400                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3401                         values[count] = *(uint64_t *)(((char *)macsec_stats) +
3402                                         rte_ixgbe_macsec_strings[i].offset);
3403                         count++;
3404                 }
3405
3406                 /* RX Priority Stats */
3407                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3408                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3409                                 values[count] =
3410                                         *(uint64_t *)(((char *)hw_stats) +
3411                                         rte_ixgbe_rxq_strings[stat].offset +
3412                                         (sizeof(uint64_t) * i));
3413                                 count++;
3414                         }
3415                 }
3416
3417                 /* TX Priority Stats */
3418                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3419                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3420                                 values[count] =
3421                                         *(uint64_t *)(((char *)hw_stats) +
3422                                         rte_ixgbe_txq_strings[stat].offset +
3423                                         (sizeof(uint64_t) * i));
3424                                 count++;
3425                         }
3426                 }
3427                 return count;
3428         }
3429
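             /* Specific ids were requested: retrieve the full value table
              * first, then copy out only the requested entries.
              */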
3430         uint16_t i;
3431         uint16_t size = ixgbe_xstats_calc_num();
3432         uint64_t values_copy[size];
3433
3434         ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
3435
3436         for (i = 0; i < n; i++) {
3437                 if (ids[i] >= size) {
3438                         PMD_INIT_LOG(ERR, "id value isn't valid");
3439                         return -1;
3440                 }
3441                 values[i] = values_copy[ids[i]];
3442         }
3443         return n;
3444 }
3445
3446 static void
3447 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3448 {
3449         struct ixgbe_hw_stats *stats =
3450                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3451         struct ixgbe_macsec_stats *macsec_stats =
3452                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3453                                 dev->data->dev_private);
3454
3455         unsigned count = ixgbe_xstats_calc_num();
3456
3457         /* HW registers are cleared on read */
3458         ixgbe_dev_xstats_get(dev, NULL, count);
3459
3460         /* Reset software totals */
3461         memset(stats, 0, sizeof(*stats));
3462         memset(macsec_stats, 0, sizeof(*macsec_stats));
3463 }
3464
3465 static void
3466 ixgbevf_update_stats(struct rte_eth_dev *dev)
3467 {
3468         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3469         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3470                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3471
3472         /* Good Rx packet, include VF loopback */
3473         UPDATE_VF_STAT(IXGBE_VFGPRC,
3474             hw_stats->last_vfgprc, hw_stats->vfgprc);
3475
3476         /* Good Rx octets, include VF loopback */
3477         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3478             hw_stats->last_vfgorc, hw_stats->vfgorc);
3479
3480         /* Good Tx packet, include VF loopback */
3481         UPDATE_VF_STAT(IXGBE_VFGPTC,
3482             hw_stats->last_vfgptc, hw_stats->vfgptc);
3483
3484         /* Good Tx octets, include VF loopback */
3485         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3486             hw_stats->last_vfgotc, hw_stats->vfgotc);
3487
3488         /* Rx Multicast Packet */
3489         UPDATE_VF_STAT(IXGBE_VFMPRC,
3490             hw_stats->last_vfmprc, hw_stats->vfmprc);
3491 }
3492
3493 static int
3494 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3495                        unsigned n)
3496 {
3497         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3498                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3499         unsigned i;
3500
3501         if (n < IXGBEVF_NB_XSTATS)
3502                 return IXGBEVF_NB_XSTATS;
3503
3504         ixgbevf_update_stats(dev);
3505
3506         if (!xstats)
3507                 return 0;
3508
3509         /* Extended stats */
3510         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3511                 xstats[i].id = i;
3512                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3513                         rte_ixgbevf_stats_strings[i].offset);
3514         }
3515
3516         return IXGBEVF_NB_XSTATS;
3517 }
3518
3519 static void
3520 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3521 {
3522         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3523                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3524
3525         ixgbevf_update_stats(dev);
3526
3527         if (stats == NULL)
3528                 return;
3529
3530         stats->ipackets = hw_stats->vfgprc;
3531         stats->ibytes = hw_stats->vfgorc;
3532         stats->opackets = hw_stats->vfgptc;
3533         stats->obytes = hw_stats->vfgotc;
3534 }
3535
3536 static void
3537 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3538 {
3539         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3540                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3541
3542         /* Sync HW register to the last stats */
3543         ixgbevf_dev_stats_get(dev, NULL);
3544
3545         /* reset HW current stats */
3546         hw_stats->vfgprc = 0;
3547         hw_stats->vfgorc = 0;
3548         hw_stats->vfgptc = 0;
3549         hw_stats->vfgotc = 0;
3550 }
3551
3552 static int
3553 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3554 {
3555         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3556         u16 eeprom_verh, eeprom_verl;
3557         u32 etrack_id;
3558         int ret;
3559
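             /* The firmware version (eTrack ID) is stored as two 16-bit EEPROM
              * words; combine them into a single 32-bit identifier.
              */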
3560         ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3561         ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3562
3563         etrack_id = (eeprom_verh << 16) | eeprom_verl;
3564         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3565
3566         ret += 1; /* add the size of '\0' */
3567         if (fw_size < (u32)ret)
3568                 return ret;
3569         else
3570                 return 0;
3571 }
3572
3573 static void
3574 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3575 {
3576         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3577         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3578         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3579
3580         dev_info->pci_dev = pci_dev;
3581         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3582         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3583         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3584                 /*
3585                  * When DCB/VT is off, maximum number of queues changes,
3586                  * except for 82598EB, which remains constant.
3587                  */
3588                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3589                                 hw->mac.type != ixgbe_mac_82598EB)
3590                         dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3591         }
3592         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3593         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3594         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3595         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3596         dev_info->max_vfs = pci_dev->max_vfs;
3597         if (hw->mac.type == ixgbe_mac_82598EB)
3598                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3599         else
3600                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3601         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3602         dev_info->rx_offload_capa =
3603                 DEV_RX_OFFLOAD_VLAN_STRIP |
3604                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3605                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3606                 DEV_RX_OFFLOAD_TCP_CKSUM;
3607
3608         /*
3609          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
3610          * mode.
3611          */
3612         if ((hw->mac.type == ixgbe_mac_82599EB ||
3613              hw->mac.type == ixgbe_mac_X540) &&
3614             !RTE_ETH_DEV_SRIOV(dev).active)
3615                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
3616
3617         if (hw->mac.type == ixgbe_mac_82599EB ||
3618             hw->mac.type == ixgbe_mac_X540)
3619                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
3620
3621         if (hw->mac.type == ixgbe_mac_X550 ||
3622             hw->mac.type == ixgbe_mac_X550EM_x ||
3623             hw->mac.type == ixgbe_mac_X550EM_a)
3624                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
3625
3626         dev_info->tx_offload_capa =
3627                 DEV_TX_OFFLOAD_VLAN_INSERT |
3628                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3629                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3630                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3631                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3632                 DEV_TX_OFFLOAD_TCP_TSO;
3633
3634         if (hw->mac.type == ixgbe_mac_82599EB ||
3635             hw->mac.type == ixgbe_mac_X540)
3636                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
3637
3638         if (hw->mac.type == ixgbe_mac_X550 ||
3639             hw->mac.type == ixgbe_mac_X550EM_x ||
3640             hw->mac.type == ixgbe_mac_X550EM_a)
3641                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
3642
3643         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3644                 .rx_thresh = {
3645                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3646                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3647                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3648                 },
3649                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3650                 .rx_drop_en = 0,
3651         };
3652
3653         dev_info->default_txconf = (struct rte_eth_txconf) {
3654                 .tx_thresh = {
3655                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3656                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3657                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3658                 },
3659                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3660                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3661                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3662                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3663         };
3664
3665         dev_info->rx_desc_lim = rx_desc_lim;
3666         dev_info->tx_desc_lim = tx_desc_lim;
3667
3668         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3669         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3670         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3671
3672         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3673         if (hw->mac.type == ixgbe_mac_X540 ||
3674             hw->mac.type == ixgbe_mac_X540_vf ||
3675             hw->mac.type == ixgbe_mac_X550 ||
3676             hw->mac.type == ixgbe_mac_X550_vf) {
3677                 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3678         }
3679 }
3680
3681 static const uint32_t *
3682 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3683 {
3684         static const uint32_t ptypes[] = {
3685                 /* For non-vec functions,
3686                  * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3687                  * for vec functions,
3688                  * refers to _recv_raw_pkts_vec().
3689                  */
3690                 RTE_PTYPE_L2_ETHER,
3691                 RTE_PTYPE_L3_IPV4,
3692                 RTE_PTYPE_L3_IPV4_EXT,
3693                 RTE_PTYPE_L3_IPV6,
3694                 RTE_PTYPE_L3_IPV6_EXT,
3695                 RTE_PTYPE_L4_SCTP,
3696                 RTE_PTYPE_L4_TCP,
3697                 RTE_PTYPE_L4_UDP,
3698                 RTE_PTYPE_TUNNEL_IP,
3699                 RTE_PTYPE_INNER_L3_IPV6,
3700                 RTE_PTYPE_INNER_L3_IPV6_EXT,
3701                 RTE_PTYPE_INNER_L4_TCP,
3702                 RTE_PTYPE_INNER_L4_UDP,
3703                 RTE_PTYPE_UNKNOWN
3704         };
3705
3706         if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3707             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3708             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3709             dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3710                 return ptypes;
3711
3712 #if defined(RTE_ARCH_X86)
3713         if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3714             dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3715                 return ptypes;
3716 #endif
3717         return NULL;
3718 }
3719
3720 static void
3721 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3722                      struct rte_eth_dev_info *dev_info)
3723 {
3724         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3725         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3726
3727         dev_info->pci_dev = pci_dev;
3728         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3729         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3730         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3731         dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
3732         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3733         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3734         dev_info->max_vfs = pci_dev->max_vfs;
3735         if (hw->mac.type == ixgbe_mac_82598EB)
3736                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3737         else
3738                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3739         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
3740                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3741                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3742                                 DEV_RX_OFFLOAD_TCP_CKSUM;
3743         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
3744                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3745                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3746                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3747                                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3748                                 DEV_TX_OFFLOAD_TCP_TSO;
3749
3750         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3751                 .rx_thresh = {
3752                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3753                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3754                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3755                 },
3756                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3757                 .rx_drop_en = 0,
3758         };
3759
3760         dev_info->default_txconf = (struct rte_eth_txconf) {
3761                 .tx_thresh = {
3762                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3763                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3764                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3765                 },
3766                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3767                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3768                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3769                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3770         };
3771
3772         dev_info->rx_desc_lim = rx_desc_lim;
3773         dev_info->tx_desc_lim = tx_desc_lim;
3774 }
3775
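/*
 * Note: when called with wait_to_complete == 0, the helper below derives the
 * VF link state from the VFLINKS register only and skips the PF mailbox
 * handshake, so a quick link status query cannot block on a busy mailbox.
 */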
3776 static int
3777 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3778                    int *link_up, int wait_to_complete)
3779 {
3780         /**
3781          * for a quick link status check, wait_to_complete == 0,
3782          * skip PF link status checking
3783          */
3784         bool no_pflink_check = wait_to_complete == 0;
3785         struct ixgbe_mbx_info *mbx = &hw->mbx;
3786         struct ixgbe_mac_info *mac = &hw->mac;
3787         uint32_t links_reg, in_msg;
3788         int ret_val = 0;
3789
3790         /* If we were hit with a reset drop the link */
3791         if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
3792                 mac->get_link_status = true;
3793
3794         if (!mac->get_link_status)
3795                 goto out;
3796
3797         /* if link status is down no point in checking to see if pf is up */
3798         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3799         if (!(links_reg & IXGBE_LINKS_UP))
3800                 goto out;
3801
3802         /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
3803          * before the link status is correct
3804          */
3805         if (mac->type == ixgbe_mac_82599_vf) {
3806                 int i;
3807
3808                 for (i = 0; i < 5; i++) {
3809                         rte_delay_us(100);
3810                         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3811
3812                         if (!(links_reg & IXGBE_LINKS_UP))
3813                                 goto out;
3814                 }
3815         }
3816
3817         switch (links_reg & IXGBE_LINKS_SPEED_82599) {
3818         case IXGBE_LINKS_SPEED_10G_82599:
3819                 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3820                 if (hw->mac.type >= ixgbe_mac_X550) {
3821                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3822                                 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
3823                 }
3824                 break;
3825         case IXGBE_LINKS_SPEED_1G_82599:
3826                 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3827                 break;
3828         case IXGBE_LINKS_SPEED_100_82599:
3829                 *speed = IXGBE_LINK_SPEED_100_FULL;
3830                 if (hw->mac.type == ixgbe_mac_X550) {
3831                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3832                                 *speed = IXGBE_LINK_SPEED_5GB_FULL;
3833                 }
3834                 break;
3835         case IXGBE_LINKS_SPEED_10_X550EM_A:
3836                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3837                 /* This value is reserved on older MACs */
3838                 if (hw->mac.type >= ixgbe_mac_X550)
3839                         *speed = IXGBE_LINK_SPEED_10_FULL;
3840                 break;
3841         default:
3842                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3843         }
3844
3845         if (no_pflink_check) {
3846                 if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
3847                         mac->get_link_status = true;
3848                 else
3849                         mac->get_link_status = false;
3850
3851                 goto out;
3852         }
3853         /* if the read failed it could just be a mailbox collision, best wait
3854          * until we are called again and don't report an error
3855          */
3856         if (mbx->ops.read(hw, &in_msg, 1, 0))
3857                 goto out;
3858
3859         if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
3860                 /* msg is not CTS; if it is a NACK we must have lost CTS status */
3861                 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
3862                         ret_val = -1;
3863                 goto out;
3864         }
3865
3866         /* the pf is talking, if we timed out in the past we reinit */
3867         if (!mbx->timeout) {
3868                 ret_val = -1;
3869                 goto out;
3870         }
3871
3872         /* if we passed all the tests above then the link is up and we no
3873          * longer need to check for link
3874          */
3875         mac->get_link_status = false;
3876
3877 out:
3878         *link_up = !mac->get_link_status;
3879         return ret_val;
3880 }
3881
3882 /* return 0 if the link status changed, -1 if it did not change */
3883 static int
3884 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
3885                             int wait_to_complete, int vf)
3886 {
3887         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3888         struct rte_eth_link link, old;
3889         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3890         struct ixgbe_interrupt *intr =
3891                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3892         int link_up;
3893         int diag;
3894         u32 speed = 0;
3895         int wait = 1;
3896         bool autoneg = false;
3897
3898         link.link_status = ETH_LINK_DOWN;
3899         link.link_speed = 0;
3900         link.link_duplex = ETH_LINK_HALF_DUPLEX;
3901         memset(&old, 0, sizeof(old));
3902         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
3903
3904         hw->mac.get_link_status = true;
3905
3906         if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
3907                 ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
3908                 speed = hw->phy.autoneg_advertised;
3909                 if (!speed)
3910                         ixgbe_get_link_capabilities(hw, &speed, &autoneg);
3911                 ixgbe_setup_link(hw, speed, true);
3912         }
3913
3914         /* check if it needs to wait to complete, if lsc interrupt is enabled */
3915         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
3916                 wait = 0;
3917
3918         if (vf)
3919                 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
3920         else
3921                 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
3922
3923         if (diag != 0) {
3924                 link.link_speed = ETH_SPEED_NUM_100M;
3925                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3926                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3927                 if (link.link_status == old.link_status)
3928                         return -1;
3929                 return 0;
3930         }
3931
3932         if (link_up == 0) {
3933                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3934                 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
3935                 if (link.link_status == old.link_status)
3936                         return -1;
3937                 return 0;
3938         }
3939         intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
3940         link.link_status = ETH_LINK_UP;
3941         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3942
3943         switch (link_speed) {
3944         default:
3945         case IXGBE_LINK_SPEED_UNKNOWN:
3946                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3947                 link.link_speed = ETH_SPEED_NUM_100M;
3948                 break;
3949
3950         case IXGBE_LINK_SPEED_100_FULL:
3951                 link.link_speed = ETH_SPEED_NUM_100M;
3952                 break;
3953
3954         case IXGBE_LINK_SPEED_1GB_FULL:
3955                 link.link_speed = ETH_SPEED_NUM_1G;
3956                 break;
3957
3958         case IXGBE_LINK_SPEED_10GB_FULL:
3959                 link.link_speed = ETH_SPEED_NUM_10G;
3960                 break;
3961         }
3962         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3963
3964         if (link.link_status == old.link_status)
3965                 return -1;
3966
3967         return 0;
3968 }
3969
3970 static int
3971 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3972 {
3973         return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
3974 }
3975
3976 static int
3977 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3978 {
3979         return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
3980 }
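/*
 * Usage sketch (application side, illustrative only): the ethdev layer calls
 * link_update with wait_to_complete = 1 for rte_eth_link_get() and with
 * wait_to_complete = 0 for rte_eth_link_get_nowait():
 *
 *   struct rte_eth_link link;
 *   rte_eth_link_get_nowait(port_id, &link);  // quick, non-blocking check
 *   rte_eth_link_get(port_id, &link);         // may wait for completion
 */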
3981
3982 static void
3983 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
3984 {
3985         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3986         uint32_t fctrl;
3987
3988         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3989         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3990         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3991 }
3992
3993 static void
3994 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
3995 {
3996         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3997         uint32_t fctrl;
3998
3999         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4000         fctrl &= (~IXGBE_FCTRL_UPE);
4001         if (dev->data->all_multicast == 1)
4002                 fctrl |= IXGBE_FCTRL_MPE;
4003         else
4004                 fctrl &= (~IXGBE_FCTRL_MPE);
4005         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4006 }
4007
4008 static void
4009 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4010 {
4011         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4012         uint32_t fctrl;
4013
4014         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4015         fctrl |= IXGBE_FCTRL_MPE;
4016         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4017 }
4018
4019 static void
4020 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4021 {
4022         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4023         uint32_t fctrl;
4024
4025         if (dev->data->promiscuous == 1)
4026                 return; /* must remain in all_multicast mode */
4027
4028         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4029         fctrl &= (~IXGBE_FCTRL_MPE);
4030         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4031 }
4032
4033 /**
4034  * It clears the interrupt causes and enables the interrupt.
4035  * It will be called only once during NIC initialization.
4036  *
4037  * @param dev
4038  *  Pointer to struct rte_eth_dev.
4039  *
4040  * @return
4041  *  - On success, zero.
4042  *  - On failure, a negative value.
4043  */
4044 static int
4045 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
4046 {
4047         struct ixgbe_interrupt *intr =
4048                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4049
4050         ixgbe_dev_link_status_print(dev);
4051         intr->mask |= IXGBE_EICR_LSC;
4052
4053         return 0;
4054 }
4055
4056 /**
4057  * It clears the interrupt causes and enables the interrupt.
4058  * It will be called only once during NIC initialization.
4059  *
4060  * @param dev
4061  *  Pointer to struct rte_eth_dev.
4062  *
4063  * @return
4064  *  - On success, zero.
4065  *  - On failure, a negative value.
4066  */
4067 static int
4068 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4069 {
4070         struct ixgbe_interrupt *intr =
4071                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4072
4073         intr->mask |= IXGBE_EICR_RTX_QUEUE;
4074
4075         return 0;
4076 }
4077
4078 /**
4079  * It clears the interrupt causes and enables the interrupt.
4080  * It will be called only once during NIC initialization.
4081  *
4082  * @param dev
4083  *  Pointer to struct rte_eth_dev.
4084  *
4085  * @return
4086  *  - On success, zero.
4087  *  - On failure, a negative value.
4088  */
4089 static int
4090 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4091 {
4092         struct ixgbe_interrupt *intr =
4093                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4094
4095         intr->mask |= IXGBE_EICR_LINKSEC;
4096
4097         return 0;
4098 }
4099
4100 /*
4101  * It reads EICR and sets the flags (e.g. IXGBE_FLAG_NEED_LINK_UPDATE) for the link_update.
4102  *
4103  * @param dev
4104  *  Pointer to struct rte_eth_dev.
4105  *
4106  * @return
4107  *  - On success, zero.
4108  *  - On failure, a negative value.
4109  */
4110 static int
4111 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4112 {
4113         uint32_t eicr;
4114         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4115         struct ixgbe_interrupt *intr =
4116                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4117
4118         /* clear all cause mask */
4119         ixgbe_disable_intr(hw);
4120
4121         /* read-on-clear nic registers here */
4122         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4123         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4124
4125         intr->flags = 0;
4126
4127         /* set flag for async link update */
4128         if (eicr & IXGBE_EICR_LSC)
4129                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4130
4131         if (eicr & IXGBE_EICR_MAILBOX)
4132                 intr->flags |= IXGBE_FLAG_MAILBOX;
4133
4134         if (eicr & IXGBE_EICR_LINKSEC)
4135                 intr->flags |= IXGBE_FLAG_MACSEC;
4136
4137         if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
4138             hw->phy.type == ixgbe_phy_x550em_ext_t &&
4139             (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4140                 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4141
4142         return 0;
4143 }
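/*
 * Note: EICR is read-on-clear, so the causes read above are latched into
 * intr->flags here and consumed later by ixgbe_dev_interrupt_action();
 * reading EICR again would return 0 for the already-cleared bits.
 */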
4144
4145 /**
4146  * It gets and then prints the link status.
4147  *
4148  * @param dev
4149  *  Pointer to struct rte_eth_dev.
4150  *
4151  * @return
4152  *  void
4154  */
4155 static void
4156 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4157 {
4158         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4159         struct rte_eth_link link;
4160
4161         memset(&link, 0, sizeof(link));
4162         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4163         if (link.link_status) {
4164                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4165                                         (int)(dev->data->port_id),
4166                                         (unsigned)link.link_speed,
4167                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4168                                         "full-duplex" : "half-duplex");
4169         } else {
4170                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
4171                                 (int)(dev->data->port_id));
4172         }
4173         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4174                                 pci_dev->addr.domain,
4175                                 pci_dev->addr.bus,
4176                                 pci_dev->addr.devid,
4177                                 pci_dev->addr.function);
4178 }
4179
4180 /*
4181  * It executes link_update after knowing an interrupt occurred.
4182  *
4183  * @param dev
4184  *  Pointer to struct rte_eth_dev.
4185  *
4186  * @return
4187  *  - On success, zero.
4188  *  - On failure, a negative value.
4189  */
4190 static int
4191 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
4192                            struct rte_intr_handle *intr_handle)
4193 {
4194         struct ixgbe_interrupt *intr =
4195                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4196         int64_t timeout;
4197         struct rte_eth_link link;
4198         struct ixgbe_hw *hw =
4199                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4200
4201         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4202
4203         if (intr->flags & IXGBE_FLAG_MAILBOX) {
4204                 ixgbe_pf_mbx_process(dev);
4205                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
4206         }
4207
4208         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4209                 ixgbe_handle_lasi(hw);
4210                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4211         }
4212
4213         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4214                 /* get the link status before link update, for predicting later */
4215                 memset(&link, 0, sizeof(link));
4216                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4217
4218                 ixgbe_dev_link_update(dev, 0);
4219
4220                 /* the link is likely to come up */
4221                 if (!link.link_status)
4222                         /* handle it 1 sec later, waiting for it to become stable */
4223                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4224                 /* the link is likely to go down */
4225                 else
4226                         /* handle it 4 sec later, waiting for it to become stable */
4227                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4228
4229                 ixgbe_dev_link_status_print(dev);
4230                 if (rte_eal_alarm_set(timeout * 1000,
4231                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4232                         PMD_DRV_LOG(ERR, "Error setting alarm");
4233                 else {
4234                         /* remember original mask */
4235                         intr->mask_original = intr->mask;
4236                         /* only disable lsc interrupt */
4237                         intr->mask &= ~IXGBE_EIMS_LSC;
4238                 }
4239         }
4240
4241         PMD_DRV_LOG(DEBUG, "enable intr immediately");
4242         ixgbe_enable_intr(dev);
4243         rte_intr_enable(intr_handle);
4244
4245         return 0;
4246 }
4247
4248 /**
4249  * Interrupt handler which shall be registered for alarm callback for delayed
4250  * handling specific interrupt to wait for the stable nic state. As the
4251  * NIC interrupt state is not stable for ixgbe after link is just down,
4252  * it needs to wait 4 seconds to get the stable status.
4253  *
4254  * @param handle
4255  *  Pointer to interrupt handle.
4256  * @param param
4257  *  The address of the parameter (struct rte_eth_dev *) registered before.
4258  *
4259  * @return
4260  *  void
4261  */
4262 static void
4263 ixgbe_dev_interrupt_delayed_handler(void *param)
4264 {
4265         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4266         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4267         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4268         struct ixgbe_interrupt *intr =
4269                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4270         struct ixgbe_hw *hw =
4271                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4272         uint32_t eicr;
4273
4274         ixgbe_disable_intr(hw);
4275
4276         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4277         if (eicr & IXGBE_EICR_MAILBOX)
4278                 ixgbe_pf_mbx_process(dev);
4279
4280         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4281                 ixgbe_handle_lasi(hw);
4282                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4283         }
4284
4285         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4286                 ixgbe_dev_link_update(dev, 0);
4287                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4288                 ixgbe_dev_link_status_print(dev);
4289                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
4290                                               NULL, NULL);
4291         }
4292
4293         if (intr->flags & IXGBE_FLAG_MACSEC) {
4294                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
4295                                               NULL, NULL);
4296                 intr->flags &= ~IXGBE_FLAG_MACSEC;
4297         }
4298
4299         /* restore original mask */
4300         intr->mask = intr->mask_original;
4301         intr->mask_original = 0;
4302
4303         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4304         ixgbe_enable_intr(dev);
4305         rte_intr_enable(intr_handle);
4306 }
4307
4308 /**
4309  * Interrupt handler triggered by the NIC for handling
4310  * a specific interrupt.
4311  *
4312  * @param handle
4313  *  Pointer to interrupt handle.
4314  * @param param
4315  *  The address of the parameter (struct rte_eth_dev *) registered before.
4316  *
4317  * @return
4318  *  void
4319  */
4320 static void
4321 ixgbe_dev_interrupt_handler(void *param)
4322 {
4323         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4324
4325         ixgbe_dev_interrupt_get_status(dev);
4326         ixgbe_dev_interrupt_action(dev, dev->intr_handle);
4327 }
4328
4329 static int
4330 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4331 {
4332         struct ixgbe_hw *hw;
4333
4334         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4335         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4336 }
4337
4338 static int
4339 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4340 {
4341         struct ixgbe_hw *hw;
4342
4343         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4344         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4345 }
4346
4347 static int
4348 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4349 {
4350         struct ixgbe_hw *hw;
4351         uint32_t mflcn_reg;
4352         uint32_t fccfg_reg;
4353         int rx_pause;
4354         int tx_pause;
4355
4356         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4357
4358         fc_conf->pause_time = hw->fc.pause_time;
4359         fc_conf->high_water = hw->fc.high_water[0];
4360         fc_conf->low_water = hw->fc.low_water[0];
4361         fc_conf->send_xon = hw->fc.send_xon;
4362         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4363
4364         /*
4365          * Return rx_pause status according to actual setting of
4366          * MFLCN register.
4367          */
4368         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4369         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4370                 rx_pause = 1;
4371         else
4372                 rx_pause = 0;
4373
4374         /*
4375          * Return tx_pause status according to actual setting of
4376          * FCCFG register.
4377          */
4378         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4379         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4380                 tx_pause = 1;
4381         else
4382                 tx_pause = 0;
4383
4384         if (rx_pause && tx_pause)
4385                 fc_conf->mode = RTE_FC_FULL;
4386         else if (rx_pause)
4387                 fc_conf->mode = RTE_FC_RX_PAUSE;
4388         else if (tx_pause)
4389                 fc_conf->mode = RTE_FC_TX_PAUSE;
4390         else
4391                 fc_conf->mode = RTE_FC_NONE;
4392
4393         return 0;
4394 }
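/*
 * Illustrative application-side sketch (assumes a configured port `port_id`):
 *
 *   struct rte_eth_fc_conf fc_conf;
 *   if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0)
 *           printf("fc mode %d, high_water %u\n",
 *                  fc_conf.mode, fc_conf.high_water);
 *
 * The reported mode is reconstructed from the MFLCN/FCCFG registers as done
 * above.
 */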
4395
4396 static int
4397 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4398 {
4399         struct ixgbe_hw *hw;
4400         int err;
4401         uint32_t rx_buf_size;
4402         uint32_t max_high_water;
4403         uint32_t mflcn;
4404         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4405                 ixgbe_fc_none,
4406                 ixgbe_fc_rx_pause,
4407                 ixgbe_fc_tx_pause,
4408                 ixgbe_fc_full
4409         };
4410
4411         PMD_INIT_FUNC_TRACE();
4412
4413         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4414         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4415         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4416
4417         /*
4418          * Reserve at least one Ethernet frame for the watermark;
4419          * high_water/low_water are in kilobytes for ixgbe
4420          */
4421         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4422         if ((fc_conf->high_water > max_high_water) ||
4423                 (fc_conf->high_water < fc_conf->low_water)) {
4424                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4425                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4426                 return -EINVAL;
4427         }
4428
4429         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4430         hw->fc.pause_time     = fc_conf->pause_time;
4431         hw->fc.high_water[0]  = fc_conf->high_water;
4432         hw->fc.low_water[0]   = fc_conf->low_water;
4433         hw->fc.send_xon       = fc_conf->send_xon;
4434         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
4435
4436         err = ixgbe_fc_enable(hw);
4437
4438         /* Not negotiated is not an error case */
4439         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
4440
4441                 /* check if we want to forward MAC frames - driver doesn't have native
4442                  * capability to do that, so we'll write the registers ourselves */
4443
4444                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4445
4446                 /* set or clear MFLCN.PMCF bit depending on configuration */
4447                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4448                         mflcn |= IXGBE_MFLCN_PMCF;
4449                 else
4450                         mflcn &= ~IXGBE_MFLCN_PMCF;
4451
4452                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
4453                 IXGBE_WRITE_FLUSH(hw);
4454
4455                 return 0;
4456         }
4457
4458         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
4459         return -EIO;
4460 }
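/*
 * Illustrative sketch of configuring flow control from an application; the
 * numeric values are examples only, and high/low water are in KB as checked
 * above:
 *
 *   struct rte_eth_fc_conf fc_conf = {
 *           .mode = RTE_FC_FULL,
 *           .high_water = 96,
 *           .low_water = 64,
 *           .pause_time = 100,
 *           .send_xon = 1,
 *           .autoneg = 1,
 *   };
 *   int ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */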
4461
4462 /**
4463  *  ixgbe_dcb_pfc_enable_generic - Enable flow control
4464  *  @hw: pointer to hardware structure
4465  *  @tc_num: traffic class number
4466  *  Enable flow control according to the current settings.
4467  */
4468 static int
4469 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4470 {
4471         int ret_val = 0;
4472         uint32_t mflcn_reg, fccfg_reg;
4473         uint32_t reg;
4474         uint32_t fcrtl, fcrth;
4475         uint8_t i;
4476         uint8_t nb_rx_en;
4477
4478         /* Validate the water mark configuration */
4479         if (!hw->fc.pause_time) {
4480                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4481                 goto out;
4482         }
4483
4484         /* Low water mark of zero causes XOFF floods */
4485         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4486                  /* High/Low water can not be 0 */
4487                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4488                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4489                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4490                         goto out;
4491                 }
4492
4493                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4494                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4495                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4496                         goto out;
4497                 }
4498         }
4499         /* Negotiate the fc mode to use */
4500         ixgbe_fc_autoneg(hw);
4501
4502         /* Disable any previous flow control settings */
4503         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4504         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4505
4506         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4507         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4508
4509         switch (hw->fc.current_mode) {
4510         case ixgbe_fc_none:
4511                 /*
4512                  * If the count of enabled RX Priority Flow control > 1,
4513                  * the TX pause can not be disabled
4514                  */
4515                 nb_rx_en = 0;
4516                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4517                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4518                         if (reg & IXGBE_FCRTH_FCEN)
4519                                 nb_rx_en++;
4520                 }
4521                 if (nb_rx_en > 1)
4522                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4523                 break;
4524         case ixgbe_fc_rx_pause:
4525                 /*
4526                  * Rx Flow control is enabled and Tx Flow control is
4527                  * disabled by software override. Since there really
4528                  * isn't a way to advertise that we are capable of RX
4529                  * Pause ONLY, we will advertise that we support both
4530                  * symmetric and asymmetric Rx PAUSE.  Later, we will
4531                  * disable the adapter's ability to send PAUSE frames.
4532                  */
4533                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4534                 /*
4535                  * If the count of enabled RX Priority Flow control > 1,
4536                  * the TX pause can not be disabled
4537                  */
4538                 nb_rx_en = 0;
4539                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4540                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4541                         if (reg & IXGBE_FCRTH_FCEN)
4542                                 nb_rx_en++;
4543                 }
4544                 if (nb_rx_en > 1)
4545                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4546                 break;
4547         case ixgbe_fc_tx_pause:
4548                 /*
4549                  * Tx Flow control is enabled, and Rx Flow control is
4550                  * disabled by software override.
4551                  */
4552                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4553                 break;
4554         case ixgbe_fc_full:
4555                 /* Flow control (both Rx and Tx) is enabled by SW override. */
4556                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4557                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4558                 break;
4559         default:
4560                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4561                 ret_val = IXGBE_ERR_CONFIG;
4562                 goto out;
4563         }
4564
4565         /* Set 802.3x based flow control settings. */
4566         mflcn_reg |= IXGBE_MFLCN_DPF;
4567         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4568         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4569
4570         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
4571         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4572                 hw->fc.high_water[tc_num]) {
4573                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4574                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4575                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4576         } else {
4577                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4578                 /*
4579                  * In order to prevent Tx hangs when the internal Tx
4580                  * switch is enabled we must set the high water mark
4581                  * to the maximum FCRTH value.  This allows the Tx
4582                  * switch to function even under heavy Rx workloads.
4583                  */
4584                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4585         }
4586         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4587
4588         /* Configure pause time (2 TCs per register) */
4589         reg = hw->fc.pause_time * 0x00010001;
4590         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4591                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4592
4593         /* Configure flow control refresh threshold value */
4594         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4595
4596 out:
4597         return ret_val;
4598 }
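/*
 * Notes on the register programming above: high_water/low_water are kept in
 * KB, so the "<< 10" converts them to the byte-based FCRTL/FCRTH thresholds,
 * and FCTTV holds the pause time for two traffic classes per register, hence
 * the replication into both 16-bit halves with "* 0x00010001".
 */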
4599
4600 static int
4601 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4602 {
4603         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4604         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4605
4606         if (hw->mac.type != ixgbe_mac_82598EB) {
4607                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4608         }
4609         return ret_val;
4610 }
4611
4612 static int
4613 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4614 {
4615         int err;
4616         uint32_t rx_buf_size;
4617         uint32_t max_high_water;
4618         uint8_t tc_num;
4619         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4620         struct ixgbe_hw *hw =
4621                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4622         struct ixgbe_dcb_config *dcb_config =
4623                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4624
4625         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4626                 ixgbe_fc_none,
4627                 ixgbe_fc_rx_pause,
4628                 ixgbe_fc_tx_pause,
4629                 ixgbe_fc_full
4630         };
4631
4632         PMD_INIT_FUNC_TRACE();
4633
4634         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4635         tc_num = map[pfc_conf->priority];
4636         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4637         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4638         /*
4639          * Reserve at least one Ethernet frame for the watermark;
4640          * high_water/low_water are in kilobytes for ixgbe
4641          */
4642         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4643         if ((pfc_conf->fc.high_water > max_high_water) ||
4644             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4645                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4646                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4647                 return -EINVAL;
4648         }
4649
4650         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4651         hw->fc.pause_time = pfc_conf->fc.pause_time;
4652         hw->fc.send_xon = pfc_conf->fc.send_xon;
4653         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
4654         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4655
4656         err = ixgbe_dcb_pfc_enable(dev, tc_num);
4657
4658         /* Not negotiated is not an error case */
4659         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4660                 return 0;
4661
4662         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4663         return -EIO;
4664 }
4665
4666 static int
4667 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
4668                           struct rte_eth_rss_reta_entry64 *reta_conf,
4669                           uint16_t reta_size)
4670 {
4671         uint16_t i, sp_reta_size;
4672         uint8_t j, mask;
4673         uint32_t reta, r;
4674         uint16_t idx, shift;
4675         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4676         uint32_t reta_reg;
4677
4678         PMD_INIT_FUNC_TRACE();
4679
4680         if (!ixgbe_rss_update_sp(hw->mac.type)) {
4681                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
4682                         "NIC.");
4683                 return -ENOTSUP;
4684         }
4685
4686         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4687         if (reta_size != sp_reta_size) {
4688                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4689                         "(%d) doesn't match the number the hardware can support "
4690                         "(%d)", reta_size, sp_reta_size);
4691                 return -EINVAL;
4692         }
4693
4694         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4695                 idx = i / RTE_RETA_GROUP_SIZE;
4696                 shift = i % RTE_RETA_GROUP_SIZE;
4697                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4698                                                 IXGBE_4_BIT_MASK);
4699                 if (!mask)
4700                         continue;
4701                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4702                 if (mask == IXGBE_4_BIT_MASK)
4703                         r = 0;
4704                 else
4705                         r = IXGBE_READ_REG(hw, reta_reg);
4706                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4707                         if (mask & (0x1 << j))
4708                                 reta |= reta_conf[idx].reta[shift + j] <<
4709                                                         (CHAR_BIT * j);
4710                         else
4711                                 reta |= r & (IXGBE_8_BIT_MASK <<
4712                                                 (CHAR_BIT * j));
4713                 }
4714                 IXGBE_WRITE_REG(hw, reta_reg, reta);
4715         }
4716
4717         return 0;
4718 }
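/*
 * Illustrative application-side sketch (values are examples only): spread a
 * 128-entry redirection table across the first two Rx queues.
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[2];
 *   int i;
 *
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (i = 0; i < 128; i++) {
 *           reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                   1ULL << (i % RTE_RETA_GROUP_SIZE);
 *           reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                   i % 2;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 *
 * The reta_size argument must match the size reported for the device (see the
 * sp_reta_size check above).
 */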
4719
4720 static int
4721 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4722                          struct rte_eth_rss_reta_entry64 *reta_conf,
4723                          uint16_t reta_size)
4724 {
4725         uint16_t i, sp_reta_size;
4726         uint8_t j, mask;
4727         uint32_t reta;
4728         uint16_t idx, shift;
4729         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4730         uint32_t reta_reg;
4731
4732         PMD_INIT_FUNC_TRACE();
4733         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4734         if (reta_size != sp_reta_size) {
4735                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4736                         "(%d) doesn't match the number the hardware can support "
4737                         "(%d)", reta_size, sp_reta_size);
4738                 return -EINVAL;
4739         }
4740
4741         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4742                 idx = i / RTE_RETA_GROUP_SIZE;
4743                 shift = i % RTE_RETA_GROUP_SIZE;
4744                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4745                                                 IXGBE_4_BIT_MASK);
4746                 if (!mask)
4747                         continue;
4748
4749                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4750                 reta = IXGBE_READ_REG(hw, reta_reg);
4751                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4752                         if (mask & (0x1 << j))
4753                                 reta_conf[idx].reta[shift + j] =
4754                                         ((reta >> (CHAR_BIT * j)) &
4755                                                 IXGBE_8_BIT_MASK);
4756                 }
4757         }
4758
4759         return 0;
4760 }
4761
4762 static int
4763 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4764                                 uint32_t index, uint32_t pool)
4765 {
4766         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4767         uint32_t enable_addr = 1;
4768
4769         return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
4770                              pool, enable_addr);
4771 }
4772
4773 static void
4774 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
4775 {
4776         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4777
4778         ixgbe_clear_rar(hw, index);
4779 }
4780
4781 static void
4782 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4783 {
4784         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4785
4786         ixgbe_remove_rar(dev, 0);
4787
4788         ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
4789 }
4790
4791 static bool
4792 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
4793 {
4794         if (strcmp(dev->device->driver->name, drv->driver.name))
4795                 return false;
4796
4797         return true;
4798 }
4799
4800 bool
4801 is_ixgbe_supported(struct rte_eth_dev *dev)
4802 {
4803         return is_device_supported(dev, &rte_ixgbe_pmd);
4804 }
4805
4806 static int
4807 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4808 {
4809         uint32_t hlreg0;
4810         uint32_t maxfrs;
4811         struct ixgbe_hw *hw;
4812         struct rte_eth_dev_info dev_info;
4813         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4814         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4815
4816         ixgbe_dev_info_get(dev, &dev_info);
4817
4818         /* check that mtu is within the allowed range */
4819         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
4820                 return -EINVAL;
4821
4822         /* refuse mtu that requires the support of scattered packets when this
4823          * feature has not been enabled before.
4824          */
4825         if (!rx_conf->enable_scatter &&
4826             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
4827              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
4828                 return -EINVAL;
4829
4830         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4831         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4832
4833         /* switch to jumbo mode if needed */
4834         if (frame_size > ETHER_MAX_LEN) {
4835                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
4836                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4837         } else {
4838                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
4839                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4840         }
4841         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4842
4843         /* update max frame size */
4844         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4845
4846         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4847         maxfrs &= 0x0000FFFF;
4848         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
4849         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4850
4851         return 0;
4852 }
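/*
 * Worked example: for mtu = 9000 the frame_size above is
 * 9000 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 9018 bytes, which exceeds
 * ETHER_MAX_LEN (1518), so jumbo mode is enabled and 9018 is written into the
 * upper 16 bits of MAXFRS.
 */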
4853
4854 /*
4855  * Virtual Function operations
4856  */
4857 static void
4858 ixgbevf_intr_disable(struct ixgbe_hw *hw)
4859 {
4860         PMD_INIT_FUNC_TRACE();
4861
4862         /* Clear the interrupt mask to stop interrupts from being generated */
4863         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
4864
4865         IXGBE_WRITE_FLUSH(hw);
4866 }
4867
4868 static void
4869 ixgbevf_intr_enable(struct ixgbe_hw *hw)
4870 {
4871         PMD_INIT_FUNC_TRACE();
4872
4873         /* VF enable interrupt autoclean */
4874         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
4875         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
4876         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
4877
4878         IXGBE_WRITE_FLUSH(hw);
4879 }
4880
4881 static int
4882 ixgbevf_dev_configure(struct rte_eth_dev *dev)
4883 {
4884         struct rte_eth_conf *conf = &dev->data->dev_conf;
4885         struct ixgbe_adapter *adapter =
4886                         (struct ixgbe_adapter *)dev->data->dev_private;
4887
4888         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
4889                      dev->data->port_id);
4890
4891         /*
4892          * VF has no ability to enable/disable HW CRC
4893          * Keep the persistent behavior the same as Host PF
4894          */
4895 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
4896         if (!conf->rxmode.hw_strip_crc) {
4897                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
4898                 conf->rxmode.hw_strip_crc = 1;
4899         }
4900 #else
4901         if (conf->rxmode.hw_strip_crc) {
4902                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
4903                 conf->rxmode.hw_strip_crc = 0;
4904         }
4905 #endif
4906
4907         /*
4908          * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
4909          * allocation or vector Rx preconditions we will reset it.
4910          */
4911         adapter->rx_bulk_alloc_allowed = true;
4912         adapter->rx_vec_allowed = true;
4913
4914         return 0;
4915 }
4916
4917 static int
4918 ixgbevf_dev_start(struct rte_eth_dev *dev)
4919 {
4920         struct ixgbe_hw *hw =
4921                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4922         uint32_t intr_vector = 0;
4923         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4924         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4925
4926         int err, mask = 0;
4927
4928         PMD_INIT_FUNC_TRACE();
4929
4930         hw->mac.ops.reset_hw(hw);
4931         hw->mac.get_link_status = true;
4932
4933         /* negotiate mailbox API version to use with the PF. */
4934         ixgbevf_negotiate_api(hw);
4935
4936         ixgbevf_dev_tx_init(dev);
4937
4938         /* This can fail when allocating mbufs for descriptor rings */
4939         err = ixgbevf_dev_rx_init(dev);
4940         if (err) {
4941                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
4942                 ixgbe_dev_clear_queues(dev);
4943                 return err;
4944         }
4945
4946         /* Set vfta */
4947         ixgbevf_set_vfta_all(dev, 1);
4948
4949         /* Set HW strip */
4950         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
4951                 ETH_VLAN_EXTEND_MASK;
4952         ixgbevf_vlan_offload_set(dev, mask);
4953
4954         ixgbevf_dev_rxtx_start(dev);
4955
4956         /* check and configure queue intr-vector mapping */
4957         if (dev->data->dev_conf.intr_conf.rxq != 0) {
4958                 intr_vector = dev->data->nb_rx_queues;
4959                 if (rte_intr_efd_enable(intr_handle, intr_vector))
4960                         return -1;
4961         }
4962
4963         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
4964                 intr_handle->intr_vec =
4965                         rte_zmalloc("intr_vec",
4966                                     dev->data->nb_rx_queues * sizeof(int), 0);
4967                 if (intr_handle->intr_vec == NULL) {
4968                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
4969                                      " intr_vec", dev->data->nb_rx_queues);
4970                         return -ENOMEM;
4971                 }
4972         }
4973         ixgbevf_configure_msix(dev);
4974
4975         rte_intr_enable(intr_handle);
4976
4977         /* Re-enable interrupt for VF */
4978         ixgbevf_intr_enable(hw);
4979
4980         return 0;
4981 }
4982
4983 static void
4984 ixgbevf_dev_stop(struct rte_eth_dev *dev)
4985 {
4986         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4987         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4988         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4989
4990         PMD_INIT_FUNC_TRACE();
4991
4992         ixgbevf_intr_disable(hw);
4993
4994         hw->adapter_stopped = 1;
4995         ixgbe_stop_adapter(hw);
4996
4997         /*
4998          * Clear what we set, but we still keep shadow_vfta to
4999          * restore it after the device starts
5000           */
5001         ixgbevf_set_vfta_all(dev, 0);
5002
5003         /* Clear stored conf */
5004         dev->data->scattered_rx = 0;
5005
5006         ixgbe_dev_clear_queues(dev);
5007
5008         /* Clean datapath event and queue/vec mapping */
5009         rte_intr_efd_disable(intr_handle);
5010         if (intr_handle->intr_vec != NULL) {
5011                 rte_free(intr_handle->intr_vec);
5012                 intr_handle->intr_vec = NULL;
5013         }
5014 }
5015
5016 static void
5017 ixgbevf_dev_close(struct rte_eth_dev *dev)
5018 {
5019         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5020
5021         PMD_INIT_FUNC_TRACE();
5022
5023         ixgbe_reset_hw(hw);
5024
5025         ixgbevf_dev_stop(dev);
5026
5027         ixgbe_dev_free_queues(dev);
5028
5029         /**
5030          * Remove the VF MAC address to ensure
5031          * that the VF traffic goes to the PF
5032          * after stop, close and detach of the VF
5033          **/
5034         ixgbevf_remove_mac_addr(dev, 0);
5035 }
5036
5037 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5038 {
5039         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5040         struct ixgbe_vfta *shadow_vfta =
5041                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5042         int i = 0, j = 0, vfta = 0, mask = 1;
5043
5044         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5045                 vfta = shadow_vfta->vfta[i];
5046                 if (vfta) {
5047                         mask = 1;
5048                         for (j = 0; j < 32; j++) {
5049                                 if (vfta & mask)
5050                                         ixgbe_set_vfta(hw, (i<<5)+j, 0,
5051                                                        on, false);
5052                                 mask <<= 1;
5053                         }
5054                 }
5055         }
5056
5057 }
5058
5059 static int
5060 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5061 {
5062         struct ixgbe_hw *hw =
5063                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5064         struct ixgbe_vfta *shadow_vfta =
5065                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5066         uint32_t vid_idx = 0;
5067         uint32_t vid_bit = 0;
5068         int ret = 0;
5069
5070         PMD_INIT_FUNC_TRACE();
5071
5072         /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
5073         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5074         if (ret) {
5075                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5076                 return ret;
5077         }
5078         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5079         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5080
5081         /* Save what we set and restore it after device reset */
5082         if (on)
5083                 shadow_vfta->vfta[vid_idx] |= vid_bit;
5084         else
5085                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
5086
5087         return 0;
5088 }
5089
5090 static void
5091 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5092 {
5093         struct ixgbe_hw *hw =
5094                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5095         uint32_t ctrl;
5096
5097         PMD_INIT_FUNC_TRACE();
5098
5099         if (queue >= hw->mac.max_rx_queues)
5100                 return;
5101
5102         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5103         if (on)
5104                 ctrl |= IXGBE_RXDCTL_VME;
5105         else
5106                 ctrl &= ~IXGBE_RXDCTL_VME;
5107         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5108
5109         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5110 }
5111
5112 static void
5113 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5114 {
5115         struct ixgbe_hw *hw =
5116                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5117         uint16_t i;
5118         int on = 0;
5119
5120         /* The VF function only supports the HW VLAN strip feature; others are not supported */
5121         if (mask & ETH_VLAN_STRIP_MASK) {
5122                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
5123
5124                 for (i = 0; i < hw->mac.max_rx_queues; i++)
5125                         ixgbevf_vlan_strip_queue_set(dev, i, on);
5126         }
5127 }
5128
5129 int
5130 ixgbe_vt_check(struct ixgbe_hw *hw)
5131 {
5132         uint32_t reg_val;
5133
5134         /* if Virtualization Technology is enabled */
5135         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
5136         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
5137                 PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
5138                 return -1;
5139         }
5140
5141         return 0;
5142 }
5143
5144 static uint32_t
5145 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
5146 {
5147         uint32_t vector = 0;
5148
5149         switch (hw->mac.mc_filter_type) {
5150         case 0:   /* use bits [47:36] of the address */
5151                 vector = ((uc_addr->addr_bytes[4] >> 4) |
5152                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
5153                 break;
5154         case 1:   /* use bits [46:35] of the address */
5155                 vector = ((uc_addr->addr_bytes[4] >> 3) |
5156                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
5157                 break;
5158         case 2:   /* use bits [45:34] of the address */
5159                 vector = ((uc_addr->addr_bytes[4] >> 2) |
5160                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
5161                 break;
5162         case 3:   /* use bits [43:32] of the address */
5163                 vector = ((uc_addr->addr_bytes[4]) |
5164                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
5165                 break;
5166         default:  /* Invalid mc_filter_type */
5167                 break;
5168         }
5169
5170         /* vector can only be 12-bits or boundary will be exceeded */
5171         vector &= 0xFFF;
5172         return vector;
5173 }
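/*
 * Worked example: with mc_filter_type == 0 and a MAC address ending in
 * ...:AB:CD (addr_bytes[4] = 0xAB, addr_bytes[5] = 0xCD), the vector is
 * (0xAB >> 4) | (0xCD << 4) = 0xCDA. The caller below then uses bit
 * (0xCDA & 0x1F) = 26 of UTA register (0xCDA >> 5) = 102.
 */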
5174
5175 static int
5176 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5177                         uint8_t on)
5178 {
5179         uint32_t vector;
5180         uint32_t uta_idx;
5181         uint32_t reg_val;
5182         uint32_t uta_shift;
5183         uint32_t rc;
5184         const uint32_t ixgbe_uta_idx_mask = 0x7F;
5185         const uint32_t ixgbe_uta_bit_shift = 5;
5186         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
5187         const uint32_t bit1 = 0x1;
5188
5189         struct ixgbe_hw *hw =
5190                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5191         struct ixgbe_uta_info *uta_info =
5192                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5193
5194         /* The UTA table only exists on 82599 hardware and newer */
5195         if (hw->mac.type < ixgbe_mac_82599EB)
5196                 return -ENOTSUP;
5197
5198         vector = ixgbe_uta_vector(hw, mac_addr);
5199         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
5200         uta_shift = vector & ixgbe_uta_bit_mask;
5201
5202         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
5203         if (rc == on)
5204                 return 0;
5205
5206         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
5207         if (on) {
5208                 uta_info->uta_in_use++;
5209                 reg_val |= (bit1 << uta_shift);
5210                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
5211         } else {
5212                 uta_info->uta_in_use--;
5213                 reg_val &= ~(bit1 << uta_shift);
5214                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
5215         }
5216
5217         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
5218
5219         if (uta_info->uta_in_use > 0)
5220                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
5221                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
5222         else
5223                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
5224
5225         return 0;
5226 }
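/*
 * Note: uta_shadow mirrors the hardware UTA registers so the bit test above
 * can be done without a register read, and MCSTCTRL.MFE is kept enabled only
 * while at least one UTA bit is in use.
 */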
5227
5228 static int
5229 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
5230 {
5231         int i;
5232         struct ixgbe_hw *hw =
5233                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5234         struct ixgbe_uta_info *uta_info =
5235                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5236
5237         /* The UTA table only exists on 82599 hardware and newer */
5238         if (hw->mac.type < ixgbe_mac_82599EB)
5239                 return -ENOTSUP;
5240
5241         if (on) {
5242                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5243                         uta_info->uta_shadow[i] = ~0;
5244                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
5245                 }
5246         } else {
5247                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5248                         uta_info->uta_shadow[i] = 0;
5249                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
5250                 }
5251         }
5252         return 0;
5253
5254 }
5255
5256 uint32_t
5257 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
5258 {
5259         uint32_t new_val = orig_val;
5260
5261         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
5262                 new_val |= IXGBE_VMOLR_AUPE;
5263         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
5264                 new_val |= IXGBE_VMOLR_ROMPE;
5265         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
5266                 new_val |= IXGBE_VMOLR_ROPE;
5267         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
5268                 new_val |= IXGBE_VMOLR_BAM;
5269         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
5270                 new_val |= IXGBE_VMOLR_MPE;
5271
5272         return new_val;
5273 }
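
/*
 * Example of the mask conversion above (illustrative only): an rx_mask of
 * ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST applied to a zero base
 * value yields IXGBE_VMOLR_AUPE | IXGBE_VMOLR_BAM, i.e. the pool accepts
 * untagged and broadcast frames:
 *
 *     uint32_t vmolr = ixgbe_convert_vm_rx_mask_to_val(
 *             ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST, 0);
 */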
5274
5275 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
5276 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
5277 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
5278 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
5279 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5280         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5281         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5282
5283 static int
5284 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5285                       struct rte_eth_mirror_conf *mirror_conf,
5286                       uint8_t rule_id, uint8_t on)
5287 {
5288         uint32_t mr_ctl, vlvf;
5289         uint32_t mp_lsb = 0;
5290         uint32_t mv_msb = 0;
5291         uint32_t mv_lsb = 0;
5292         uint32_t mp_msb = 0;
5293         uint8_t i = 0;
5294         int reg_index = 0;
5295         uint64_t vlan_mask = 0;
5296
5297         const uint8_t pool_mask_offset = 32;
5298         const uint8_t vlan_mask_offset = 32;
5299         const uint8_t dst_pool_offset = 8;
5300         const uint8_t rule_mr_offset  = 4;
5301         const uint8_t mirror_rule_mask = 0x0F;
5302
5303         struct ixgbe_mirror_info *mr_info =
5304                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5305         struct ixgbe_hw *hw =
5306                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5307         uint8_t mirror_type = 0;
5308
5309         if (ixgbe_vt_check(hw) < 0)
5310                 return -ENOTSUP;
5311
5312         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5313                 return -EINVAL;
5314
5315         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5316                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5317                             mirror_conf->rule_type);
5318                 return -EINVAL;
5319         }
5320
5321         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5322                 mirror_type |= IXGBE_MRCTL_VLME;
5323                 /* Check if the VLAN ID is valid and find the
5324                  * corresponding VLAN ID index in VLVF
5325                  */
5326                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5327                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5328                                 /* search for the pool VLAN filter index
5329                                  * related to this VLAN ID
5330                                  */
5331                                 reg_index = ixgbe_find_vlvf_slot(
5332                                                 hw,
5333                                                 mirror_conf->vlan.vlan_id[i],
5334                                                 false);
5335                                 if (reg_index < 0)
5336                                         return -EINVAL;
5337                                 vlvf = IXGBE_READ_REG(hw,
5338                                                       IXGBE_VLVF(reg_index));
5339                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
5340                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5341                                       mirror_conf->vlan.vlan_id[i]))
5342                                         vlan_mask |= (1ULL << reg_index);
5343                                 else
5344                                         return -EINVAL;
5345                         }
5346                 }
5347
5348                 if (on) {
5349                         mv_lsb = vlan_mask & 0xFFFFFFFF;
5350                         mv_msb = vlan_mask >> vlan_mask_offset;
5351
5352                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
5353                                                 mirror_conf->vlan.vlan_mask;
5354                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5355                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5356                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5357                                                 mirror_conf->vlan.vlan_id[i];
5358                         }
5359                 } else {
5360                         mv_lsb = 0;
5361                         mv_msb = 0;
5362                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5363                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5364                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5365                 }
5366         }
5367
5368         /**
5369          * If pool mirroring is enabled, write the related pool mask
5370          * register; if it is disabled, clear the PFMRVM register.
5371          */
5372         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5373                 mirror_type |= IXGBE_MRCTL_VPME;
5374                 if (on) {
5375                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5376                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5377                         mr_info->mr_conf[rule_id].pool_mask =
5378                                         mirror_conf->pool_mask;
5379
5380                 } else {
5381                         mp_lsb = 0;
5382                         mp_msb = 0;
5383                         mr_info->mr_conf[rule_id].pool_mask = 0;
5384                 }
5385         }
5386         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5387                 mirror_type |= IXGBE_MRCTL_UPME;
5388         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5389                 mirror_type |= IXGBE_MRCTL_DPME;
5390
5391         /* read mirror control register and recalculate it */
5392         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5393
5394         if (on) {
5395                 mr_ctl |= mirror_type;
5396                 mr_ctl &= mirror_rule_mask;
5397                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
5398         } else {
5399                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
5400         }
5401
5402         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5403         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5404
5405         /* write mirror control register */
5406         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5407
5408         /* write pool mirror control register */
5409         if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
5410                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5411                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5412                                 mp_msb);
5413         }
5414         /* write VLAN mirror control register */
5415         if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
5416                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5417                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5418                                 mv_msb);
5419         }
5420
5421         return 0;
5422 }
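
/*
 * Illustrative usage sketch (not part of the driver): an application would
 * typically install a mirror rule through the ethdev API, which ends up in
 * the callback above.  port_id, the destination pool and the pool mask are
 * placeholder values.
 *
 *     struct rte_eth_mirror_conf mc = {
 *             .rule_type = ETH_MIRROR_VIRTUAL_POOL_UP,
 *             .dst_pool  = 2,
 *             .pool_mask = 0x3,   // mirror traffic of pools 0 and 1
 *     };
 *     rte_eth_mirror_rule_set(port_id, &mc, 0, 1);  // enable rule 0
 *     rte_eth_mirror_rule_reset(port_id, 0);        // later, clear rule 0
 */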
5423
5424 static int
5425 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5426 {
5427         int mr_ctl = 0;
5428         uint32_t lsb_val = 0;
5429         uint32_t msb_val = 0;
5430         const uint8_t rule_mr_offset = 4;
5431
5432         struct ixgbe_hw *hw =
5433                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5434         struct ixgbe_mirror_info *mr_info =
5435                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5436
5437         if (ixgbe_vt_check(hw) < 0)
5438                 return -ENOTSUP;
5439
5440         memset(&mr_info->mr_conf[rule_id], 0,
5441                sizeof(struct rte_eth_mirror_conf));
5442
5443         /* clear PFVMCTL register */
5444         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5445
5446         /* clear pool mask register */
5447         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5448         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5449
5450         /* clear vlan mask register */
5451         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5452         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
5453
5454         return 0;
5455 }
5456
5457 static int
5458 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5459 {
5460         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5461         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5462         uint32_t mask;
5463         struct ixgbe_hw *hw =
5464                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5465
5466         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5467         mask |= (1 << IXGBE_MISC_VEC_ID);
5468         RTE_SET_USED(queue_id);
5469         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5470
5471         rte_intr_enable(intr_handle);
5472
5473         return 0;
5474 }
5475
5476 static int
5477 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5478 {
5479         uint32_t mask;
5480         struct ixgbe_hw *hw =
5481                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5482
5483         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5484         mask &= ~(1 << IXGBE_MISC_VEC_ID);
5485         RTE_SET_USED(queue_id);
5486         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5487
5488         return 0;
5489 }
5490
5491 static int
5492 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5493 {
5494         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5495         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5496         uint32_t mask;
5497         struct ixgbe_hw *hw =
5498                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5499         struct ixgbe_interrupt *intr =
5500                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5501
5502         if (queue_id < 16) {
5503                 ixgbe_disable_intr(hw);
5504                 intr->mask |= (1 << queue_id);
5505                 ixgbe_enable_intr(dev);
5506         } else if (queue_id < 32) {
5507                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5508                 mask &= (1 << queue_id);
5509                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5510         } else if (queue_id < 64) {
5511                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5512                 mask &= (1 << (queue_id - 32));
5513                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5514         }
5515         rte_intr_enable(intr_handle);
5516
5517         return 0;
5518 }
5519
5520 static int
5521 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5522 {
5523         uint32_t mask;
5524         struct ixgbe_hw *hw =
5525                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5526         struct ixgbe_interrupt *intr =
5527                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5528
5529         if (queue_id < 16) {
5530                 ixgbe_disable_intr(hw);
5531                 intr->mask &= ~(1 << queue_id);
5532                 ixgbe_enable_intr(dev);
5533         } else if (queue_id < 32) {
5534                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5535                 mask &= ~(1 << queue_id);
5536                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5537         } else if (queue_id < 64) {
5538                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5539                 mask &= ~(1 << (queue_id - 32));
5540                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5541         }
5542
5543         return 0;
5544 }
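
/*
 * Illustrative sketch (not part of the driver): the Rx interrupt callbacks
 * above are reached through the ethdev Rx interrupt API, typically from an
 * event loop that re-arms the queue interrupt after each wakeup.  port_id and
 * queue_id are placeholders.
 *
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);   // arm before sleeping
 *     // ... wait on the interrupt event fd, then poll the queue ...
 *     rte_eth_dev_rx_intr_disable(port_id, queue_id);  // mask while polling
 */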
5545
5546 static void
5547 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5548                      uint8_t queue, uint8_t msix_vector)
5549 {
5550         uint32_t tmp, idx;
5551
5552         if (direction == -1) {
5553                 /* other causes */
5554                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5555                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
5556                 tmp &= ~0xFF;
5557                 tmp |= msix_vector;
5558                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
5559         } else {
5560                 /* rx or tx cause */
5561                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5562                 idx = ((16 * (queue & 1)) + (8 * direction));
5563                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
5564                 tmp &= ~(0xFF << idx);
5565                 tmp |= (msix_vector << idx);
5566                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
5567         }
5568 }
5569
5570 /**
5571  * set the IVAR registers, mapping interrupt causes to vectors
5572  * @param hw
5573  *  pointer to ixgbe_hw struct
5574  * @direction
5575  *  0 for Rx, 1 for Tx, -1 for other causes
5576  * @queue
5577  *  queue to map the corresponding interrupt to
5578  * @msix_vector
5579  *  the vector to map to the corresponding queue
5580  */
5581 static void
5582 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5583                    uint8_t queue, uint8_t msix_vector)
5584 {
5585         uint32_t tmp, idx;
5586
5587         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5588         if (hw->mac.type == ixgbe_mac_82598EB) {
5589                 if (direction == -1)
5590                         direction = 0;
5591                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
5592                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
5593                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
5594                 tmp |= (msix_vector << (8 * (queue & 0x3)));
5595                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
5596         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
5597                         (hw->mac.type == ixgbe_mac_X540)) {
5598                 if (direction == -1) {
5599                         /* other causes */
5600                         idx = ((queue & 1) * 8);
5601                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5602                         tmp &= ~(0xFF << idx);
5603                         tmp |= (msix_vector << idx);
5604                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
5605                 } else {
5606                         /* rx or tx causes */
5607                         idx = ((16 * (queue & 1)) + (8 * direction));
5608                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
5609                         tmp &= ~(0xFF << idx);
5610                         tmp |= (msix_vector << idx);
5611                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
5612                 }
5613         }
5614 }
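
/*
 * Worked example of the IVAR indexing above (82599/X540 layout): Rx queue 5
 * mapped to MSI-X vector 3 uses IVAR(5 >> 1) = IVAR(2) and byte offset
 * idx = 16 * (5 & 1) + 8 * 0 = 16, so bits 23:16 of IVAR(2) receive
 * (3 | IXGBE_IVAR_ALLOC_VAL).  The Tx cause of the same queue would land at
 * idx = 16 * 1 + 8 * 1 = 24, i.e. bits 31:24 of the same register.
 */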
5615
5616 static void
5617 ixgbevf_configure_msix(struct rte_eth_dev *dev)
5618 {
5619         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5620         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5621         struct ixgbe_hw *hw =
5622                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5623         uint32_t q_idx;
5624         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
5625
5626         /* Configure VF other cause ivar */
5627         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
5628
5629         /* Don't configure the MSI-X registers if no mapping has been
5630          * done between interrupt vectors and event fds.
5631          */
5632         if (!rte_intr_dp_is_en(intr_handle))
5633                 return;
5634
5635         /* Configure all RX queues of VF */
5636         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
5637                 /* Force all queues to use vector 0,
5638                  * as IXGBE_VF_MAXMSIVECOTR = 1
5639                  */
5640                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
5641                 intr_handle->intr_vec[q_idx] = vector_idx;
5642         }
5643 }
5644
5645 /**
5646  * Sets up the hardware to properly generate MSI-X interrupts
5647  * @hw
5648  *  board private structure
5649  */
5650 static void
5651 ixgbe_configure_msix(struct rte_eth_dev *dev)
5652 {
5653         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5654         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5655         struct ixgbe_hw *hw =
5656                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5657         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
5658         uint32_t vec = IXGBE_MISC_VEC_ID;
5659         uint32_t mask;
5660         uint32_t gpie;
5661
5662         /* Don't configure the MSI-X registers if no mapping has been
5663          * done between interrupt vectors and event fds.
5664          */
5665         if (!rte_intr_dp_is_en(intr_handle))
5666                 return;
5667
5668         if (rte_intr_allow_others(intr_handle))
5669                 vec = base = IXGBE_RX_VEC_START;
5670
5671         /* setup GPIE for MSI-x mode */
5672         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5673         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5674                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
5675         /* Auto-clear and auto-set the corresponding bits in EIMS
5676          * when an MSI-X interrupt is triggered.
5677          */
5678         if (hw->mac.type == ixgbe_mac_82598EB) {
5679                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5680         } else {
5681                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5682                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5683         }
5684         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5685
5686         /* Populate the IVAR table and set the ITR values to the
5687          * corresponding register.
5688          */
5689         for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
5690              queue_id++) {
5691                 /* by default, 1:1 mapping */
5692                 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
5693                 intr_handle->intr_vec[queue_id] = vec;
5694                 if (vec < base + intr_handle->nb_efd - 1)
5695                         vec++;
5696         }
5697
5698         switch (hw->mac.type) {
5699         case ixgbe_mac_82598EB:
5700                 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
5701                                    IXGBE_MISC_VEC_ID);
5702                 break;
5703         case ixgbe_mac_82599EB:
5704         case ixgbe_mac_X540:
5705                 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
5706                 break;
5707         default:
5708                 break;
5709         }
5710         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
5711                         IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
5712
5713         /* set up to auto-clear the timer and the vectors */
5714         mask = IXGBE_EIMS_ENABLE_MASK;
5715         mask &= ~(IXGBE_EIMS_OTHER |
5716                   IXGBE_EIMS_MAILBOX |
5717                   IXGBE_EIMS_LSC);
5718
5719         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5720 }
5721
5722 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
5723         uint16_t queue_idx, uint16_t tx_rate)
5724 {
5725         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5726         uint32_t rf_dec, rf_int;
5727         uint32_t bcnrc_val;
5728         uint16_t link_speed = dev->data->dev_link.link_speed;
5729
5730         if (queue_idx >= hw->mac.max_tx_queues)
5731                 return -EINVAL;
5732
5733         if (tx_rate != 0) {
5734                 /* Calculate the rate factor values to set */
5735                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
5736                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
5737                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
5738
5739                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
5740                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
5741                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
5742                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
5743         } else {
5744                 bcnrc_val = 0;
5745         }
5746
5747         /*
5748          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
5749          * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
5750          * set as 0x4.
5751          */
5752         if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
5753                 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
5754                                 IXGBE_MAX_JUMBO_FRAME_SIZE))
5755                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5756                         IXGBE_MMW_SIZE_JUMBO_FRAME);
5757         else
5758                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5759                         IXGBE_MMW_SIZE_DEFAULT);
5760
5761         /* Set RTTBCNRC of queue X */
5762         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
5763         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
5764         IXGBE_WRITE_FLUSH(hw);
5765
5766         return 0;
5767 }
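
/*
 * Worked example of the rate-factor arithmetic above (illustrative): with a
 * 10G link (link_speed = 10000) and tx_rate = 3000 Mbps, rf_int = 10000 / 3000
 * = 3 and rf_dec = ((10000 % 3000) << IXGBE_RTTBCNRC_RF_INT_SHIFT) / 3000
 * = 5461, encoding a rate factor of roughly 3.33.  Applications normally reach
 * this callback through the ethdev API; port_id below is a placeholder.
 *
 *     rte_eth_set_queue_rate_limit(port_id, 0, 3000);  // cap Tx queue 0 at ~3 Gbps
 */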
5768
5769 static int
5770 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5771                      __attribute__((unused)) uint32_t index,
5772                      __attribute__((unused)) uint32_t pool)
5773 {
5774         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5775         int diag;
5776
5777         /*
5778          * On an 82599 VF, adding the same MAC address again is not an
5779          * idempotent operation. Trap this case to avoid exhausting the
5780          * [very limited] set of PF resources used to store VF MAC addresses.
5781          */
5782         if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5783                 return -1;
5784         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5785         if (diag != 0)
5786                 PMD_DRV_LOG(ERR, "Unable to add MAC address "
5787                             "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
5788                             mac_addr->addr_bytes[0],
5789                             mac_addr->addr_bytes[1],
5790                             mac_addr->addr_bytes[2],
5791                             mac_addr->addr_bytes[3],
5792                             mac_addr->addr_bytes[4],
5793                             mac_addr->addr_bytes[5],
5794                             diag);
5795         return diag;
5796 }
5797
5798 static void
5799 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
5800 {
5801         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5802         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
5803         struct ether_addr *mac_addr;
5804         uint32_t i;
5805         int diag;
5806
5807         /*
5808          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
5809          * not support the deletion of a given MAC address.
5810          * Instead, it requires deleting all MAC addresses, then re-adding
5811          * every MAC address except the one to be deleted.
5812          */
5813         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
5814
5815         /*
5816          * Add all MAC addresses back again, except the deleted one
5817          * and the permanent MAC address.
5818          */
5819         for (i = 0, mac_addr = dev->data->mac_addrs;
5820              i < hw->mac.num_rar_entries; i++, mac_addr++) {
5821                 /* Skip the deleted MAC address */
5822                 if (i == index)
5823                         continue;
5824                 /* Skip NULL MAC addresses */
5825                 if (is_zero_ether_addr(mac_addr))
5826                         continue;
5827                 /* Skip the permanent MAC address */
5828                 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5829                         continue;
5830                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5831                 if (diag != 0)
5832                         PMD_DRV_LOG(ERR,
5833                                     "Adding again MAC address "
5834                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
5835                                     "diag=%d",
5836                                     mac_addr->addr_bytes[0],
5837                                     mac_addr->addr_bytes[1],
5838                                     mac_addr->addr_bytes[2],
5839                                     mac_addr->addr_bytes[3],
5840                                     mac_addr->addr_bytes[4],
5841                                     mac_addr->addr_bytes[5],
5842                                     diag);
5843         }
5844 }
5845
5846 static void
5847 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
5848 {
5849         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5850
5851         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
5852 }
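
/*
 * Illustrative sketch (not part of the driver): the three VF MAC-address
 * callbacks above are reached through the generic ethdev API.  port_id and
 * the address bytes are placeholders for the example.
 *
 *     struct ether_addr vf_mac = { .addr_bytes = {0x02, 0x09, 0xc0, 0x00, 0x00, 0x01} };
 *     rte_eth_dev_mac_addr_add(port_id, &vf_mac, 0);        // ixgbevf_add_mac_addr()
 *     rte_eth_dev_default_mac_addr_set(port_id, &vf_mac);   // ixgbevf_set_default_mac_addr()
 *     rte_eth_dev_mac_addr_remove(port_id, &vf_mac);        // ixgbevf_remove_mac_addr()
 */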
5853
5854 int
5855 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
5856                         struct rte_eth_syn_filter *filter,
5857                         bool add)
5858 {
5859         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5860         struct ixgbe_filter_info *filter_info =
5861                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5862         uint32_t syn_info;
5863         uint32_t synqf;
5864
5865         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
5866                 return -EINVAL;
5867
5868         syn_info = filter_info->syn_info;
5869
5870         if (add) {
5871                 if (syn_info & IXGBE_SYN_FILTER_ENABLE)
5872                         return -EINVAL;
5873                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
5874                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
5875
5876                 if (filter->hig_pri)
5877                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
5878                 else
5879                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
5880         } else {
5881                 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
5882                 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
5883                         return -ENOENT;
5884                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
5885         }
5886
5887         filter_info->syn_info = synqf;
5888         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
5889         IXGBE_WRITE_FLUSH(hw);
5890         return 0;
5891 }
5892
5893 static int
5894 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
5895                         struct rte_eth_syn_filter *filter)
5896 {
5897         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5898         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
5899
5900         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
5901                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
5902                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
5903                 return 0;
5904         }
5905         return -ENOENT;
5906 }
5907
5908 static int
5909 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
5910                         enum rte_filter_op filter_op,
5911                         void *arg)
5912 {
5913         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5914         int ret;
5915
5916         MAC_TYPE_FILTER_SUP(hw->mac.type);
5917
5918         if (filter_op == RTE_ETH_FILTER_NOP)
5919                 return 0;
5920
5921         if (arg == NULL) {
5922                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
5923                             filter_op);
5924                 return -EINVAL;
5925         }
5926
5927         switch (filter_op) {
5928         case RTE_ETH_FILTER_ADD:
5929                 ret = ixgbe_syn_filter_set(dev,
5930                                 (struct rte_eth_syn_filter *)arg,
5931                                 TRUE);
5932                 break;
5933         case RTE_ETH_FILTER_DELETE:
5934                 ret = ixgbe_syn_filter_set(dev,
5935                                 (struct rte_eth_syn_filter *)arg,
5936                                 FALSE);
5937                 break;
5938         case RTE_ETH_FILTER_GET:
5939                 ret = ixgbe_syn_filter_get(dev,
5940                                 (struct rte_eth_syn_filter *)arg);
5941                 break;
5942         default:
5943                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
5944                 ret = -EINVAL;
5945                 break;
5946         }
5947
5948         return ret;
5949 }
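
/*
 * Illustrative sketch (not part of the driver): a TCP SYN filter is normally
 * installed through the filter-ctrl API, which dispatches to the handler
 * above.  port_id and the queue number are placeholders.
 *
 *     struct rte_eth_syn_filter sf = { .hig_pri = 1, .queue = 3 };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
 *                             RTE_ETH_FILTER_ADD, &sf);
 */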
5950
5951
5952 static inline enum ixgbe_5tuple_protocol
5953 convert_protocol_type(uint8_t protocol_value)
5954 {
5955         if (protocol_value == IPPROTO_TCP)
5956                 return IXGBE_FILTER_PROTOCOL_TCP;
5957         else if (protocol_value == IPPROTO_UDP)
5958                 return IXGBE_FILTER_PROTOCOL_UDP;
5959         else if (protocol_value == IPPROTO_SCTP)
5960                 return IXGBE_FILTER_PROTOCOL_SCTP;
5961         else
5962                 return IXGBE_FILTER_PROTOCOL_NONE;
5963 }
5964
5965 /* inject a 5-tuple filter to HW */
5966 static inline void
5967 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
5968                            struct ixgbe_5tuple_filter *filter)
5969 {
5970         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5971         int i;
5972         uint32_t ftqf, sdpqf;
5973         uint32_t l34timir = 0;
5974         uint8_t mask = 0xff;
5975
5976         i = filter->index;
5977
5978         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
5979                                 IXGBE_SDPQF_DSTPORT_SHIFT);
5980         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
5981
5982         ftqf = (uint32_t)(filter->filter_info.proto &
5983                 IXGBE_FTQF_PROTOCOL_MASK);
5984         ftqf |= (uint32_t)((filter->filter_info.priority &
5985                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
5986         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
5987                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
5988         if (filter->filter_info.dst_ip_mask == 0)
5989                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
5990         if (filter->filter_info.src_port_mask == 0)
5991                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
5992         if (filter->filter_info.dst_port_mask == 0)
5993                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
5994         if (filter->filter_info.proto_mask == 0)
5995                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
5996         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
5997         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
5998         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
5999
6000         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
6001         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
6002         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
6003         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
6004
6005         l34timir |= IXGBE_L34T_IMIR_RESERVE;
6006         l34timir |= (uint32_t)(filter->queue <<
6007                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
6008         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
6009 }
6010
6011 /*
6012  * add a 5tuple filter
6013  *
6014  * @param
6015  * dev: Pointer to struct rte_eth_dev.
6016  * filter: pointer to the filter that will be added; the filter index is
6017  *         allocated inside this function, and filter->queue gives the
6018  *         queue id the filter is assigned to.
6019  *
6020  * @return
6021  *    - On success, zero.
6022  *    - On failure, a negative value.
6023  */
6024 static int
6025 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
6026                         struct ixgbe_5tuple_filter *filter)
6027 {
6028         struct ixgbe_filter_info *filter_info =
6029                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6030         int i, idx, shift;
6031
6032         /*
6033          * look for an unused 5tuple filter index,
6034          * and insert the filter into the list.
6035          */
6036         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
6037                 idx = i / (sizeof(uint32_t) * NBBY);
6038                 shift = i % (sizeof(uint32_t) * NBBY);
6039                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
6040                         filter_info->fivetuple_mask[idx] |= 1 << shift;
6041                         filter->index = i;
6042                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
6043                                           filter,
6044                                           entries);
6045                         break;
6046                 }
6047         }
6048         if (i >= IXGBE_MAX_FTQF_FILTERS) {
6049                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
6050                 return -ENOSYS;
6051         }
6052
6053         ixgbe_inject_5tuple_filter(dev, filter);
6054
6055         return 0;
6056 }
6057
6058 /*
6059  * remove a 5tuple filter
6060  *
6061  * @param
6062  * dev: Pointer to struct rte_eth_dev.
6063  * filter: pointer to the filter to be removed.
6064  */
6065 static void
6066 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
6067                         struct ixgbe_5tuple_filter *filter)
6068 {
6069         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6070         struct ixgbe_filter_info *filter_info =
6071                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6072         uint16_t index = filter->index;
6073
6074         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
6075                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
6076         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
6077         rte_free(filter);
6078
6079         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
6080         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
6081         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
6082         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
6083         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
6084 }
6085
6086 static int
6087 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
6088 {
6089         struct ixgbe_hw *hw;
6090         uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
6091         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
6092
6093         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6094
6095         if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
6096                 return -EINVAL;
6097
6098         /* Refuse an MTU that requires scattered-packet support when that
6099          * feature has not been enabled before.
6100          */
6101         if (!rx_conf->enable_scatter &&
6102             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
6103              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
6104                 return -EINVAL;
6105
6106         /*
6107          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
6108          * request of the version 2.0 of the mailbox API.
6109          * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
6110          * of the mailbox API.
6111          * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
6112          * prior to 3.11.33 which contains the following change:
6113          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
6114          */
6115         ixgbevf_rlpml_set_vf(hw, max_frame);
6116
6117         /* update max frame size */
6118         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
6119         return 0;
6120 }
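
/*
 * Worked example for the VF MTU path above (illustrative): an MTU of 9000
 * gives max_frame = 9000 + ETHER_HDR_LEN + ETHER_CRC_LEN = 9018 bytes, which
 * is passed to the PF via the IXGBE_VF_SET_LPE request.  port_id is a
 * placeholder, and scattered Rx (or large enough mbufs) must already be
 * configured.
 *
 *     rte_eth_dev_set_mtu(port_id, 9000);
 */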
6121
6122 static inline struct ixgbe_5tuple_filter *
6123 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
6124                         struct ixgbe_5tuple_filter_info *key)
6125 {
6126         struct ixgbe_5tuple_filter *it;
6127
6128         TAILQ_FOREACH(it, filter_list, entries) {
6129                 if (memcmp(key, &it->filter_info,
6130                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
6131                         return it;
6132                 }
6133         }
6134         return NULL;
6135 }
6136
6137 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
6138 static inline int
6139 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
6140                         struct ixgbe_5tuple_filter_info *filter_info)
6141 {
6142         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
6143                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
6144                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
6145                 return -EINVAL;
6146
6147         switch (filter->dst_ip_mask) {
6148         case UINT32_MAX:
6149                 filter_info->dst_ip_mask = 0;
6150                 filter_info->dst_ip = filter->dst_ip;
6151                 break;
6152         case 0:
6153                 filter_info->dst_ip_mask = 1;
6154                 break;
6155         default:
6156                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
6157                 return -EINVAL;
6158         }
6159
6160         switch (filter->src_ip_mask) {
6161         case UINT32_MAX:
6162                 filter_info->src_ip_mask = 0;
6163                 filter_info->src_ip = filter->src_ip;
6164                 break;
6165         case 0:
6166                 filter_info->src_ip_mask = 1;
6167                 break;
6168         default:
6169                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
6170                 return -EINVAL;
6171         }
6172
6173         switch (filter->dst_port_mask) {
6174         case UINT16_MAX:
6175                 filter_info->dst_port_mask = 0;
6176                 filter_info->dst_port = filter->dst_port;
6177                 break;
6178         case 0:
6179                 filter_info->dst_port_mask = 1;
6180                 break;
6181         default:
6182                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
6183                 return -EINVAL;
6184         }
6185
6186         switch (filter->src_port_mask) {
6187         case UINT16_MAX:
6188                 filter_info->src_port_mask = 0;
6189                 filter_info->src_port = filter->src_port;
6190                 break;
6191         case 0:
6192                 filter_info->src_port_mask = 1;
6193                 break;
6194         default:
6195                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
6196                 return -EINVAL;
6197         }
6198
6199         switch (filter->proto_mask) {
6200         case UINT8_MAX:
6201                 filter_info->proto_mask = 0;
6202                 filter_info->proto =
6203                         convert_protocol_type(filter->proto);
6204                 break;
6205         case 0:
6206                 filter_info->proto_mask = 1;
6207                 break;
6208         default:
6209                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
6210                 return -EINVAL;
6211         }
6212
6213         filter_info->priority = (uint8_t)filter->priority;
6214         return 0;
6215 }
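
/*
 * Illustrative example of the mask convention above: in rte_eth_ntuple_filter
 * a mask of UINT32_MAX/UINT16_MAX/UINT8_MAX means "compare this field", and it
 * is translated into 0 in ixgbe_5tuple_filter_info (0 means compare in FTQF).
 * The values below are placeholders; dst_ip_be is assumed to hold a big-endian
 * IPv4 address.
 *
 *     struct rte_eth_ntuple_filter nf = {
 *             .flags         = RTE_5TUPLE_FLAGS,
 *             .dst_ip        = dst_ip_be,
 *             .dst_ip_mask   = UINT32_MAX,   // match dst_ip exactly
 *             .src_ip_mask   = 0,            // ignore src_ip
 *             .dst_port      = rte_cpu_to_be_16(80),
 *             .dst_port_mask = UINT16_MAX,
 *             .src_port_mask = 0,
 *             .proto         = IPPROTO_TCP,
 *             .proto_mask    = UINT8_MAX,
 *             .priority      = 1,
 *             .queue         = 4,
 *     };
 */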
6216
6217 /*
6218  * add or delete an ntuple filter
6219  *
6220  * @param
6221  * dev: Pointer to struct rte_eth_dev.
6222  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6223  * add: if true, add filter, if false, remove filter
6224  *
6225  * @return
6226  *    - On success, zero.
6227  *    - On failure, a negative value.
6228  */
6229 int
6230 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
6231                         struct rte_eth_ntuple_filter *ntuple_filter,
6232                         bool add)
6233 {
6234         struct ixgbe_filter_info *filter_info =
6235                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6236         struct ixgbe_5tuple_filter_info filter_5tuple;
6237         struct ixgbe_5tuple_filter *filter;
6238         int ret;
6239
6240         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6241                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6242                 return -EINVAL;
6243         }
6244
6245         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6246         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6247         if (ret < 0)
6248                 return ret;
6249
6250         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6251                                          &filter_5tuple);
6252         if (filter != NULL && add) {
6253                 PMD_DRV_LOG(ERR, "filter exists.");
6254                 return -EEXIST;
6255         }
6256         if (filter == NULL && !add) {
6257                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6258                 return -ENOENT;
6259         }
6260
6261         if (add) {
6262                 filter = rte_zmalloc("ixgbe_5tuple_filter",
6263                                 sizeof(struct ixgbe_5tuple_filter), 0);
6264                 if (filter == NULL)
6265                         return -ENOMEM;
6266                 (void)rte_memcpy(&filter->filter_info,
6267                                  &filter_5tuple,
6268                                  sizeof(struct ixgbe_5tuple_filter_info));
6269                 filter->queue = ntuple_filter->queue;
6270                 ret = ixgbe_add_5tuple_filter(dev, filter);
6271                 if (ret < 0) {
6272                         rte_free(filter);
6273                         return ret;
6274                 }
6275         } else
6276                 ixgbe_remove_5tuple_filter(dev, filter);
6277
6278         return 0;
6279 }
6280
6281 /*
6282  * get an ntuple filter
6283  *
6284  * @param
6285  * dev: Pointer to struct rte_eth_dev.
6286  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6287  *
6288  * @return
6289  *    - On success, zero.
6290  *    - On failure, a negative value.
6291  */
6292 static int
6293 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
6294                         struct rte_eth_ntuple_filter *ntuple_filter)
6295 {
6296         struct ixgbe_filter_info *filter_info =
6297                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6298         struct ixgbe_5tuple_filter_info filter_5tuple;
6299         struct ixgbe_5tuple_filter *filter;
6300         int ret;
6301
6302         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6303                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6304                 return -EINVAL;
6305         }
6306
6307         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6308         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6309         if (ret < 0)
6310                 return ret;
6311
6312         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6313                                          &filter_5tuple);
6314         if (filter == NULL) {
6315                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6316                 return -ENOENT;
6317         }
6318         ntuple_filter->queue = filter->queue;
6319         return 0;
6320 }
6321
6322 /*
6323  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
6324  * @dev: pointer to rte_eth_dev structure
6325  * @filter_op: operation to be taken.
6326  * @arg: a pointer to specific structure corresponding to the filter_op
6327  *
6328  * @return
6329  *    - On success, zero.
6330  *    - On failure, a negative value.
6331  */
6332 static int
6333 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
6334                                 enum rte_filter_op filter_op,
6335                                 void *arg)
6336 {
6337         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6338         int ret;
6339
6340         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
6341
6342         if (filter_op == RTE_ETH_FILTER_NOP)
6343                 return 0;
6344
6345         if (arg == NULL) {
6346                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6347                             filter_op);
6348                 return -EINVAL;
6349         }
6350
6351         switch (filter_op) {
6352         case RTE_ETH_FILTER_ADD:
6353                 ret = ixgbe_add_del_ntuple_filter(dev,
6354                         (struct rte_eth_ntuple_filter *)arg,
6355                         TRUE);
6356                 break;
6357         case RTE_ETH_FILTER_DELETE:
6358                 ret = ixgbe_add_del_ntuple_filter(dev,
6359                         (struct rte_eth_ntuple_filter *)arg,
6360                         FALSE);
6361                 break;
6362         case RTE_ETH_FILTER_GET:
6363                 ret = ixgbe_get_ntuple_filter(dev,
6364                         (struct rte_eth_ntuple_filter *)arg);
6365                 break;
6366         default:
6367                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6368                 ret = -EINVAL;
6369                 break;
6370         }
6371         return ret;
6372 }
6373
6374 int
6375 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6376                         struct rte_eth_ethertype_filter *filter,
6377                         bool add)
6378 {
6379         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6380         struct ixgbe_filter_info *filter_info =
6381                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6382         uint32_t etqf = 0;
6383         uint32_t etqs = 0;
6384         int ret;
6385         struct ixgbe_ethertype_filter ethertype_filter;
6386
6387         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6388                 return -EINVAL;
6389
6390         if (filter->ether_type == ETHER_TYPE_IPv4 ||
6391                 filter->ether_type == ETHER_TYPE_IPv6) {
6392                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
6393                         " ethertype filter.", filter->ether_type);
6394                 return -EINVAL;
6395         }
6396
6397         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6398                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6399                 return -EINVAL;
6400         }
6401         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6402                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
6403                 return -EINVAL;
6404         }
6405
6406         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6407         if (ret >= 0 && add) {
6408                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6409                             filter->ether_type);
6410                 return -EEXIST;
6411         }
6412         if (ret < 0 && !add) {
6413                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6414                             filter->ether_type);
6415                 return -ENOENT;
6416         }
6417
6418         if (add) {
6419                 etqf = IXGBE_ETQF_FILTER_EN;
6420                 etqf |= (uint32_t)filter->ether_type;
6421                 etqs |= (uint32_t)((filter->queue <<
6422                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
6423                                     IXGBE_ETQS_RX_QUEUE);
6424                 etqs |= IXGBE_ETQS_QUEUE_EN;
6425
6426                 ethertype_filter.ethertype = filter->ether_type;
6427                 ethertype_filter.etqf = etqf;
6428                 ethertype_filter.etqs = etqs;
6429                 ethertype_filter.conf = FALSE;
6430                 ret = ixgbe_ethertype_filter_insert(filter_info,
6431                                                     &ethertype_filter);
6432                 if (ret < 0) {
6433                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
6434                         return -ENOSPC;
6435                 }
6436         } else {
6437                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6438                 if (ret < 0)
6439                         return -ENOSYS;
6440         }
6441         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6442         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6443         IXGBE_WRITE_FLUSH(hw);
6444
6445         return 0;
6446 }
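
/*
 * Illustrative sketch (not part of the driver): steering a non-IP ethertype
 * to a dedicated Rx queue through the filter-ctrl API, which ends up in the
 * function above.  port_id, the ethertype and the queue are placeholders;
 * IPv4/IPv6 ethertypes are rejected by this filter.
 *
 *     struct rte_eth_ethertype_filter ef = {
 *             .ether_type = 0x88F7,   // e.g. PTP over Ethernet
 *             .flags      = 0,
 *             .queue      = 2,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                             RTE_ETH_FILTER_ADD, &ef);
 */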
6447
6448 static int
6449 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
6450                         struct rte_eth_ethertype_filter *filter)
6451 {
6452         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6453         struct ixgbe_filter_info *filter_info =
6454                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6455         uint32_t etqf, etqs;
6456         int ret;
6457
6458         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6459         if (ret < 0) {
6460                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6461                             filter->ether_type);
6462                 return -ENOENT;
6463         }
6464
6465         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
6466         if (etqf & IXGBE_ETQF_FILTER_EN) {
6467                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
6468                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
6469                 filter->flags = 0;
6470                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
6471                                IXGBE_ETQS_RX_QUEUE_SHIFT;
6472                 return 0;
6473         }
6474         return -ENOENT;
6475 }
6476
6477 /*
6478  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
6479  * @dev: pointer to rte_eth_dev structure
6480  * @filter_op: operation to be taken.
6481  * @arg: a pointer to specific structure corresponding to the filter_op
6482  */
6483 static int
6484 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
6485                                 enum rte_filter_op filter_op,
6486                                 void *arg)
6487 {
6488         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6489         int ret;
6490
6491         MAC_TYPE_FILTER_SUP(hw->mac.type);
6492
6493         if (filter_op == RTE_ETH_FILTER_NOP)
6494                 return 0;
6495
6496         if (arg == NULL) {
6497                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6498                             filter_op);
6499                 return -EINVAL;
6500         }
6501
6502         switch (filter_op) {
6503         case RTE_ETH_FILTER_ADD:
6504                 ret = ixgbe_add_del_ethertype_filter(dev,
6505                         (struct rte_eth_ethertype_filter *)arg,
6506                         TRUE);
6507                 break;
6508         case RTE_ETH_FILTER_DELETE:
6509                 ret = ixgbe_add_del_ethertype_filter(dev,
6510                         (struct rte_eth_ethertype_filter *)arg,
6511                         FALSE);
6512                 break;
6513         case RTE_ETH_FILTER_GET:
6514                 ret = ixgbe_get_ethertype_filter(dev,
6515                         (struct rte_eth_ethertype_filter *)arg);
6516                 break;
6517         default:
6518                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6519                 ret = -EINVAL;
6520                 break;
6521         }
6522         return ret;
6523 }
6524
6525 static int
6526 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
6527                      enum rte_filter_type filter_type,
6528                      enum rte_filter_op filter_op,
6529                      void *arg)
6530 {
6531         int ret = 0;
6532
6533         switch (filter_type) {
6534         case RTE_ETH_FILTER_NTUPLE:
6535                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
6536                 break;
6537         case RTE_ETH_FILTER_ETHERTYPE:
6538                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
6539                 break;
6540         case RTE_ETH_FILTER_SYN:
6541                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
6542                 break;
6543         case RTE_ETH_FILTER_FDIR:
6544                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
6545                 break;
6546         case RTE_ETH_FILTER_L2_TUNNEL:
6547                 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
6548                 break;
6549         case RTE_ETH_FILTER_GENERIC:
6550                 if (filter_op != RTE_ETH_FILTER_GET)
6551                         return -EINVAL;
6552                 *(const void **)arg = &ixgbe_flow_ops;
6553                 break;
6554         default:
6555                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
6556                                                         filter_type);
6557                 ret = -EINVAL;
6558                 break;
6559         }
6560
6561         return ret;
6562 }
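
/*
 * Illustrative note: the RTE_ETH_FILTER_GENERIC branch above is how the
 * rte_flow library discovers the PMD's flow callbacks; it performs roughly
 * the equivalent of the sketch below (port_id is a placeholder).
 *
 *     const struct rte_flow_ops *ops;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                             RTE_ETH_FILTER_GET, &ops);
 *     // ops now points to ixgbe_flow_ops
 */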
6563
6564 static u8 *
6565 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
6566                         u8 **mc_addr_ptr, u32 *vmdq)
6567 {
6568         u8 *mc_addr;
6569
6570         *vmdq = 0;
6571         mc_addr = *mc_addr_ptr;
6572         *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
6573         return mc_addr;
6574 }
6575
6576 static int
6577 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
6578                           struct ether_addr *mc_addr_set,
6579                           uint32_t nb_mc_addr)
6580 {
6581         struct ixgbe_hw *hw;
6582         u8 *mc_addr_list;
6583
6584         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6585         mc_addr_list = (u8 *)mc_addr_set;
6586         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
6587                                          ixgbe_dev_addr_list_itr, TRUE);
6588 }
6589
6590 static uint64_t
6591 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
6592 {
6593         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6594         uint64_t systime_cycles;
6595
6596         switch (hw->mac.type) {
6597         case ixgbe_mac_X550:
6598         case ixgbe_mac_X550EM_x:
6599         case ixgbe_mac_X550EM_a:
6600                 /* SYSTIML stores ns and SYSTIMH stores seconds. */
6601                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6602                 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6603                                 * NSEC_PER_SEC;
6604                 break;
6605         default:
6606                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6607                 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6608                                 << 32;
6609         }
6610
6611         return systime_cycles;
6612 }
6613
6614 static uint64_t
6615 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6616 {
6617         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6618         uint64_t rx_tstamp_cycles;
6619
6620         switch (hw->mac.type) {
6621         case ixgbe_mac_X550:
6622         case ixgbe_mac_X550EM_x:
6623         case ixgbe_mac_X550EM_a:
6624                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
6625                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6626                 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6627                                 * NSEC_PER_SEC;
6628                 break;
6629         default:
6630                 /* RXSTMPL stores the low 32 bits, RXSTMPH the high 32 bits. */
6631                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6632                 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6633                                 << 32;
6634         }
6635
6636         return rx_tstamp_cycles;
6637 }
6638
6639 static uint64_t
6640 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6641 {
6642         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6643         uint64_t tx_tstamp_cycles;
6644
6645         switch (hw->mac.type) {
6646         case ixgbe_mac_X550:
6647         case ixgbe_mac_X550EM_x:
6648         case ixgbe_mac_X550EM_a:
6649                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
6650                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6651                 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6652                                 * NSEC_PER_SEC;
6653                 break;
6654         default:
6655                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
6656                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6657                 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6658                                 << 32;
6659         }
6660
6661         return tx_tstamp_cycles;
6662 }
6663
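/*
 * Program the SYSTIME increment (TIMINCA) according to the current link
 * speed and reset the three software timecounters (SYSTIME, Rx and Tx
 * timestamps). On the X550 family the counter already ticks in ns, so an
 * increment of 1 with no shift is used; on 82599 the increment value and
 * shift are scaled down by IXGBE_INCVAL_SHIFT_82599 before programming.
 */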
6664 static void
6665 ixgbe_start_timecounters(struct rte_eth_dev *dev)
6666 {
6667         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6668         struct ixgbe_adapter *adapter =
6669                 (struct ixgbe_adapter *)dev->data->dev_private;
6670         struct rte_eth_link link;
6671         uint32_t incval = 0;
6672         uint32_t shift = 0;
6673
6674         /* Get current link speed. */
6675         memset(&link, 0, sizeof(link));
6676         ixgbe_dev_link_update(dev, 1);
6677         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
6678
6679         switch (link.link_speed) {
6680         case ETH_SPEED_NUM_100M:
6681                 incval = IXGBE_INCVAL_100;
6682                 shift = IXGBE_INCVAL_SHIFT_100;
6683                 break;
6684         case ETH_SPEED_NUM_1G:
6685                 incval = IXGBE_INCVAL_1GB;
6686                 shift = IXGBE_INCVAL_SHIFT_1GB;
6687                 break;
6688         case ETH_SPEED_NUM_10G:
6689         default:
6690                 incval = IXGBE_INCVAL_10GB;
6691                 shift = IXGBE_INCVAL_SHIFT_10GB;
6692                 break;
6693         }
6694
6695         switch (hw->mac.type) {
6696         case ixgbe_mac_X550:
6697         case ixgbe_mac_X550EM_x:
6698         case ixgbe_mac_X550EM_a:
6699                 /* Independent of link speed. */
6700                 incval = 1;
6701                 /* Cycles read will be interpreted as ns. */
6702                 shift = 0;
6703                 /* Fall-through */
6704         case ixgbe_mac_X540:
6705                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
6706                 break;
6707         case ixgbe_mac_82599EB:
6708                 incval >>= IXGBE_INCVAL_SHIFT_82599;
6709                 shift -= IXGBE_INCVAL_SHIFT_82599;
6710                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
6711                                 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
6712                 break;
6713         default:
6714                 /* Not supported. */
6715                 return;
6716         }
6717
6718         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
6719         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6720         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6721
6722         adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6723         adapter->systime_tc.cc_shift = shift;
6724         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
6725
6726         adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6727         adapter->rx_tstamp_tc.cc_shift = shift;
6728         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6729
6730         adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6731         adapter->tx_tstamp_tc.cc_shift = shift;
6732         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6733 }
6734
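/*
 * Timesync adjustment only touches the software timecounters: the delta is
 * added to their ns accumulators and the hardware SYSTIME registers are left
 * untouched, so the offset is applied when cycles are converted to ns.
 */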
6735 static int
6736 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
6737 {
6738         struct ixgbe_adapter *adapter =
6739                         (struct ixgbe_adapter *)dev->data->dev_private;
6740
6741         adapter->systime_tc.nsec += delta;
6742         adapter->rx_tstamp_tc.nsec += delta;
6743         adapter->tx_tstamp_tc.nsec += delta;
6744
6745         return 0;
6746 }
6747
6748 static int
6749 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
6750 {
6751         uint64_t ns;
6752         struct ixgbe_adapter *adapter =
6753                         (struct ixgbe_adapter *)dev->data->dev_private;
6754
6755         ns = rte_timespec_to_ns(ts);
6756         /* Set the timecounters to a new value. */
6757         adapter->systime_tc.nsec = ns;
6758         adapter->rx_tstamp_tc.nsec = ns;
6759         adapter->tx_tstamp_tc.nsec = ns;
6760
6761         return 0;
6762 }
6763
6764 static int
6765 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
6766 {
6767         uint64_t ns, systime_cycles;
6768         struct ixgbe_adapter *adapter =
6769                         (struct ixgbe_adapter *)dev->data->dev_private;
6770
6771         systime_cycles = ixgbe_read_systime_cyclecounter(dev);
6772         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
6773         *ts = rte_ns_to_timespec(ns);
6774
6775         return 0;
6776 }
6777
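/*
 * Illustrative caller-side sequence (an assumption about application code,
 * not part of this file); the ethdev layer dispatches these calls to the
 * timesync ops implemented below:
 *
 *     rte_eth_timesync_enable(port_id);
 *     rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 *     rte_eth_timesync_read_tx_timestamp(port_id, &ts);
 *     rte_eth_timesync_adjust_time(port_id, delta_ns);
 *     rte_eth_timesync_disable(port_id);
 */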
6778 static int
6779 ixgbe_timesync_enable(struct rte_eth_dev *dev)
6780 {
6781         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6782         uint32_t tsync_ctl;
6783         uint32_t tsauxc;
6784
6785         /* Stop the timesync system time. */
6786         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
6787         /* Reset the timesync system time value. */
6788         IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
6789         IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
6790
6791         /* Enable system time for platforms where it isn't on by default. */
6792         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
6793         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
6794         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
6795
6796         ixgbe_start_timecounters(dev);
6797
6798         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6799         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
6800                         (ETHER_TYPE_1588 |
6801                          IXGBE_ETQF_FILTER_EN |
6802                          IXGBE_ETQF_1588));
6803
6804         /* Enable timestamping of received PTP packets. */
6805         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6806         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
6807         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6808
6809         /* Enable timestamping of transmitted PTP packets. */
6810         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6811         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
6812         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6813
6814         IXGBE_WRITE_FLUSH(hw);
6815
6816         return 0;
6817 }
6818
6819 static int
6820 ixgbe_timesync_disable(struct rte_eth_dev *dev)
6821 {
6822         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6823         uint32_t tsync_ctl;
6824
6825         /* Disable timestamping of transmitted PTP packets. */
6826         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6827         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
6828         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6829
6830         /* Disable timestamping of received PTP packets. */
6831         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6832         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
6833         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6834
6835         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6836         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
6837
6838         /* Stop incrementing the System Time registers. */
6839         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
6840
6841         return 0;
6842 }
6843
6844 static int
6845 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
6846                                  struct timespec *timestamp,
6847                                  uint32_t flags __rte_unused)
6848 {
6849         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6850         struct ixgbe_adapter *adapter =
6851                 (struct ixgbe_adapter *)dev->data->dev_private;
6852         uint32_t tsync_rxctl;
6853         uint64_t rx_tstamp_cycles;
6854         uint64_t ns;
6855
6856         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6857         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
6858                 return -EINVAL;
6859
6860         rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
6861         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
6862         *timestamp = rte_ns_to_timespec(ns);
6863
6864         return 0;
6865 }
6866
6867 static int
6868 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
6869                                  struct timespec *timestamp)
6870 {
6871         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6872         struct ixgbe_adapter *adapter =
6873                 (struct ixgbe_adapter *)dev->data->dev_private;
6874         uint32_t tsync_txctl;
6875         uint64_t tx_tstamp_cycles;
6876         uint64_t ns;
6877
6878         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6879         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
6880                 return -EINVAL;
6881
6882         tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
6883         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
6884         *timestamp = rte_ns_to_timespec(ns);
6885
6886         return 0;
6887 }
6888
6889 static int
6890 ixgbe_get_reg_length(struct rte_eth_dev *dev)
6891 {
6892         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6893         int count = 0;
6894         int g_ind = 0;
6895         const struct reg_info *reg_group;
6896         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
6897                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
6898
6899         while ((reg_group = reg_set[g_ind++]))
6900                 count += ixgbe_regs_group_count(reg_group);
6901
6902         return count;
6903 }
6904
6905 static int
6906 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
6907 {
6908         int count = 0;
6909         int g_ind = 0;
6910         const struct reg_info *reg_group;
6911
6912         while ((reg_group = ixgbevf_regs[g_ind++]))
6913                 count += ixgbe_regs_group_count(reg_group);
6914
6915         return count;
6916 }
6917
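/*
 * Register dump. A NULL data pointer is a query: only regs->length and
 * regs->width are filled in. Otherwise only a full dump is supported, and
 * regs->version packs the MAC type, revision id and device id.
 */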
6918 static int
6919 ixgbe_get_regs(struct rte_eth_dev *dev,
6920               struct rte_dev_reg_info *regs)
6921 {
6922         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6923         uint32_t *data = regs->data;
6924         int g_ind = 0;
6925         int count = 0;
6926         const struct reg_info *reg_group;
6927         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
6928                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
6929
6930         if (data == NULL) {
6931                 regs->length = ixgbe_get_reg_length(dev);
6932                 regs->width = sizeof(uint32_t);
6933                 return 0;
6934         }
6935
6936         /* Support only full register dump */
6937         if ((regs->length == 0) ||
6938             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
6939                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
6940                         hw->device_id;
6941                 while ((reg_group = reg_set[g_ind++]))
6942                         count += ixgbe_read_regs_group(dev, &data[count],
6943                                 reg_group);
6944                 return 0;
6945         }
6946
6947         return -ENOTSUP;
6948 }
6949
6950 static int
6951 ixgbevf_get_regs(struct rte_eth_dev *dev,
6952                 struct rte_dev_reg_info *regs)
6953 {
6954         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6955         uint32_t *data = regs->data;
6956         int g_ind = 0;
6957         int count = 0;
6958         const struct reg_info *reg_group;
6959
6960         if (data == NULL) {
6961                 regs->length = ixgbevf_get_reg_length(dev);
6962                 regs->width = sizeof(uint32_t);
6963                 return 0;
6964         }
6965
6966         /* Support only full register dump */
6967         if ((regs->length == 0) ||
6968             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
6969                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
6970                         hw->device_id;
6971                 while ((reg_group = ixgbevf_regs[g_ind++]))
6972                         count += ixgbe_read_regs_group(dev, &data[count],
6973                                                       reg_group);
6974                 return 0;
6975         }
6976
6977         return -ENOTSUP;
6978 }
6979
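/*
 * The EEPROM is addressed in 16-bit words, so its byte length is
 * word_size * 2, and the get/set helpers below convert the byte offset and
 * length passed by the application into words by shifting right by one.
 */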
6980 static int
6981 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
6982 {
6983         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6984
6985         /* Return unit is byte count */
6986         return hw->eeprom.word_size * 2;
6987 }
6988
6989 static int
6990 ixgbe_get_eeprom(struct rte_eth_dev *dev,
6991                 struct rte_dev_eeprom_info *in_eeprom)
6992 {
6993         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6994         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
6995         uint16_t *data = in_eeprom->data;
6996         int first, length;
6997
6998         first = in_eeprom->offset >> 1;
6999         length = in_eeprom->length >> 1;
7000         if ((first > hw->eeprom.word_size) ||
7001             ((first + length) > hw->eeprom.word_size))
7002                 return -EINVAL;
7003
7004         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7005
7006         return eeprom->ops.read_buffer(hw, first, length, data);
7007 }
7008
7009 static int
7010 ixgbe_set_eeprom(struct rte_eth_dev *dev,
7011                 struct rte_dev_eeprom_info *in_eeprom)
7012 {
7013         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7014         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7015         uint16_t *data = in_eeprom->data;
7016         int first, length;
7017
7018         first = in_eeprom->offset >> 1;
7019         length = in_eeprom->length >> 1;
7020         if ((first > hw->eeprom.word_size) ||
7021             ((first + length) > hw->eeprom.word_size))
7022                 return -EINVAL;
7023
7024         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7025
7026         return eeprom->ops.write_buffer(hw, first, length, data);
7027 }
7028
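/*
 * RSS register helpers. The redirection table size depends on the MAC:
 * 512 entries on the X550 PF family, 64 on the X550 VFs and 128 otherwise.
 * Each 32-bit RETA register holds four 8-bit entries, hence the ">> 2"
 * index mapping; on X550 the entries beyond 128 live in the ERETA range.
 * For example, reta_idx 200 on an X550 maps to IXGBE_ERETA((200 - 128) >> 2),
 * i.e. IXGBE_ERETA(18). VF variants use the VFRETA/VFMRQC/VFRSSRK mirrors.
 */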
7029 uint16_t
7030 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
7031         switch (mac_type) {
7032         case ixgbe_mac_X550:
7033         case ixgbe_mac_X550EM_x:
7034         case ixgbe_mac_X550EM_a:
7035                 return ETH_RSS_RETA_SIZE_512;
7036         case ixgbe_mac_X550_vf:
7037         case ixgbe_mac_X550EM_x_vf:
7038         case ixgbe_mac_X550EM_a_vf:
7039                 return ETH_RSS_RETA_SIZE_64;
7040         default:
7041                 return ETH_RSS_RETA_SIZE_128;
7042         }
7043 }
7044
7045 uint32_t
7046 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
7047         switch (mac_type) {
7048         case ixgbe_mac_X550:
7049         case ixgbe_mac_X550EM_x:
7050         case ixgbe_mac_X550EM_a:
7051                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
7052                         return IXGBE_RETA(reta_idx >> 2);
7053                 else
7054                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
7055         case ixgbe_mac_X550_vf:
7056         case ixgbe_mac_X550EM_x_vf:
7057         case ixgbe_mac_X550EM_a_vf:
7058                 return IXGBE_VFRETA(reta_idx >> 2);
7059         default:
7060                 return IXGBE_RETA(reta_idx >> 2);
7061         }
7062 }
7063
7064 uint32_t
7065 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
7066         switch (mac_type) {
7067         case ixgbe_mac_X550_vf:
7068         case ixgbe_mac_X550EM_x_vf:
7069         case ixgbe_mac_X550EM_a_vf:
7070                 return IXGBE_VFMRQC;
7071         default:
7072                 return IXGBE_MRQC;
7073         }
7074 }
7075
7076 uint32_t
7077 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
7078         switch (mac_type) {
7079         case ixgbe_mac_X550_vf:
7080         case ixgbe_mac_X550EM_x_vf:
7081         case ixgbe_mac_X550EM_a_vf:
7082                 return IXGBE_VFRSSRK(i);
7083         default:
7084                 return IXGBE_RSSRK(i);
7085         }
7086 }
7087
7088 bool
7089 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
7090         switch (mac_type) {
7091         case ixgbe_mac_82599_vf:
7092         case ixgbe_mac_X540_vf:
7093                 return 0;
7094         default:
7095                 return 1;
7096         }
7097 }
7098
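/*
 * Report the DCB configuration: number of TCs, the user-priority to TC
 * mapping, the fixed Rx/Tx queue ranges for the VMDq+DCB and the 4-TC/8-TC
 * layouts, and the per-TC Tx bandwidth group percentages.
 */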
7099 static int
7100 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
7101                         struct rte_eth_dcb_info *dcb_info)
7102 {
7103         struct ixgbe_dcb_config *dcb_config =
7104                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
7105         struct ixgbe_dcb_tc_config *tc;
7106         uint8_t i, j;
7107
7108         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
7109                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
7110         else
7111                 dcb_info->nb_tcs = 1;
7112
7113         if (dcb_config->vt_mode) { /* VT is enabled */
7114                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
7115                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
7116                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7117                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
7118                 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
7119                         for (j = 0; j < dcb_info->nb_tcs; j++) {
7120                                 dcb_info->tc_queue.tc_rxq[i][j].base =
7121                                                 i * dcb_info->nb_tcs + j;
7122                                 dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
7123                                 dcb_info->tc_queue.tc_txq[i][j].base =
7124                                                 i * dcb_info->nb_tcs + j;
7125                                 dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
7126                         }
7127                 }
7128         } else { /* VT is disabled */
7129                 struct rte_eth_dcb_rx_conf *rx_conf =
7130                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
7131                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7132                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
7133                 if (dcb_info->nb_tcs == ETH_4_TCS) {
7134                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7135                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
7136                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7137                         }
7138                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7139                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
7140                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
7141                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
7142                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
7143                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7144                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7145                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7146                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
7147                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7148                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
7149                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7150                         }
7151                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7152                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
7153                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
7154                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
7155                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
7156                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
7157                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
7158                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
7159                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
7160                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7161                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7162                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7163                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
7164                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
7165                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
7166                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
7167                 }
7168         }
7169         for (i = 0; i < dcb_info->nb_tcs; i++) {
7170                 tc = &dcb_config->tc_config[i];
7171                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
7172         }
7173         return 0;
7174 }
7175
7176 /* Update e-tag ether type */
7177 static int
7178 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
7179                             uint16_t ether_type)
7180 {
7181         uint32_t etag_etype;
7182
7183         if (hw->mac.type != ixgbe_mac_X550 &&
7184             hw->mac.type != ixgbe_mac_X550EM_x &&
7185             hw->mac.type != ixgbe_mac_X550EM_a) {
7186                 return -ENOTSUP;
7187         }
7188
7189         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7190         etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
7191         etag_etype |= ether_type;
7192         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7193         IXGBE_WRITE_FLUSH(hw);
7194
7195         return 0;
7196 }
7197
7198 /* Config l2 tunnel ether type */
7199 static int
7200 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
7201                                   struct rte_eth_l2_tunnel_conf *l2_tunnel)
7202 {
7203         int ret = 0;
7204         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7205         struct ixgbe_l2_tn_info *l2_tn_info =
7206                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7207
7208         if (l2_tunnel == NULL)
7209                 return -EINVAL;
7210
7211         switch (l2_tunnel->l2_tunnel_type) {
7212         case RTE_L2_TUNNEL_TYPE_E_TAG:
7213                 l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
7214                 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
7215                 break;
7216         default:
7217                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7218                 ret = -EINVAL;
7219                 break;
7220         }
7221
7222         return ret;
7223 }
7224
7225 /* Enable e-tag tunnel */
7226 static int
7227 ixgbe_e_tag_enable(struct ixgbe_hw *hw)
7228 {
7229         uint32_t etag_etype;
7230
7231         if (hw->mac.type != ixgbe_mac_X550 &&
7232             hw->mac.type != ixgbe_mac_X550EM_x &&
7233             hw->mac.type != ixgbe_mac_X550EM_a) {
7234                 return -ENOTSUP;
7235         }
7236
7237         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7238         etag_etype |= IXGBE_ETAG_ETYPE_VALID;
7239         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7240         IXGBE_WRITE_FLUSH(hw);
7241
7242         return 0;
7243 }
7244
7245 /* Enable l2 tunnel */
7246 static int
7247 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
7248                            enum rte_eth_tunnel_type l2_tunnel_type)
7249 {
7250         int ret = 0;
7251         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7252         struct ixgbe_l2_tn_info *l2_tn_info =
7253                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7254
7255         switch (l2_tunnel_type) {
7256         case RTE_L2_TUNNEL_TYPE_E_TAG:
7257                 l2_tn_info->e_tag_en = TRUE;
7258                 ret = ixgbe_e_tag_enable(hw);
7259                 break;
7260         default:
7261                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7262                 ret = -EINVAL;
7263                 break;
7264         }
7265
7266         return ret;
7267 }
7268
7269 /* Disable e-tag tunnel */
7270 static int
7271 ixgbe_e_tag_disable(struct ixgbe_hw *hw)
7272 {
7273         uint32_t etag_etype;
7274
7275         if (hw->mac.type != ixgbe_mac_X550 &&
7276             hw->mac.type != ixgbe_mac_X550EM_x &&
7277             hw->mac.type != ixgbe_mac_X550EM_a) {
7278                 return -ENOTSUP;
7279         }
7280
7281         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7282         etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
7283         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7284         IXGBE_WRITE_FLUSH(hw);
7285
7286         return 0;
7287 }
7288
7289 /* Disable l2 tunnel */
7290 static int
7291 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
7292                             enum rte_eth_tunnel_type l2_tunnel_type)
7293 {
7294         int ret = 0;
7295         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7296         struct ixgbe_l2_tn_info *l2_tn_info =
7297                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7298
7299         switch (l2_tunnel_type) {
7300         case RTE_L2_TUNNEL_TYPE_E_TAG:
7301                 l2_tn_info->e_tag_en = FALSE;
7302                 ret = ixgbe_e_tag_disable(hw);
7303                 break;
7304         default:
7305                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7306                 ret = -EINVAL;
7307                 break;
7308         }
7309
7310         return ret;
7311 }
7312
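/*
 * E-tag forwarding rules are stored in receive address (RAR) entries:
 * RAH.AV plus RAH.ADTYPE mark an entry as an E-tag rule and RAL carries the
 * tunnel id. Index 0 holds the port MAC address, so the scan starts at 1.
 */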
7313 static int
7314 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
7315                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7316 {
7317         int ret = 0;
7318         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7319         uint32_t i, rar_entries;
7320         uint32_t rar_low, rar_high;
7321
7322         if (hw->mac.type != ixgbe_mac_X550 &&
7323             hw->mac.type != ixgbe_mac_X550EM_x &&
7324             hw->mac.type != ixgbe_mac_X550EM_a) {
7325                 return -ENOTSUP;
7326         }
7327
7328         rar_entries = ixgbe_get_num_rx_addrs(hw);
7329
7330         for (i = 1; i < rar_entries; i++) {
7331                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7332                 rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
7333                 if ((rar_high & IXGBE_RAH_AV) &&
7334                     (rar_high & IXGBE_RAH_ADTYPE) &&
7335                     ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
7336                      l2_tunnel->tunnel_id)) {
7337                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
7338                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
7339
7340                         ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
7341
7342                         return ret;
7343                 }
7344         }
7345
7346         return ret;
7347 }
7348
7349 static int
7350 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
7351                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7352 {
7353         int ret = 0;
7354         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7355         uint32_t i, rar_entries;
7356         uint32_t rar_low, rar_high;
7357
7358         if (hw->mac.type != ixgbe_mac_X550 &&
7359             hw->mac.type != ixgbe_mac_X550EM_x &&
7360             hw->mac.type != ixgbe_mac_X550EM_a) {
7361                 return -ENOTSUP;
7362         }
7363
7364         /* One entry per tunnel. Try to remove a potentially existing entry first. */
7365         ixgbe_e_tag_filter_del(dev, l2_tunnel);
7366
7367         rar_entries = ixgbe_get_num_rx_addrs(hw);
7368
7369         for (i = 1; i < rar_entries; i++) {
7370                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7371                 if (rar_high & IXGBE_RAH_AV) {
7372                         continue;
7373                 } else {
7374                         ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
7375                         rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
7376                         rar_low = l2_tunnel->tunnel_id;
7377
7378                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
7379                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
7380
7381                         return ret;
7382                 }
7383         }
7384
7385         PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
7386                      " Please remove a rule before adding a new one.");
7387         return -EINVAL;
7388 }
7389
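/*
 * Software shadow of the L2 tunnel filters: rte_hash maps the
 * (type, tunnel id) key to an index, hash_map[] stores the filter node at
 * that index and the tailq keeps every filter so it can be restored later.
 */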
7390 static inline struct ixgbe_l2_tn_filter *
7391 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
7392                           struct ixgbe_l2_tn_key *key)
7393 {
7394         int ret;
7395
7396         ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
7397         if (ret < 0)
7398                 return NULL;
7399
7400         return l2_tn_info->hash_map[ret];
7401 }
7402
7403 static inline int
7404 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7405                           struct ixgbe_l2_tn_filter *l2_tn_filter)
7406 {
7407         int ret;
7408
7409         ret = rte_hash_add_key(l2_tn_info->hash_handle,
7410                                &l2_tn_filter->key);
7411
7412         if (ret < 0) {
7413                 PMD_DRV_LOG(ERR,
7414                             "Failed to insert L2 tunnel filter"
7415                             " into the hash table: %d!",
7416                             ret);
7417                 return ret;
7418         }
7419
7420         l2_tn_info->hash_map[ret] = l2_tn_filter;
7421
7422         TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7423
7424         return 0;
7425 }
7426
7427 static inline int
7428 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7429                           struct ixgbe_l2_tn_key *key)
7430 {
7431         int ret;
7432         struct ixgbe_l2_tn_filter *l2_tn_filter;
7433
7434         ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
7435
7436         if (ret < 0) {
7437                 PMD_DRV_LOG(ERR,
7438                             "No such L2 tunnel filter to delete %d!",
7439                             ret);
7440                 return ret;
7441         }
7442
7443         l2_tn_filter = l2_tn_info->hash_map[ret];
7444         l2_tn_info->hash_map[ret] = NULL;
7445
7446         TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7447         rte_free(l2_tn_filter);
7448
7449         return 0;
7450 }
7451
7452 /* Add l2 tunnel filter */
7453 int
7454 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
7455                                struct rte_eth_l2_tunnel_conf *l2_tunnel,
7456                                bool restore)
7457 {
7458         int ret;
7459         struct ixgbe_l2_tn_info *l2_tn_info =
7460                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7461         struct ixgbe_l2_tn_key key;
7462         struct ixgbe_l2_tn_filter *node;
7463
7464         if (!restore) {
7465                 key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7466                 key.tn_id = l2_tunnel->tunnel_id;
7467
7468                 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
7469
7470                 if (node) {
7471                         PMD_DRV_LOG(ERR,
7472                                     "The L2 tunnel filter already exists!");
7473                         return -EINVAL;
7474                 }
7475
7476                 node = rte_zmalloc("ixgbe_l2_tn",
7477                                    sizeof(struct ixgbe_l2_tn_filter),
7478                                    0);
7479                 if (!node)
7480                         return -ENOMEM;
7481
7482                 (void)rte_memcpy(&node->key,
7483                                  &key,
7484                                  sizeof(struct ixgbe_l2_tn_key));
7485                 node->pool = l2_tunnel->pool;
7486                 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
7487                 if (ret < 0) {
7488                         rte_free(node);
7489                         return ret;
7490                 }
7491         }
7492
7493         switch (l2_tunnel->l2_tunnel_type) {
7494         case RTE_L2_TUNNEL_TYPE_E_TAG:
7495                 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
7496                 break;
7497         default:
7498                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7499                 ret = -EINVAL;
7500                 break;
7501         }
7502
7503         if ((!restore) && (ret < 0))
7504                 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7505
7506         return ret;
7507 }
7508
7509 /* Delete l2 tunnel filter */
7510 int
7511 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
7512                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
7513 {
7514         int ret;
7515         struct ixgbe_l2_tn_info *l2_tn_info =
7516                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7517         struct ixgbe_l2_tn_key key;
7518
7519         key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7520         key.tn_id = l2_tunnel->tunnel_id;
7521         ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7522         if (ret < 0)
7523                 return ret;
7524
7525         switch (l2_tunnel->l2_tunnel_type) {
7526         case RTE_L2_TUNNEL_TYPE_E_TAG:
7527                 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
7528                 break;
7529         default:
7530                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7531                 ret = -EINVAL;
7532                 break;
7533         }
7534
7535         return ret;
7536 }
7537
7538 /**
7539  * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
7540  * @dev: pointer to rte_eth_dev structure
7541  * @filter_op: operation to be taken.
7542  * @arg: a pointer to specific structure corresponding to the filter_op
7543  */
7544 static int
7545 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
7546                                   enum rte_filter_op filter_op,
7547                                   void *arg)
7548 {
7549         int ret;
7550
7551         if (filter_op == RTE_ETH_FILTER_NOP)
7552                 return 0;
7553
7554         if (arg == NULL) {
7555                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
7556                             filter_op);
7557                 return -EINVAL;
7558         }
7559
7560         switch (filter_op) {
7561         case RTE_ETH_FILTER_ADD:
7562                 ret = ixgbe_dev_l2_tunnel_filter_add
7563                         (dev,
7564                          (struct rte_eth_l2_tunnel_conf *)arg,
7565                          FALSE);
7566                 break;
7567         case RTE_ETH_FILTER_DELETE:
7568                 ret = ixgbe_dev_l2_tunnel_filter_del
7569                         (dev,
7570                          (struct rte_eth_l2_tunnel_conf *)arg);
7571                 break;
7572         default:
7573                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
7574                 ret = -EINVAL;
7575                 break;
7576         }
7577         return ret;
7578 }
7579
7580 static int
7581 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
7582 {
7583         int ret = 0;
7584         uint32_t ctrl;
7585         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7586
7587         if (hw->mac.type != ixgbe_mac_X550 &&
7588             hw->mac.type != ixgbe_mac_X550EM_x &&
7589             hw->mac.type != ixgbe_mac_X550EM_a) {
7590                 return -ENOTSUP;
7591         }
7592
7593         ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
7594         ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
7595         if (en)
7596                 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
7597         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
7598
7599         return ret;
7600 }
7601
7602 /* Enable l2 tunnel forwarding */
7603 static int
7604 ixgbe_dev_l2_tunnel_forwarding_enable
7605         (struct rte_eth_dev *dev,
7606          enum rte_eth_tunnel_type l2_tunnel_type)
7607 {
7608         struct ixgbe_l2_tn_info *l2_tn_info =
7609                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7610         int ret = 0;
7611
7612         switch (l2_tunnel_type) {
7613         case RTE_L2_TUNNEL_TYPE_E_TAG:
7614                 l2_tn_info->e_tag_fwd_en = TRUE;
7615                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
7616                 break;
7617         default:
7618                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7619                 ret = -EINVAL;
7620                 break;
7621         }
7622
7623         return ret;
7624 }
7625
7626 /* Disable l2 tunnel forwarding */
7627 static int
7628 ixgbe_dev_l2_tunnel_forwarding_disable
7629         (struct rte_eth_dev *dev,
7630          enum rte_eth_tunnel_type l2_tunnel_type)
7631 {
7632         struct ixgbe_l2_tn_info *l2_tn_info =
7633                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7634         int ret = 0;
7635
7636         switch (l2_tunnel_type) {
7637         case RTE_L2_TUNNEL_TYPE_E_TAG:
7638                 l2_tn_info->e_tag_fwd_en = FALSE;
7639                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
7640                 break;
7641         default:
7642                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7643                 ret = -EINVAL;
7644                 break;
7645         }
7646
7647         return ret;
7648 }
7649
7650 static int
7651 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
7652                              struct rte_eth_l2_tunnel_conf *l2_tunnel,
7653                              bool en)
7654 {
7655         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
7656         int ret = 0;
7657         uint32_t vmtir, vmvir;
7658         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7659
7660         if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
7661                 PMD_DRV_LOG(ERR,
7662                             "VF id %u should be less than %u",
7663                             l2_tunnel->vf_id,
7664                             pci_dev->max_vfs);
7665                 return -EINVAL;
7666         }
7667
7668         if (hw->mac.type != ixgbe_mac_X550 &&
7669             hw->mac.type != ixgbe_mac_X550EM_x &&
7670             hw->mac.type != ixgbe_mac_X550EM_a) {
7671                 return -ENOTSUP;
7672         }
7673
7674         if (en)
7675                 vmtir = l2_tunnel->tunnel_id;
7676         else
7677                 vmtir = 0;
7678
7679         IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
7680
7681         vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
7682         vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
7683         if (en)
7684                 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
7685         IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
7686
7687         return ret;
7688 }
7689
7690 /* Enable l2 tunnel tag insertion */
7691 static int
7692 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
7693                                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
7694 {
7695         int ret = 0;
7696
7697         switch (l2_tunnel->l2_tunnel_type) {
7698         case RTE_L2_TUNNEL_TYPE_E_TAG:
7699                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
7700                 break;
7701         default:
7702                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7703                 ret = -EINVAL;
7704                 break;
7705         }
7706
7707         return ret;
7708 }
7709
7710 /* Disable l2 tunnel tag insertion */
7711 static int
7712 ixgbe_dev_l2_tunnel_insertion_disable
7713         (struct rte_eth_dev *dev,
7714          struct rte_eth_l2_tunnel_conf *l2_tunnel)
7715 {
7716         int ret = 0;
7717
7718         switch (l2_tunnel->l2_tunnel_type) {
7719         case RTE_L2_TUNNEL_TYPE_E_TAG:
7720                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
7721                 break;
7722         default:
7723                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7724                 ret = -EINVAL;
7725                 break;
7726         }
7727
7728         return ret;
7729 }
7730
7731 static int
7732 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
7733                              bool en)
7734 {
7735         int ret = 0;
7736         uint32_t qde;
7737         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7738
7739         if (hw->mac.type != ixgbe_mac_X550 &&
7740             hw->mac.type != ixgbe_mac_X550EM_x &&
7741             hw->mac.type != ixgbe_mac_X550EM_a) {
7742                 return -ENOTSUP;
7743         }
7744
7745         qde = IXGBE_READ_REG(hw, IXGBE_QDE);
7746         if (en)
7747                 qde |= IXGBE_QDE_STRIP_TAG;
7748         else
7749                 qde &= ~IXGBE_QDE_STRIP_TAG;
7750         qde &= ~IXGBE_QDE_READ;
7751         qde |= IXGBE_QDE_WRITE;
7752         IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
7753
7754         return ret;
7755 }
7756
7757 /* Enable l2 tunnel tag stripping */
7758 static int
7759 ixgbe_dev_l2_tunnel_stripping_enable
7760         (struct rte_eth_dev *dev,
7761          enum rte_eth_tunnel_type l2_tunnel_type)
7762 {
7763         int ret = 0;
7764
7765         switch (l2_tunnel_type) {
7766         case RTE_L2_TUNNEL_TYPE_E_TAG:
7767                 ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
7768                 break;
7769         default:
7770                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7771                 ret = -EINVAL;
7772                 break;
7773         }
7774
7775         return ret;
7776 }
7777
7778 /* Disable l2 tunnel tag stripping */
7779 static int
7780 ixgbe_dev_l2_tunnel_stripping_disable
7781         (struct rte_eth_dev *dev,
7782          enum rte_eth_tunnel_type l2_tunnel_type)
7783 {
7784         int ret = 0;
7785
7786         switch (l2_tunnel_type) {
7787         case RTE_L2_TUNNEL_TYPE_E_TAG:
7788                 ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
7789                 break;
7790         default:
7791                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7792                 ret = -EINVAL;
7793                 break;
7794         }
7795
7796         return ret;
7797 }
7798
7799 /* Enable/disable l2 tunnel offload functions */
7800 static int
7801 ixgbe_dev_l2_tunnel_offload_set
7802         (struct rte_eth_dev *dev,
7803          struct rte_eth_l2_tunnel_conf *l2_tunnel,
7804          uint32_t mask,
7805          uint8_t en)
7806 {
7807         int ret = 0;
7808
7809         if (l2_tunnel == NULL)
7810                 return -EINVAL;
7811
7812         ret = -EINVAL;
7813         if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
7814                 if (en)
7815                         ret = ixgbe_dev_l2_tunnel_enable(
7816                                 dev,
7817                                 l2_tunnel->l2_tunnel_type);
7818                 else
7819                         ret = ixgbe_dev_l2_tunnel_disable(
7820                                 dev,
7821                                 l2_tunnel->l2_tunnel_type);
7822         }
7823
7824         if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
7825                 if (en)
7826                         ret = ixgbe_dev_l2_tunnel_insertion_enable(
7827                                 dev,
7828                                 l2_tunnel);
7829                 else
7830                         ret = ixgbe_dev_l2_tunnel_insertion_disable(
7831                                 dev,
7832                                 l2_tunnel);
7833         }
7834
7835         if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
7836                 if (en)
7837                         ret = ixgbe_dev_l2_tunnel_stripping_enable(
7838                                 dev,
7839                                 l2_tunnel->l2_tunnel_type);
7840                 else
7841                         ret = ixgbe_dev_l2_tunnel_stripping_disable(
7842                                 dev,
7843                                 l2_tunnel->l2_tunnel_type);
7844         }
7845
7846         if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
7847                 if (en)
7848                         ret = ixgbe_dev_l2_tunnel_forwarding_enable(
7849                                 dev,
7850                                 l2_tunnel->l2_tunnel_type);
7851                 else
7852                         ret = ixgbe_dev_l2_tunnel_forwarding_disable(
7853                                 dev,
7854                                 l2_tunnel->l2_tunnel_type);
7855         }
7856
7857         return ret;
7858 }
7859
7860 static int
7861 ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
7862                         uint16_t port)
7863 {
7864         IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
7865         IXGBE_WRITE_FLUSH(hw);
7866
7867         return 0;
7868 }
7869
7870 /* There is only one register for the VxLAN UDP port, so multiple ports
7871  * cannot be added; the register is simply updated with the new value.
7872  */
7873 static int
7874 ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
7875                      uint16_t port)
7876 {
7877         if (port == 0) {
7878                 PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");
7879                 return -EINVAL;
7880         }
7881
7882         return ixgbe_update_vxlan_port(hw, port);
7883 }
7884
7885 /* The VxLAN UDP port cannot really be deleted: the register always holds
7886  * a value, so deleting a port means resetting the register to its
7887  * original value 0.
7888  */
7889 static int
7890 ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
7891                      uint16_t port)
7892 {
7893         uint16_t cur_port;
7894
7895         cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
7896
7897         if (cur_port != port) {
7898                 PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
7899                 return -EINVAL;
7900         }
7901
7902         return ixgbe_update_vxlan_port(hw, 0);
7903 }
7904
7905 /* Add UDP tunneling port */
7906 static int
7907 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
7908                               struct rte_eth_udp_tunnel *udp_tunnel)
7909 {
7910         int ret = 0;
7911         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7912
7913         if (hw->mac.type != ixgbe_mac_X550 &&
7914             hw->mac.type != ixgbe_mac_X550EM_x &&
7915             hw->mac.type != ixgbe_mac_X550EM_a) {
7916                 return -ENOTSUP;
7917         }
7918
7919         if (udp_tunnel == NULL)
7920                 return -EINVAL;
7921
7922         switch (udp_tunnel->prot_type) {
7923         case RTE_TUNNEL_TYPE_VXLAN:
7924                 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
7925                 break;
7926
7927         case RTE_TUNNEL_TYPE_GENEVE:
7928         case RTE_TUNNEL_TYPE_TEREDO:
7929                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
7930                 ret = -EINVAL;
7931                 break;
7932
7933         default:
7934                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7935                 ret = -EINVAL;
7936                 break;
7937         }
7938
7939         return ret;
7940 }
7941
7942 /* Remove UDP tunneling port */
7943 static int
7944 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
7945                               struct rte_eth_udp_tunnel *udp_tunnel)
7946 {
7947         int ret = 0;
7948         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7949
7950         if (hw->mac.type != ixgbe_mac_X550 &&
7951             hw->mac.type != ixgbe_mac_X550EM_x &&
7952             hw->mac.type != ixgbe_mac_X550EM_a) {
7953                 return -ENOTSUP;
7954         }
7955
7956         if (udp_tunnel == NULL)
7957                 return -EINVAL;
7958
7959         switch (udp_tunnel->prot_type) {
7960         case RTE_TUNNEL_TYPE_VXLAN:
7961                 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
7962                 break;
7963         case RTE_TUNNEL_TYPE_GENEVE:
7964         case RTE_TUNNEL_TYPE_TEREDO:
7965                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
7966                 ret = -EINVAL;
7967                 break;
7968         default:
7969                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7970                 ret = -EINVAL;
7971                 break;
7972         }
7973
7974         return ret;
7975 }
7976
7977 static void
7978 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
7979 {
7980         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7981
7982         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
7983 }
7984
7985 static void
7986 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
7987 {
7988         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7989
7990         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
7991 }
7992
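/*
 * Read one mailbox message from the PF. An IXGBE_PF_CONTROL_MSG means the
 * PF has been reset; this is forwarded to the application as
 * RTE_ETH_EVENT_INTR_RESET so it can restart the VF port.
 */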
7993 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
7994 {
7995         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7996         u32 in_msg = 0;
7997
7998         if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
7999                 return;
8000
8001         /* PF reset VF event */
8002         if (in_msg == IXGBE_PF_CONTROL_MSG)
8003                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
8004                                               NULL, NULL);
8005 }
8006
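/*
 * VF interrupt path: interrupts are masked, VTEICR is read (read-on-clear)
 * and only the mailbox misc vector is decoded into intr->flags; the action
 * routine below then processes the mailbox and re-enables interrupts.
 */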
8007 static int
8008 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
8009 {
8010         uint32_t eicr;
8011         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8012         struct ixgbe_interrupt *intr =
8013                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8014         ixgbevf_intr_disable(hw);
8015
8016         /* read-on-clear nic registers here */
8017         eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
8018         intr->flags = 0;
8019
8020         /* only one misc vector supported - mailbox */
8021         eicr &= IXGBE_VTEICR_MASK;
8022         if (eicr == IXGBE_MISC_VEC_ID)
8023                 intr->flags |= IXGBE_FLAG_MAILBOX;
8024
8025         return 0;
8026 }
8027
8028 static int
8029 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
8030 {
8031         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8032         struct ixgbe_interrupt *intr =
8033                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8034
8035         if (intr->flags & IXGBE_FLAG_MAILBOX) {
8036                 ixgbevf_mbx_process(dev);
8037                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
8038         }
8039
8040         ixgbevf_intr_enable(hw);
8041
8042         return 0;
8043 }
8044
8045 static void
8046 ixgbevf_dev_interrupt_handler(void *param)
8047 {
8048         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
8049
8050         ixgbevf_dev_interrupt_get_status(dev);
8051         ixgbevf_dev_interrupt_action(dev);
8052 }
8053
8054 /**
8055  *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
8056  *  @hw: pointer to hardware structure
8057  *
8058  *  Stops the transmit data path and waits for the HW to internally empty
8059  *  the Tx security block
8060  **/
8061 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
8062 {
8063 #define IXGBE_MAX_SECTX_POLL 40
8064
8065         int i;
8066         int sectxreg;
8067
8068         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8069         sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
8070         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8071         for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
8072                 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
8073                 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
8074                         break;
8075                 /* Use interrupt-safe sleep just in case */
8076                 usec_delay(1000);
8077         }
8078
8079         /* For informational purposes only */
8080         if (i >= IXGBE_MAX_SECTX_POLL)
8081                 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before the security "
8082                          "path is fully disabled. Continuing with init.");
8083
8084         return IXGBE_SUCCESS;
8085 }
8086
8087 /**
8088  *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
8089  *  @hw: pointer to hardware structure
8090  *
8091  *  Enables the transmit data path.
8092  **/
8093 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
8094 {
8095         uint32_t sectxreg;
8096
8097         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8098         sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
8099         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8100         IXGBE_WRITE_FLUSH(hw);
8101
8102         return IXGBE_SUCCESS;
8103 }
8104
8105 /* restore n-tuple filter */
8106 static inline void
8107 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
8108 {
8109         struct ixgbe_filter_info *filter_info =
8110                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8111         struct ixgbe_5tuple_filter *node;
8112
8113         TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
8114                 ixgbe_inject_5tuple_filter(dev, node);
8115         }
8116 }
8117
8118 /* restore ethernet type filter */
8119 static inline void
8120 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
8121 {
8122         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8123         struct ixgbe_filter_info *filter_info =
8124                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8125         int i;
8126
8127         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8128                 if (filter_info->ethertype_mask & (1 << i)) {
8129                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
8130                                         filter_info->ethertype_filters[i].etqf);
8131                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
8132                                         filter_info->ethertype_filters[i].etqs);
8133                         IXGBE_WRITE_FLUSH(hw);
8134                 }
8135         }
8136 }
8137
8138 /* restore SYN filter */
8139 static inline void
8140 ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
8141 {
8142         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8143         struct ixgbe_filter_info *filter_info =
8144                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8145         uint32_t synqf;
8146
8147         synqf = filter_info->syn_info;
8148
8149         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
8150                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
8151                 IXGBE_WRITE_FLUSH(hw);
8152         }
8153 }
8154
8155 /* restore L2 tunnel filter */
8156 static inline void
8157 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
8158 {
8159         struct ixgbe_l2_tn_info *l2_tn_info =
8160                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8161         struct ixgbe_l2_tn_filter *node;
8162         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8163
8164         TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
8165                 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
8166                 l2_tn_conf.tunnel_id      = node->key.tn_id;
8167                 l2_tn_conf.pool           = node->pool;
8168                 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
8169         }
8170 }
8171
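/*
 * Re-program every software-tracked filter (n-tuple, ethertype, SYN, flow
 * director and L2 tunnel) into hardware, typically after the port has been
 * restarted and the corresponding registers have been reset.
 */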
/* restore all the filters tracked in software back into hardware */
static int
ixgbe_filter_restore(struct rte_eth_dev *dev)
{
	ixgbe_ntuple_filter_restore(dev);
	ixgbe_ethertype_filter_restore(dev);
	ixgbe_syn_filter_restore(dev);
	ixgbe_fdir_filter_restore(dev);
	ixgbe_l2_tn_filter_restore(dev);

	return 0;
}

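/*
 * Illustrative sketch only (hypothetical, compiled out): after a device
 * reset the hardware filter tables are empty while the software lists in
 * ixgbe_filter_info still hold every configured entry, so a recovery path
 * would replay them with a single call.  The wrapper name below is an
 * assumption, not an upstream API.
 */
#ifdef IXGBE_FILTER_RESTORE_EXAMPLE
static int
example_replay_filters_after_reset(struct rte_eth_dev *dev)
{
	/* Re-program n-tuple, ethertype, SYN, fdir and L2 tunnel filters. */
	return ixgbe_filter_restore(dev);
}
#endif /* IXGBE_FILTER_RESTORE_EXAMPLE */
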
/* apply the saved E-tag (L2 tunnel) settings to hardware */
static void
ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (l2_tn_info->e_tag_en)
		(void)ixgbe_e_tag_enable(hw);

	if (l2_tn_info->e_tag_fwd_en)
		(void)ixgbe_e_tag_forwarding_en_dis(dev, 1);

	(void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
}

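/*
 * Illustrative sketch only (hypothetical, compiled out): callers record
 * the desired E-tag behaviour in the per-port ixgbe_l2_tn_info state and
 * then let ixgbe_l2_tunnel_conf() push it to the registers.  0x893F is
 * the IEEE 802.1BR E-tag EtherType; the wrapper name is an assumption.
 */
#ifdef IXGBE_L2_TN_CONF_EXAMPLE
static void
example_enable_etag(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

	/* Record the desired E-tag settings in the saved state ... */
	l2_tn_info->e_tag_en = TRUE;
	l2_tn_info->e_tag_fwd_en = FALSE;
	l2_tn_info->e_tag_ether_type = 0x893F;

	/* ... and apply them to the hardware in one place. */
	ixgbe_l2_tunnel_conf(dev);
}
#endif /* IXGBE_L2_TN_CONF_EXAMPLE */
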
/* remove all the n-tuple filters */
void
ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
		ixgbe_remove_5tuple_filter(dev, p_5tuple);
}

/* remove all the ether type filters */
void
ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		/* only clear slots that are in use and whose 'conf' flag
		 * is not set
		 */
		if (filter_info->ethertype_mask & (1 << i) &&
		    !filter_info->ethertype_filters[i].conf) {
			(void)ixgbe_ethertype_filter_remove(filter_info,
							    (uint8_t)i);
			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
			IXGBE_WRITE_FLUSH(hw);
		}
	}
}

/* remove the SYN filter */
void
ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
		filter_info->syn_info = 0;

		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
		IXGBE_WRITE_FLUSH(hw);
	}
}

/* remove all the L2 tunnel filters */
int
ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_filter *l2_tn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_conf;
	int ret = 0;

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
		l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
		l2_tn_conf.pool           = l2_tn_filter->pool;
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
		if (ret < 0)
			return ret;
	}

	return 0;
}

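/*
 * Illustrative sketch only (hypothetical, compiled out): a full filter
 * flush, for example on device close, can combine the clear helpers
 * above.  Only the L2 tunnel helper can fail, so its return value is the
 * one propagated; the wrapper name is an assumption.
 */
#ifdef IXGBE_FILTER_FLUSH_EXAMPLE
static int
example_flush_all_filters(struct rte_eth_dev *dev)
{
	/* Drop every software-tracked filter and clear the registers. */
	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	/* L2 tunnel removal may fail, so report its status. */
	return ixgbe_clear_all_l2_tn_filter(dev);
}
#endif /* IXGBE_FILTER_FLUSH_EXAMPLE */
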
/*
 * Register the PF and VF drivers with the PCI bus.  The PCI ID tables
 * describe which devices each driver supports, and the kmod dependency
 * strings record the kernel modules the devices are expected to be bound
 * to (igb_uio, uio_pci_generic or vfio-pci).
 */
RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");