net/ixgbe: remove MTU setting limitation
drivers/net/ixgbe/ixgbe_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Default minimum inter-interrupt interval for EITR configuration */
#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
#define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */

/*
 *  Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

#define IXGBE_HKEY_MAX_INDEX 10

/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
#define DEFAULT_ETAG_ETYPE                     0x893f
#define IXGBE_ETAG_ETYPE                       0x00005084
#define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID                 0x80000000
#define IXGBE_RAH_ADTYPE                       0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
#define IXGBE_VMVIR_TAGA_MASK                  0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG                    0x00000004
#define IXGBE_VTEICR_MASK                      0x07

#define IXGBE_EXVET_VET_EXT_SHIFT              16
#define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000

static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
static int  ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
                                struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
                                  struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
                uint64_t *values, unsigned int n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
        struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        const uint64_t *ids,
        unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
                                 size_t fw_size);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
                               enum rte_vlan_type vlan_type,
                               uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
                int on);
static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *handle);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                         uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
                                           struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
                                struct rte_pci_driver *drv);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
                                   int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                            uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                             uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                                 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
                ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                struct rte_eth_mirror_conf *mirror_conf,
                uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                           uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
                uint16_t queue_idx, uint16_t tx_rate);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
                                struct ether_addr *mac_addr,
                                uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
                                             struct ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *filter);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                      struct ether_addr *mc_addr_set,
                                      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
                                   struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
                            struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
                                struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp,
                                            uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
                                   const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_l2_tunnel_eth_type_conf
        (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
static int ixgbe_dev_l2_tunnel_offload_set
        (struct rte_eth_dev *dev,
         struct rte_eth_l2_tunnel_conf *l2_tunnel,
         uint32_t mask,
         uint8_t en);
static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
                                             enum rte_filter_op filter_op,
                                             void *arg);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);

/*
 * Define VF stats macros for registers that are not "cleared on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
        uint32_t latest = IXGBE_READ_REG(hw, reg);              \
        cur += (latest - last) & UINT_MAX;                      \
        last = latest;                                          \
}

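/*
 * 36-bit counters are split across an LSB/MSB register pair; the addition
 * and 36-bit mask below compute the delta modulo 2^36 so that a counter
 * wrap between two reads is still accounted for correctly.
 */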
#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
        u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
        u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
        u64 latest = ((new_msb << 32) | new_lsb);                \
        cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
        last = latest;                                           \
}

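/*
 * Helpers to set, clear and query a queue's bit in the per-port bitmap
 * that tracks which queues have hardware VLAN stripping enabled.
 */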
#define IXGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
        { .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
        { .vendor_id = 0, /* sentinel */ },
};

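/* RX/TX descriptor ring limits advertised to applications via dev_infos_get */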
static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_TXD_ALIGN,
        .nb_seg_max = IXGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};

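/* dev_ops for the physical function (PF) devices */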
static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
        .dev_stop             = ixgbe_dev_stop,
        .dev_set_link_up    = ixgbe_dev_set_link_up,
        .dev_set_link_down  = ixgbe_dev_set_link_down,
        .dev_close            = ixgbe_dev_close,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
        .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
        .allmulticast_disable = ixgbe_dev_allmulticast_disable,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbe_dev_stats_get,
        .xstats_get           = ixgbe_dev_xstats_get,
        .xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
        .stats_reset          = ixgbe_dev_stats_reset,
        .xstats_reset         = ixgbe_dev_xstats_reset,
        .xstats_get_names     = ixgbe_dev_xstats_get_names,
        .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .fw_version_get       = ixgbe_fw_version_get,
        .dev_infos_get        = ixgbe_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
        .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
        .rx_queue_start       = ixgbe_dev_rx_queue_start,
        .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
        .tx_queue_start       = ixgbe_dev_tx_queue_start,
        .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
        .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
        .flow_ctrl_get        = ixgbe_flow_ctrl_get,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
        .mac_addr_set         = ixgbe_set_default_mac_addr,
        .uc_hash_table_set    = ixgbe_uc_hash_table_set,
        .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
        .mirror_rule_set      = ixgbe_mirror_rule_set,
        .mirror_rule_reset    = ixgbe_mirror_rule_reset,
        .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
        .filter_ctrl          = ixgbe_dev_filter_ctrl,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .timesync_enable      = ixgbe_timesync_enable,
        .timesync_disable     = ixgbe_timesync_disable,
        .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
        .get_reg              = ixgbe_get_regs,
        .get_eeprom_length    = ixgbe_get_eeprom_length,
        .get_eeprom           = ixgbe_get_eeprom,
        .set_eeprom           = ixgbe_set_eeprom,
        .get_dcb_info         = ixgbe_dev_get_dcb_info,
        .timesync_adjust_time = ixgbe_timesync_adjust_time,
        .timesync_read_time   = ixgbe_timesync_read_time,
        .timesync_write_time  = ixgbe_timesync_write_time,
        .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
        .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
        .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
};

/*
 * dev_ops for virtual function (VF) devices; only the bare necessities
 * for basic VF operation are implemented.
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_configure        = ixgbevf_dev_configure,
        .dev_start            = ixgbevf_dev_start,
        .dev_stop             = ixgbevf_dev_stop,
        .link_update          = ixgbevf_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
        .xstats_get           = ixgbevf_dev_xstats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .xstats_reset         = ixgbevf_dev_stats_reset,
        .xstats_get_names     = ixgbevf_dev_xstats_get_names,
        .dev_close            = ixgbevf_dev_close,
        .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
        .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
        .dev_infos_get        = ixgbevf_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
        .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
        .mac_addr_add         = ixgbevf_add_mac_addr,
        .mac_addr_remove      = ixgbevf_remove_mac_addr,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .mac_addr_set         = ixgbevf_set_default_mac_addr,
        .get_reg              = ixgbevf_get_regs,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
};

/* Store statistics names and their offsets within the stats structure */
struct rte_ixgbe_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
        {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
        {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
        {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
        {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
        {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
        {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
        {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
        {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
        {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
        {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
        {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
        {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
        {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
        {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
        {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                prc1023)},
        {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                prc1522)},
        {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
        {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
        {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
        {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
        {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
        {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
        {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
        {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
        {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
        {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
        {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
        {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
        {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
        {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
        {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
        {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
        {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                ptc1023)},
        {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                ptc1522)},
        {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
        {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
        {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
        {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

        {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_add)},
        {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_remove)},
        {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fadd)},
        {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fremove)},
        {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
                fdirmatch)},
        {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
                fdirmiss)},

        {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
        {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
        {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
                fclast)},
        {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
        {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
        {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
        {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
        {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
                fcoe_noddp)},
        {"rx_fcoe_no_direct_data_placement_ext_buff",
                offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

        {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxontxc)},
        {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxonrxc)},
        {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxofftxc)},
        {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxoffrxc)},
        {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
                           sizeof(rte_ixgbe_stats_strings[0]))

/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
        {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
                out_pkts_untagged)},
        {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
                out_pkts_encrypted)},
        {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
                out_pkts_protected)},
        {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
                out_octets_encrypted)},
        {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
                out_octets_protected)},
        {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
                in_pkts_untagged)},
        {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
                in_pkts_badtag)},
        {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
                in_pkts_nosci)},
        {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unknownsci)},
        {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
                in_octets_decrypted)},
        {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
                in_octets_validated)},
        {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unchecked)},
        {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
                in_pkts_delayed)},
        {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
                in_pkts_late)},
        {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
                in_pkts_ok)},
        {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
                in_pkts_invalid)},
        {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
                in_pkts_notvalid)},
        {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unusedsa)},
        {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
                in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
                           sizeof(rte_ixgbe_macsec_strings[0]))

/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
        {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
        {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
        {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
                           sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
        {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
        {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
                pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
                           sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
        {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
                sizeof(rte_ixgbevf_stats_strings[0]))

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the rte_eth_dev structure to read the link status from.
 * @param link
 *   Pointer to the buffer into which the link status is copied.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the rte_eth_dev structure whose link status is updated.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_avago:
        case ixgbe_phy_sfp_ftl:
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
        case ixgbe_phy_sfp_passive_tyco:
        case ixgbe_phy_sfp_passive_unknown:
                return 1;
        default:
                return 0;
        }
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = ixgbe_reset_hw(hw);

        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
        IXGBE_WRITE_FLUSH(hw);

        if (status == IXGBE_ERR_SFP_NOT_PRESENT)
                status = IXGBE_SUCCESS;
        return status;
}

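/* Enable the currently configured interrupt causes by writing the saved mask to EIMS */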
static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        if (hw->mac.type == ixgbe_mac_82598EB) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
        }
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function resets the queue statistics mapping registers.
 * From the Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 *  must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
        uint32_t i;

        for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
}


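/*
 * Map a queue to one of the per-queue statistics counters. Each RQSMR/TQSM
 * register packs four 8-bit queue-to-counter fields, so the register index
 * is queue_id / 4 and the field offset within it is queue_id % 4.
 */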
static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_id,
                                  uint8_t stat_idx,
                                  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
        uint32_t qsmr_mask = 0;
        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
        uint32_t q_map;
        uint8_t n, offset;

        if ((hw->mac.type != ixgbe_mac_82599EB) &&
                (hw->mac.type != ixgbe_mac_X540) &&
                (hw->mac.type != ixgbe_mac_X550) &&
                (hw->mac.type != ixgbe_mac_X550EM_x) &&
                (hw->mac.type != ixgbe_mac_X550EM_a))
                return -ENOSYS;

        PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);

        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] &= ~clearing_mask;
        else
                stat_mappings->rqsmr[n] &= ~clearing_mask;

        q_map = (uint32_t)stat_idx;
        q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
        qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] |= qsmr_mask;
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;

        PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
        PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

        /* Now write the mapping in the appropriate register */
        if (is_rx) {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        } else {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
        return 0;
}

static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i;

        /* write whatever was in stat mapping table to the NIC */
        for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
                /* rx */
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

                /* tx */
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
        }
}

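/*
 * Build a default DCB configuration: every traffic class gets a roughly
 * equal bandwidth share with PFC disabled, and all user priorities map to
 * TC0; X540/X550 devices are then limited to 4 traffic classes.
 */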
static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
        uint8_t i;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

        dcb_config->num_tcs.pg_tcs = dcb_max_tc;
        dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
        for (i = 0; i < dcb_max_tc; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->pfc = ixgbe_dcb_pfc_disabled;
        }

        /* Initialize default user to priority mapping, UPx->TC0 */
        tc = &dcb_config->tc_config[0];
        tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
        tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
        for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
                dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
                dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
        }
        dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
        dcb_config->pfc_mode_enable = false;
        dcb_config->vt_mode = true;
        dcb_config->round_robin_enable = false;
        /* support all DCB capabilities in 82599 */
        dcb_config->support.capabilities = 0xFF;

        /* Only 4 TCs are supported on X540 and X550 */
        if (hw->mac.type == ixgbe_mac_X540 ||
                hw->mac.type == ixgbe_mac_X550 ||
                hw->mac.type == ixgbe_mac_X550EM_x ||
                hw->mac.type == ixgbe_mac_X550EM_a) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
        uint16_t mask;

        /*
         * The PHY lock should not fail at this early stage. If it does, it is
         * because the application previously exited without releasing it, so
         * force the release of the stale lock. The common lock is released
         * automatically by the swfw_sync function.
         */
        mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
        }
        ixgbe_release_swfw_semaphore(hw, mask);

        /*
         * These locks are trickier since they are shared by all ports; but
         * swfw_sync retries for long enough (1s) to be almost sure that, if
         * the lock cannot be taken, it is because the semaphore was left
         * improperly locked.
         */
        mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");
        }
        ixgbe_release_swfw_semaphore(hw, mask);
}

/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
        struct ixgbe_dcb_config *dcb_config =
                IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
        struct ixgbe_bw_conf *bw_conf =
                IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
        uint32_t ctrl_ext;
        uint16_t csum;
        int diag, i;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ixgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ixgbe_tx_queue *txq;
1157                 /* TX queue function in primary, set by last queue initialized;
1158                  * TX queues may not have been initialized by the primary process
1159                  */
1160                 if (eth_dev->data->tx_queues) {
1161                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1162                         ixgbe_set_tx_function(eth_dev, txq);
1163                 } else {
1164                         /* Use default TX function if we get here */
1165                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1166                                      "Using default TX function.");
1167                 }
1168
1169                 ixgbe_set_rx_function(eth_dev);
1170
1171                 return 0;
1172         }
1173
1174         rte_eth_copy_pci_info(eth_dev, pci_dev);
1175         eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
1176
1177         /* Vendor and Device ID need to be set before init of shared code */
1178         hw->device_id = pci_dev->id.device_id;
1179         hw->vendor_id = pci_dev->id.vendor_id;
1180         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1181         hw->allow_unsupported_sfp = 1;
1182
1183         /* Initialize the shared code (base driver) */
1184 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1185         diag = ixgbe_bypass_init_shared_code(hw);
1186 #else
1187         diag = ixgbe_init_shared_code(hw);
1188 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1189
1190         if (diag != IXGBE_SUCCESS) {
1191                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1192                 return -EIO;
1193         }
1194
1195         /* pick up the PCI bus settings for reporting later */
1196         ixgbe_get_bus_info(hw);
1197
1198         /* Unlock any pending hardware semaphore */
1199         ixgbe_swfw_lock_reset(hw);
1200
1201         /* Initialize DCB configuration */
1202         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1203         ixgbe_dcb_init(hw, dcb_config);
1204         /* Get Hardware Flow Control setting */
1205         hw->fc.requested_mode = ixgbe_fc_full;
1206         hw->fc.current_mode = ixgbe_fc_full;
1207         hw->fc.pause_time = IXGBE_FC_PAUSE;
1208         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1209                 hw->fc.low_water[i] = IXGBE_FC_LO;
1210                 hw->fc.high_water[i] = IXGBE_FC_HI;
1211         }
1212         hw->fc.send_xon = 1;
1213
1214         /* Make sure we have a good EEPROM before we read from it */
1215         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1216         if (diag != IXGBE_SUCCESS) {
1217                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1218                 return -EIO;
1219         }
1220
1221 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1222         diag = ixgbe_bypass_init_hw(hw);
1223 #else
1224         diag = ixgbe_init_hw(hw);
1225 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1226
1227         /*
1228          * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1229          * is called too soon after the kernel driver unbinding/binding occurs.
1230          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1231          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1232          * also called. See ixgbe_identify_phy_82599(). The reason for the
1233          * failure is not known, and it only occurs when virtualisation features
1234          * are disabled in the BIOS. A delay of 100ms was found to be enough by
1235          * trial-and-error, and is doubled to be safe.
1236          */
1237         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1238                 rte_delay_ms(200);
1239                 diag = ixgbe_init_hw(hw);
1240         }
1241
1242         if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
1243                 diag = IXGBE_SUCCESS;
1244
1245         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1246                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1247                              "LOM.  Please be aware there may be issues associated "
1248                              "with your hardware.");
1249                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1250                              "please contact your Intel or hardware representative "
1251                              "who provided you with this hardware.");
1252         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1253                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1254         if (diag) {
1255                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1256                 return -EIO;
1257         }
1258
1259         /* Reset the hw statistics */
1260         ixgbe_dev_stats_reset(eth_dev);
1261
1262         /* disable interrupt */
1263         ixgbe_disable_intr(hw);
1264
1265         /* reset mappings for queue statistics hw counters */
1266         ixgbe_reset_qstat_mappings(hw);
1267
1268         /* Allocate memory for storing MAC addresses */
1269         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1270                                                hw->mac.num_rar_entries, 0);
1271         if (eth_dev->data->mac_addrs == NULL) {
1272                 PMD_INIT_LOG(ERR,
1273                              "Failed to allocate %u bytes needed to store "
1274                              "MAC addresses",
1275                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1276                 return -ENOMEM;
1277         }
1278         /* Copy the permanent MAC address */
1279         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1280                         &eth_dev->data->mac_addrs[0]);
1281
1282         /* Allocate memory for storing hash filter MAC addresses */
1283         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1284                                                     IXGBE_VMDQ_NUM_UC_MAC, 0);
1285         if (eth_dev->data->hash_mac_addrs == NULL) {
1286                 PMD_INIT_LOG(ERR,
1287                              "Failed to allocate %d bytes needed to store MAC addresses",
1288                              ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1289                 return -ENOMEM;
1290         }
1291
1292         /* initialize the vfta */
1293         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1294
1295         /* initialize the hw strip bitmap*/
1296         memset(hwstrip, 0, sizeof(*hwstrip));
1297
1298         /* initialize PF if max_vfs not zero */
1299         ixgbe_pf_host_init(eth_dev);
1300
1301         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1302         /* let hardware know driver is loaded */
1303         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1304         /* Set PF Reset Done bit so PF/VF mailbox ops can work */
1305         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1306         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1307         IXGBE_WRITE_FLUSH(hw);
1308
1309         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1310                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1311                              (int) hw->mac.type, (int) hw->phy.type,
1312                              (int) hw->phy.sfp_type);
1313         else
1314                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1315                              (int) hw->mac.type, (int) hw->phy.type);
1316
1317         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1318                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1319                      pci_dev->id.device_id);
1320
1321         rte_intr_callback_register(intr_handle,
1322                                    ixgbe_dev_interrupt_handler, eth_dev);
1323
1324         /* enable uio/vfio intr/eventfd mapping */
1325         rte_intr_enable(intr_handle);
1326
1327         /* enable support intr */
1328         ixgbe_enable_intr(eth_dev);
1329
1330         /* initialize filter info */
1331         memset(filter_info, 0,
1332                sizeof(struct ixgbe_filter_info));
1333
1334         /* initialize 5tuple filter list */
1335         TAILQ_INIT(&filter_info->fivetuple_list);
1336
1337         /* initialize flow director filter list & hash */
1338         ixgbe_fdir_filter_init(eth_dev);
1339
1340         /* initialize l2 tunnel filter list & hash */
1341         ixgbe_l2_tn_filter_init(eth_dev);
1342
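        /* initialize the global filter lists used by the rte_flow API */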
1343         TAILQ_INIT(&filter_ntuple_list);
1344         TAILQ_INIT(&filter_ethertype_list);
1345         TAILQ_INIT(&filter_syn_list);
1346         TAILQ_INIT(&filter_fdir_list);
1347         TAILQ_INIT(&filter_l2_tunnel_list);
1348         TAILQ_INIT(&ixgbe_flow_list);
1349
1350         /* initialize bandwidth configuration info */
1351         memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
1352
1353         return 0;
1354 }
1355
1356 static int
1357 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1358 {
1359         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1360         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1361         struct ixgbe_hw *hw;
1362
1363         PMD_INIT_FUNC_TRACE();
1364
1365         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1366                 return -EPERM;
1367
1368         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1369
1370         if (hw->adapter_stopped == 0)
1371                 ixgbe_dev_close(eth_dev);
1372
1373         eth_dev->dev_ops = NULL;
1374         eth_dev->rx_pkt_burst = NULL;
1375         eth_dev->tx_pkt_burst = NULL;
1376
1377         /* Unlock any pending hardware semaphore */
1378         ixgbe_swfw_lock_reset(hw);
1379
1380         /* disable uio intr before callback unregister */
1381         rte_intr_disable(intr_handle);
1382         rte_intr_callback_unregister(intr_handle,
1383                                      ixgbe_dev_interrupt_handler, eth_dev);
1384
1385         /* uninitialize PF if max_vfs not zero */
1386         ixgbe_pf_host_uninit(eth_dev);
1387
1388         rte_free(eth_dev->data->mac_addrs);
1389         eth_dev->data->mac_addrs = NULL;
1390
1391         rte_free(eth_dev->data->hash_mac_addrs);
1392         eth_dev->data->hash_mac_addrs = NULL;
1393
1394         /* remove all the fdir filters & hash */
1395         ixgbe_fdir_filter_uninit(eth_dev);
1396
1397         /* remove all the L2 tunnel filters & hash */
1398         ixgbe_l2_tn_filter_uninit(eth_dev);
1399
1400         /* Remove all ntuple filters of the device */
1401         ixgbe_ntuple_filter_uninit(eth_dev);
1402
1403         /* clear all the filters list */
1404         ixgbe_filterlist_flush();
1405
1406         return 0;
1407 }
1408
1409 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
1410 {
1411         struct ixgbe_filter_info *filter_info =
1412                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1413         struct ixgbe_5tuple_filter *p_5tuple;
1414
1415         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
1416                 TAILQ_REMOVE(&filter_info->fivetuple_list,
1417                              p_5tuple,
1418                              entries);
1419                 rte_free(p_5tuple);
1420         }
1421         memset(filter_info->fivetuple_mask, 0,
1422                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1423
1424         return 0;
1425 }
1426
1427 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
1428 {
1429         struct ixgbe_hw_fdir_info *fdir_info =
1430                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1431         struct ixgbe_fdir_filter *fdir_filter;
1432
1433         if (fdir_info->hash_map)
1434                 rte_free(fdir_info->hash_map);
1435         if (fdir_info->hash_handle)
1436                 rte_hash_free(fdir_info->hash_handle);
1437
1438         while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1439                 TAILQ_REMOVE(&fdir_info->fdir_list,
1440                              fdir_filter,
1441                              entries);
1442                 rte_free(fdir_filter);
1443         }
1444
1445         return 0;
1446 }
1447
1448 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
1449 {
1450         struct ixgbe_l2_tn_info *l2_tn_info =
1451                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1452         struct ixgbe_l2_tn_filter *l2_tn_filter;
1453
1454         if (l2_tn_info->hash_map)
1455                 rte_free(l2_tn_info->hash_map);
1456         if (l2_tn_info->hash_handle)
1457                 rte_hash_free(l2_tn_info->hash_handle);
1458
1459         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
1460                 TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
1461                              l2_tn_filter,
1462                              entries);
1463                 rte_free(l2_tn_filter);
1464         }
1465
1466         return 0;
1467 }
1468
1469 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
1470 {
1471         struct ixgbe_hw_fdir_info *fdir_info =
1472                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1473         char fdir_hash_name[RTE_HASH_NAMESIZE];
1474         struct rte_hash_parameters fdir_hash_params = {
1475                 .name = fdir_hash_name,
1476                 .entries = IXGBE_MAX_FDIR_FILTER_NUM,
1477                 .key_len = sizeof(union ixgbe_atr_input),
1478                 .hash_func = rte_hash_crc,
1479                 .hash_func_init_val = 0,
1480                 .socket_id = rte_socket_id(),
1481         };
1482
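        /* The hash table maps a flow director input key (union ixgbe_atr_input)
         * to an index; hash_map[index] then stores the pointer to the filter.
         */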
1483         TAILQ_INIT(&fdir_info->fdir_list);
1484         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1485                  "fdir_%s", eth_dev->device->name);
1486         fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
1487         if (!fdir_info->hash_handle) {
1488                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1489                 return -EINVAL;
1490         }
1491         fdir_info->hash_map = rte_zmalloc("ixgbe",
1492                                           sizeof(struct ixgbe_fdir_filter *) *
1493                                           IXGBE_MAX_FDIR_FILTER_NUM,
1494                                           0);
1495         if (!fdir_info->hash_map) {
1496                 PMD_INIT_LOG(ERR,
1497                              "Failed to allocate memory for fdir hash map!");
1498                 return -ENOMEM;
1499         }
1500         fdir_info->mask_added = FALSE;
1501
1502         return 0;
1503 }
1504
1505 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
1506 {
1507         struct ixgbe_l2_tn_info *l2_tn_info =
1508                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1509         char l2_tn_hash_name[RTE_HASH_NAMESIZE];
1510         struct rte_hash_parameters l2_tn_hash_params = {
1511                 .name = l2_tn_hash_name,
1512                 .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
1513                 .key_len = sizeof(struct ixgbe_l2_tn_key),
1514                 .hash_func = rte_hash_crc,
1515                 .hash_func_init_val = 0,
1516                 .socket_id = rte_socket_id(),
1517         };
1518
1519         TAILQ_INIT(&l2_tn_info->l2_tn_list);
1520         snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
1521                  "l2_tn_%s", eth_dev->device->name);
1522         l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
1523         if (!l2_tn_info->hash_handle) {
1524                 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
1525                 return -EINVAL;
1526         }
1527         l2_tn_info->hash_map = rte_zmalloc("ixgbe",
1528                                    sizeof(struct ixgbe_l2_tn_filter *) *
1529                                    IXGBE_MAX_L2_TN_FILTER_NUM,
1530                                    0);
1531         if (!l2_tn_info->hash_map) {
1532                 PMD_INIT_LOG(ERR,
1533                         "Failed to allocate memory for L2 TN hash map!");
1534                 return -ENOMEM;
1535         }
1536         l2_tn_info->e_tag_en = FALSE;
1537         l2_tn_info->e_tag_fwd_en = FALSE;
1538         l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
1539
1540         return 0;
1541 }
1542 /*
1543  * Negotiate mailbox API version with the PF.
1544  * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
1545  * Then we try to negotiate starting with the most recent one.
1546  * If all negotiation attempts fail, then we will proceed with
1547  * the default one (ixgbe_mbox_api_10).
1548  */
1549 static void
1550 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1551 {
1552         int32_t i;
1553
1554         /* start with highest supported, proceed down */
1555         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1556                 ixgbe_mbox_api_12,
1557                 ixgbe_mbox_api_11,
1558                 ixgbe_mbox_api_10,
1559         };
1560
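        /* The loop body is intentionally empty: each iteration tries to
         * negotiate sup_ver[i] and stops at the first version the PF accepts.
         */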
1561         for (i = 0;
1562                         i != RTE_DIM(sup_ver) &&
1563                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1564                         i++)
1565                 ;
1566 }
1567
1568 static void
1569 generate_random_mac_addr(struct ether_addr *mac_addr)
1570 {
1571         uint64_t random;
1572
1573         /* Set Organizationally Unique Identifier (OUI) prefix. */
1574         mac_addr->addr_bytes[0] = 0x00;
1575         mac_addr->addr_bytes[1] = 0x09;
1576         mac_addr->addr_bytes[2] = 0xC0;
1577         /* Force indication of locally assigned MAC address. */
1578         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1579         /* Generate the last 3 bytes of the MAC address with a random number. */
1580         random = rte_rand();
1581         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1582 }
1583
1584 /*
1585  * Virtual Function device init
1586  */
1587 static int
1588 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1589 {
1590         int diag;
1591         uint32_t tc, tcs;
1592         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1593         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1594         struct ixgbe_hw *hw =
1595                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1596         struct ixgbe_vfta *shadow_vfta =
1597                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1598         struct ixgbe_hwstrip *hwstrip =
1599                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1600         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1601
1602         PMD_INIT_FUNC_TRACE();
1603
1604         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1605         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1606         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1607
1608         /* for secondary processes, we don't initialise any further as primary
1609          * has already done this work. Only check we don't need a different
1610          * RX function
1611          */
1612         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1613                 struct ixgbe_tx_queue *txq;
1614                 /* TX queue function in primary, set by last queue initialized;
1615                  * TX queues may not have been initialized by the primary process
1616                  */
1617                 if (eth_dev->data->tx_queues) {
1618                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1619                         ixgbe_set_tx_function(eth_dev, txq);
1620                 } else {
1621                         /* Use default TX function if we get here */
1622                         PMD_INIT_LOG(NOTICE,
1623                                      "No TX queues configured yet. Using default TX function.");
1624                 }
1625
1626                 ixgbe_set_rx_function(eth_dev);
1627
1628                 return 0;
1629         }
1630
1631         rte_eth_copy_pci_info(eth_dev, pci_dev);
1632         eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
1633
1634         hw->device_id = pci_dev->id.device_id;
1635         hw->vendor_id = pci_dev->id.vendor_id;
1636         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1637
1638         /* initialize the vfta */
1639         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1640
1641         /* initialize the hw strip bitmap*/
1642         memset(hwstrip, 0, sizeof(*hwstrip));
1643
1644         /* Initialize the shared code (base driver) */
1645         diag = ixgbe_init_shared_code(hw);
1646         if (diag != IXGBE_SUCCESS) {
1647                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1648                 return -EIO;
1649         }
1650
1651         /* init_mailbox_params */
1652         hw->mbx.ops.init_params(hw);
1653
1654         /* Reset the hw statistics */
1655         ixgbevf_dev_stats_reset(eth_dev);
1656
1657         /* Disable the interrupts for VF */
1658         ixgbevf_intr_disable(hw);
1659
1660         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1661         diag = hw->mac.ops.reset_hw(hw);
1662
1663         /*
1664          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1665          * the underlying PF driver has not assigned a MAC address to the VF.
1666          * In this case, assign a random MAC address.
1667          */
1668         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1669                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1670                 return diag;
1671         }
1672
1673         /* negotiate mailbox API version to use with the PF. */
1674         ixgbevf_negotiate_api(hw);
1675
1676         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1677         ixgbevf_get_queues(hw, &tcs, &tc);
1678
1679         /* Allocate memory for storing MAC addresses */
1680         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1681                                                hw->mac.num_rar_entries, 0);
1682         if (eth_dev->data->mac_addrs == NULL) {
1683                 PMD_INIT_LOG(ERR,
1684                              "Failed to allocate %u bytes needed to store "
1685                              "MAC addresses",
1686                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1687                 return -ENOMEM;
1688         }
1689
1690         /* Generate a random MAC address, if none was assigned by PF. */
1691         if (is_zero_ether_addr(perm_addr)) {
1692                 generate_random_mac_addr(perm_addr);
1693                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1694                 if (diag) {
1695                         rte_free(eth_dev->data->mac_addrs);
1696                         eth_dev->data->mac_addrs = NULL;
1697                         return diag;
1698                 }
1699                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1700                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1701                              "%02x:%02x:%02x:%02x:%02x:%02x",
1702                              perm_addr->addr_bytes[0],
1703                              perm_addr->addr_bytes[1],
1704                              perm_addr->addr_bytes[2],
1705                              perm_addr->addr_bytes[3],
1706                              perm_addr->addr_bytes[4],
1707                              perm_addr->addr_bytes[5]);
1708         }
1709
1710         /* Copy the permanent MAC address */
1711         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1712
1713         /* reset the hardware with the new settings */
1714         diag = hw->mac.ops.start_hw(hw);
1715         switch (diag) {
1716         case  0:
1717                 break;
1718
1719         default:
1720                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1721                 return -EIO;
1722         }
1723
1724         rte_intr_callback_register(intr_handle,
1725                                    ixgbevf_dev_interrupt_handler, eth_dev);
1726         rte_intr_enable(intr_handle);
1727         ixgbevf_intr_enable(hw);
1728
1729         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1730                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1731                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1732
1733         return 0;
1734 }
1735
1736 /* Virtual Function device uninit */
1737
1738 static int
1739 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1740 {
1741         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1742         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1743         struct ixgbe_hw *hw;
1744
1745         PMD_INIT_FUNC_TRACE();
1746
1747         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1748                 return -EPERM;
1749
1750         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1751
1752         if (hw->adapter_stopped == 0)
1753                 ixgbevf_dev_close(eth_dev);
1754
1755         eth_dev->dev_ops = NULL;
1756         eth_dev->rx_pkt_burst = NULL;
1757         eth_dev->tx_pkt_burst = NULL;
1758
1759         /* Disable the interrupts for VF */
1760         ixgbevf_intr_disable(hw);
1761
1762         rte_free(eth_dev->data->mac_addrs);
1763         eth_dev->data->mac_addrs = NULL;
1764
1765         rte_intr_disable(intr_handle);
1766         rte_intr_callback_unregister(intr_handle,
1767                                      ixgbevf_dev_interrupt_handler, eth_dev);
1768
1769         return 0;
1770 }
1771
1772 static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1773         struct rte_pci_device *pci_dev)
1774 {
1775         return rte_eth_dev_pci_generic_probe(pci_dev,
1776                 sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
1777 }
1778
1779 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
1780 {
1781         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
1782 }
1783
1784 static struct rte_pci_driver rte_ixgbe_pmd = {
1785         .id_table = pci_id_ixgbe_map,
1786         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1787         .probe = eth_ixgbe_pci_probe,
1788         .remove = eth_ixgbe_pci_remove,
1789 };
1790
1791 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1792         struct rte_pci_device *pci_dev)
1793 {
1794         return rte_eth_dev_pci_generic_probe(pci_dev,
1795                 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
1796 }
1797
1798 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
1799 {
1800         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
1801 }
1802
1803 /*
1804  * virtual function driver struct
1805  */
1806 static struct rte_pci_driver rte_ixgbevf_pmd = {
1807         .id_table = pci_id_ixgbevf_map,
1808         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1809         .probe = eth_ixgbevf_pci_probe,
1810         .remove = eth_ixgbevf_pci_remove,
1811 };
1812
1813 static int
1814 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1815 {
1816         struct ixgbe_hw *hw =
1817                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1818         struct ixgbe_vfta *shadow_vfta =
1819                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1820         uint32_t vfta;
1821         uint32_t vid_idx;
1822         uint32_t vid_bit;
1823
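        /* The VFTA is 128 32-bit registers covering all 4096 VLAN IDs: bits
         * 5..11 of the VLAN ID select the register, bits 0..4 the bit in it.
         */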
1824         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1825         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1826         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1827         if (on)
1828                 vfta |= vid_bit;
1829         else
1830                 vfta &= ~vid_bit;
1831         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1832
1833         /* update local VFTA copy */
1834         shadow_vfta->vfta[vid_idx] = vfta;
1835
1836         return 0;
1837 }
1838
1839 static void
1840 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1841 {
1842         if (on)
1843                 ixgbe_vlan_hw_strip_enable(dev, queue);
1844         else
1845                 ixgbe_vlan_hw_strip_disable(dev, queue);
1846 }
1847
1848 static int
1849 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1850                     enum rte_vlan_type vlan_type,
1851                     uint16_t tpid)
1852 {
1853         struct ixgbe_hw *hw =
1854                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1855         int ret = 0;
1856         uint32_t reg;
1857         uint32_t qinq;
1858
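        /* Double VLAN (QinQ) is enabled when the GDV bit is set in DMATXCTL */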
1859         qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1860         qinq &= IXGBE_DMATXCTL_GDV;
1861
1862         switch (vlan_type) {
1863         case ETH_VLAN_TYPE_INNER:
1864                 if (qinq) {
1865                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1866                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1867                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1868                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1869                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1870                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1871                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1872                 } else {
1873                         ret = -ENOTSUP;
1874                         PMD_DRV_LOG(ERR, "Inner type is not supported"
1875                                     " by single VLAN");
1876                 }
1877                 break;
1878         case ETH_VLAN_TYPE_OUTER:
1879                 if (qinq) {
1880                         /* Only the high 16 bits are valid */
1881                         IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1882                                         IXGBE_EXVET_VET_EXT_SHIFT);
1883                 } else {
1884                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1885                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1886                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1887                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1888                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1889                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1890                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1891                 }
1892
1893                 break;
1894         default:
1895                 ret = -EINVAL;
1896                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1897                 break;
1898         }
1899
1900         return ret;
1901 }
1902
1903 void
1904 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1905 {
1906         struct ixgbe_hw *hw =
1907                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1908         uint32_t vlnctrl;
1909
1910         PMD_INIT_FUNC_TRACE();
1911
1912         /* Filter Table Disable */
1913         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1914         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1915
1916         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1917 }
1918
1919 void
1920 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1921 {
1922         struct ixgbe_hw *hw =
1923                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1924         struct ixgbe_vfta *shadow_vfta =
1925                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1926         uint32_t vlnctrl;
1927         uint16_t i;
1928
1929         PMD_INIT_FUNC_TRACE();
1930
1931         /* Filter Table Enable */
1932         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1933         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1934         vlnctrl |= IXGBE_VLNCTRL_VFE;
1935
1936         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1937
1938         /* write whatever is in local vfta copy */
1939         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1940                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1941 }
1942
1943 static void
1944 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1945 {
1946         struct ixgbe_hwstrip *hwstrip =
1947                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1948         struct ixgbe_rx_queue *rxq;
1949
1950         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1951                 return;
1952
1953         if (on)
1954                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1955         else
1956                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1957
1958         if (queue >= dev->data->nb_rx_queues)
1959                 return;
1960
1961         rxq = dev->data->rx_queues[queue];
1962
1963         if (on)
1964                 rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
1965         else
1966                 rxq->vlan_flags = PKT_RX_VLAN_PKT;
1967 }
1968
1969 static void
1970 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1971 {
1972         struct ixgbe_hw *hw =
1973                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1974         uint32_t ctrl;
1975
1976         PMD_INIT_FUNC_TRACE();
1977
1978         if (hw->mac.type == ixgbe_mac_82598EB) {
1979                 /* No queue level support */
1980                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue level hw strip");
1981                 return;
1982         }
1983
1984         /* On other 10G NICs, the VLAN strip can be set up per queue in RXDCTL */
1985         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1986         ctrl &= ~IXGBE_RXDCTL_VME;
1987         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1988
1989         /* record this setting for HW strip per queue */
1990         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1991 }
1992
1993 static void
1994 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1995 {
1996         struct ixgbe_hw *hw =
1997                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1998         uint32_t ctrl;
1999
2000         PMD_INIT_FUNC_TRACE();
2001
2002         if (hw->mac.type == ixgbe_mac_82598EB) {
2003                 /* No queue level supported */
2004                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue level hw strip");
2005                 return;
2006         }
2007
2008         /* On other 10G NICs, the VLAN strip can be set up per queue in RXDCTL */
2009         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2010         ctrl |= IXGBE_RXDCTL_VME;
2011         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2012
2013         /* record this setting for HW strip per queue */
2014         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2015 }
2016
2017 void
2018 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
2019 {
2020         struct ixgbe_hw *hw =
2021                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2022         uint32_t ctrl;
2023         uint16_t i;
2024         struct ixgbe_rx_queue *rxq;
2025
2026         PMD_INIT_FUNC_TRACE();
2027
2028         if (hw->mac.type == ixgbe_mac_82598EB) {
2029                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2030                 ctrl &= ~IXGBE_VLNCTRL_VME;
2031                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2032         } else {
2033                 /* On other 10G NICs, the VLAN strip can be set up per queue in RXDCTL */
2034                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2035                         rxq = dev->data->rx_queues[i];
2036                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2037                         ctrl &= ~IXGBE_RXDCTL_VME;
2038                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2039
2040                         /* record this setting for HW strip per queue */
2041                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
2042                 }
2043         }
2044 }
2045
2046 void
2047 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
2048 {
2049         struct ixgbe_hw *hw =
2050                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2051         uint32_t ctrl;
2052         uint16_t i;
2053         struct ixgbe_rx_queue *rxq;
2054
2055         PMD_INIT_FUNC_TRACE();
2056
2057         if (hw->mac.type == ixgbe_mac_82598EB) {
2058                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2059                 ctrl |= IXGBE_VLNCTRL_VME;
2060                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2061         } else {
2062                 /* On other 10G NICs, the VLAN strip can be set up per queue in RXDCTL */
2063                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2064                         rxq = dev->data->rx_queues[i];
2065                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2066                         ctrl |= IXGBE_RXDCTL_VME;
2067                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2068
2069                         /* record this setting for HW strip per queue */
2070                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
2071                 }
2072         }
2073 }
2074
2075 static void
2076 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2077 {
2078         struct ixgbe_hw *hw =
2079                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2080         uint32_t ctrl;
2081
2082         PMD_INIT_FUNC_TRACE();
2083
2084         /* DMATXCTL: Generic Double VLAN Disable */
2085         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2086         ctrl &= ~IXGBE_DMATXCTL_GDV;
2087         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2088
2089         /* CTRL_EXT: Global Double VLAN Disable */
2090         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2091         ctrl &= ~IXGBE_EXTENDED_VLAN;
2092         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2093
2094 }
2095
2096 static void
2097 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2098 {
2099         struct ixgbe_hw *hw =
2100                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2101         uint32_t ctrl;
2102
2103         PMD_INIT_FUNC_TRACE();
2104
2105         /* DMATXCTL: Generic Double VLAN Enable */
2106         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2107         ctrl |= IXGBE_DMATXCTL_GDV;
2108         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2109
2110         /* CTRL_EXT: Global Double VLAN Enable */
2111         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2112         ctrl |= IXGBE_EXTENDED_VLAN;
2113         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2114
2115         /* Clear pooling mode of PFVTCTL. It's required by X550. */
2116         if (hw->mac.type == ixgbe_mac_X550 ||
2117             hw->mac.type == ixgbe_mac_X550EM_x ||
2118             hw->mac.type == ixgbe_mac_X550EM_a) {
2119                 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2120                 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2121                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2122         }
2123
2124         /*
2125          * The VET EXT field in the EXVET register is 0x8100 by default, so
2126          * no need to change it. The same applies to the VT field of DMATXCTL.
2127          */
2128 }
2129
2130 static void
2131 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2132 {
2133         if (mask & ETH_VLAN_STRIP_MASK) {
2134                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2135                         ixgbe_vlan_hw_strip_enable_all(dev);
2136                 else
2137                         ixgbe_vlan_hw_strip_disable_all(dev);
2138         }
2139
2140         if (mask & ETH_VLAN_FILTER_MASK) {
2141                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2142                         ixgbe_vlan_hw_filter_enable(dev);
2143                 else
2144                         ixgbe_vlan_hw_filter_disable(dev);
2145         }
2146
2147         if (mask & ETH_VLAN_EXTEND_MASK) {
2148                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2149                         ixgbe_vlan_hw_extend_enable(dev);
2150                 else
2151                         ixgbe_vlan_hw_extend_disable(dev);
2152         }
2153 }
2154
2155 static void
2156 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2157 {
2158         struct ixgbe_hw *hw =
2159                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2160         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2161         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2162
2163         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2164         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2165 }
2166
2167 static int
2168 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2169 {
2170         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2171
2172         switch (nb_rx_q) {
2173         case 1:
2174         case 2:
2175                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2176                 break;
2177         case 4:
2178                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2179                 break;
2180         default:
2181                 return -EINVAL;
2182         }
2183
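        /* Each pool (VF) gets nb_rx_q queues; the PF's default pool queues
         * start right after the queues owned by the max_vfs VF pools.
         */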
2184         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
2185         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
2186
2187         return 0;
2188 }
2189
2190 static int
2191 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2192 {
2193         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2194         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2195         uint16_t nb_rx_q = dev->data->nb_rx_queues;
2196         uint16_t nb_tx_q = dev->data->nb_tx_queues;
2197
2198         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2199                 /* check multi-queue mode */
2200                 switch (dev_conf->rxmode.mq_mode) {
2201                 case ETH_MQ_RX_VMDQ_DCB:
2202                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2203                         break;
2204                 case ETH_MQ_RX_VMDQ_DCB_RSS:
2205                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
2206                         PMD_INIT_LOG(ERR, "SRIOV active,"
2207                                         " unsupported mq_mode rx %d.",
2208                                         dev_conf->rxmode.mq_mode);
2209                         return -EINVAL;
2210                 case ETH_MQ_RX_RSS:
2211                 case ETH_MQ_RX_VMDQ_RSS:
2212                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2213                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2214                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2215                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2216                                                 " invalid queue number"
2217                                                 " for VMDQ RSS, allowed"
2218                                                 " values are 1, 2 or 4.");
2219                                         return -EINVAL;
2220                                 }
2221                         break;
2222                 case ETH_MQ_RX_VMDQ_ONLY:
2223                 case ETH_MQ_RX_NONE:
2224                         /* if no mq mode was configured, use the default scheme */
2225                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2226                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
2227                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
2228                         break;
2229                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
2230                         /* SRIOV only works in VMDq enabled mode */
2231                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2232                                         " wrong mq_mode rx %d.",
2233                                         dev_conf->rxmode.mq_mode);
2234                         return -EINVAL;
2235                 }
2236
2237                 switch (dev_conf->txmode.mq_mode) {
2238                 case ETH_MQ_TX_VMDQ_DCB:
2239                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2240                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2241                         break;
2242                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2243                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2244                         break;
2245                 }
2246
2247                 /* check valid queue number */
2248                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2249                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2250                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2251                                         " nb_rx_q=%d nb_tx_q=%d queue number"
2252                                         " must be less than or equal to %d.",
2253                                         nb_rx_q, nb_tx_q,
2254                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2255                         return -EINVAL;
2256                 }
2257         } else {
2258                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2259                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2260                                           " not supported.");
2261                         return -EINVAL;
2262                 }
2263                 /* check configuration for vmdq+dcb mode */
2264                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2265                         const struct rte_eth_vmdq_dcb_conf *conf;
2266
2267                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2268                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2269                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
2270                                 return -EINVAL;
2271                         }
2272                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2273                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2274                                conf->nb_queue_pools == ETH_32_POOLS)) {
2275                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2276                                                 " nb_queue_pools must be %d or %d.",
2277                                                 ETH_16_POOLS, ETH_32_POOLS);
2278                                 return -EINVAL;
2279                         }
2280                 }
2281                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2282                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
2283
2284                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2285                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2286                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
2287                                 return -EINVAL;
2288                         }
2289                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2290                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2291                                conf->nb_queue_pools == ETH_32_POOLS)) {
2292                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2293                                                 " nb_queue_pools != %d and"
2294                                                 " nb_queue_pools != %d.",
2295                                                 ETH_16_POOLS, ETH_32_POOLS);
2296                                 return -EINVAL;
2297                         }
2298                 }
2299
2300                 /* For DCB mode check our configuration before we go further */
2301                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2302                         const struct rte_eth_dcb_rx_conf *conf;
2303
2304                         if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
2305                                 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
2306                                                  IXGBE_DCB_NB_QUEUES);
2307                                 return -EINVAL;
2308                         }
2309                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2310                         if (!(conf->nb_tcs == ETH_4_TCS ||
2311                                conf->nb_tcs == ETH_8_TCS)) {
2312                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2313                                                 " and nb_tcs != %d.",
2314                                                 ETH_4_TCS, ETH_8_TCS);
2315                                 return -EINVAL;
2316                         }
2317                 }
2318
2319                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2320                         const struct rte_eth_dcb_tx_conf *conf;
2321
2322                         if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
2323                                 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
2324                                                  IXGBE_DCB_NB_QUEUES);
2325                                 return -EINVAL;
2326                         }
2327                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2328                         if (!(conf->nb_tcs == ETH_4_TCS ||
2329                                conf->nb_tcs == ETH_8_TCS)) {
2330                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2331                                                 " and nb_tcs != %d.",
2332                                                 ETH_4_TCS, ETH_8_TCS);
2333                                 return -EINVAL;
2334                         }
2335                 }
2336
2337                 /*
2338                  * When DCB/VT is off, the maximum number of queues changes,
2339                  * except on 82598EB, where it remains constant.
2340                  */
2341                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2342                                 hw->mac.type != ixgbe_mac_82598EB) {
2343                         if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2344                                 PMD_INIT_LOG(ERR,
2345                                              "Neither VT nor DCB are enabled, "
2346                                              "nb_tx_q > %d.",
2347                                              IXGBE_NONE_MODE_TX_NB_QUEUES);
2348                                 return -EINVAL;
2349                         }
2350                 }
2351         }
2352         return 0;
2353 }
2354
2355 static int
2356 ixgbe_dev_configure(struct rte_eth_dev *dev)
2357 {
2358         struct ixgbe_interrupt *intr =
2359                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2360         struct ixgbe_adapter *adapter =
2361                 (struct ixgbe_adapter *)dev->data->dev_private;
2362         int ret;
2363
2364         PMD_INIT_FUNC_TRACE();
2365         /* multiple queue mode checking */
2366         ret  = ixgbe_check_mq_mode(dev);
2367         if (ret != 0) {
2368                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2369                             ret);
2370                 return ret;
2371         }
2372
2373         /* set flag to update link status after init */
2374         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2375
2376         /*
2377          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
2378          * allocation or vector Rx preconditions, we will reset it.
2379          */
2380         adapter->rx_bulk_alloc_allowed = true;
2381         adapter->rx_vec_allowed = true;
2382
2383         return 0;
2384 }
2385
2386 static void
2387 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2388 {
2389         struct ixgbe_hw *hw =
2390                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2391         struct ixgbe_interrupt *intr =
2392                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2393         uint32_t gpie;
2394
2395         /* only set it up on X550EM_X */
2396         if (hw->mac.type == ixgbe_mac_X550EM_x) {
2397                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2398                 gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2399                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2400                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2401                         intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2402         }
2403 }
2404
2405 int
2406 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
2407                         uint16_t tx_rate, uint64_t q_msk)
2408 {
2409         struct ixgbe_hw *hw;
2410         struct ixgbe_vf_info *vfinfo;
2411         struct rte_eth_link link;
2412         uint8_t  nb_q_per_pool;
2413         uint32_t queue_stride;
2414         uint32_t queue_idx, idx = 0, vf_idx;
2415         uint32_t queue_end;
2416         uint16_t total_rate = 0;
2417         struct rte_pci_device *pci_dev;
2418
2419         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2420         rte_eth_link_get_nowait(dev->data->port_id, &link);
2421
2422         if (vf >= pci_dev->max_vfs)
2423                 return -EINVAL;
2424
2425         if (tx_rate > link.link_speed)
2426                 return -EINVAL;
2427
2428         if (q_msk == 0)
2429                 return 0;
2430
2431         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2432         vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
2433         nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
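        /* Queues are assigned to pools in fixed-size strides: compute the
         * contiguous queue range owned by this VF and validate it.
         */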
2434         queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2435         queue_idx = vf * queue_stride;
2436         queue_end = queue_idx + nb_q_per_pool - 1;
2437         if (queue_end >= hw->mac.max_tx_queues)
2438                 return -EINVAL;
2439
2440         if (vfinfo) {
2441                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
2442                         if (vf_idx == vf)
2443                                 continue;
2444                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
2445                                 idx++)
2446                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
2447                 }
2448         } else {
2449                 return -EINVAL;
2450         }
2451
2452         /* Store tx_rate for this vf. */
2453         for (idx = 0; idx < nb_q_per_pool; idx++) {
2454                 if (((uint64_t)0x1 << idx) & q_msk) {
2455                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
2456                                 vfinfo[vf].tx_rate[idx] = tx_rate;
2457                         total_rate += tx_rate;
2458                 }
2459         }
2460
2461         if (total_rate > dev->data->dev_link.link_speed) {
2462                 /* Reset the stored TX rate of the VF if it would exceed
2463                  * the link speed.
2464                  */
2465                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
2466                 return -EINVAL;
2467         }
2468
2469         /* Set RTTBCNRC of each queue/pool for vf X  */
2470         for (; queue_idx <= queue_end; queue_idx++) {
2471                 if (0x1 & q_msk)
2472                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
2473                 q_msk = q_msk >> 1;
2474         }
2475
2476         return 0;
2477 }
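/*
 * Worked example (illustrative only; the figures are assumptions): with
 * IXGBE_MAX_RX_QUEUE_NUM = 128, 32 active pools and nb_q_per_pool = 4,
 * a call for vf = 2 with q_msk = 0x5 resolves to
 *
 *   queue_stride = 128 / 32  = 4
 *   queue_idx    = 2 * 4     = 8
 *   queue_end    = 8 + 4 - 1 = 11
 *
 * so only queues 8 and 10 (bits 0 and 2 of q_msk) have tx_rate programmed
 * into RTTBCNRC by the loop above. Applications typically reach this path
 * through the rte_pmd_ixgbe_set_vf_rate_limit() wrapper rather than
 * calling it directly.
 */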
2478
2479 /*
2480  * Configure device link speed and setup link.
2481  * It returns 0 on success.
2482  */
2483 static int
2484 ixgbe_dev_start(struct rte_eth_dev *dev)
2485 {
2486         struct ixgbe_hw *hw =
2487                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2488         struct ixgbe_vf_info *vfinfo =
2489                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2490         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2491         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2492         uint32_t intr_vector = 0;
2493         int err, link_up = 0, negotiate = 0;
2494         uint32_t speed = 0;
2495         int mask = 0;
2496         int status;
2497         uint16_t vf, idx;
2498         uint32_t *link_speeds;
2499
2500         PMD_INIT_FUNC_TRACE();
2501
2502         /* IXGBE devices don't support:
2503          *    - half duplex (checked afterwards for valid speeds)
2504          *    - fixed speed: TODO implement
2505          */
2506         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2507                 PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fixed speed not supported",
2508                              dev->data->port_id);
2509                 return -EINVAL;
2510         }
2511
2512         /* disable uio/vfio intr/eventfd mapping */
2513         rte_intr_disable(intr_handle);
2514
2515         /* stop adapter */
2516         hw->adapter_stopped = 0;
2517         ixgbe_stop_adapter(hw);
2518
2519         /* reinitialize adapter
2520          * this calls reset and start
2521          */
2522         status = ixgbe_pf_reset_hw(hw);
2523         if (status != 0)
2524                 return -1;
2525         hw->mac.ops.start_hw(hw);
2526         hw->mac.get_link_status = true;
2527
2528         /* configure PF module if SRIOV enabled */
2529         ixgbe_pf_host_configure(dev);
2530
2531         ixgbe_dev_phy_intr_setup(dev);
2532
2533         /* check and configure queue intr-vector mapping */
2534         if ((rte_intr_cap_multiple(intr_handle) ||
2535              !RTE_ETH_DEV_SRIOV(dev).active) &&
2536             dev->data->dev_conf.intr_conf.rxq != 0) {
2537                 intr_vector = dev->data->nb_rx_queues;
2538                 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2539                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2540                                         IXGBE_MAX_INTR_QUEUE_NUM);
2541                         return -ENOTSUP;
2542                 }
2543                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2544                         return -1;
2545         }
2546
2547         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2548                 intr_handle->intr_vec =
2549                         rte_zmalloc("intr_vec",
2550                                     dev->data->nb_rx_queues * sizeof(int), 0);
2551                 if (intr_handle->intr_vec == NULL) {
2552                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2553                                      " intr_vec", dev->data->nb_rx_queues);
2554                         return -ENOMEM;
2555                 }
2556         }
2557
2558         /* configure MSI-X for sleep until Rx interrupt */
2559         ixgbe_configure_msix(dev);
2560
2561         /* initialize transmission unit */
2562         ixgbe_dev_tx_init(dev);
2563
2564         /* This can fail when allocating mbufs for descriptor rings */
2565         err = ixgbe_dev_rx_init(dev);
2566         if (err) {
2567                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2568                 goto error;
2569         }
2570
2571         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2572                 ETH_VLAN_EXTEND_MASK;
2573         ixgbe_vlan_offload_set(dev, mask);
2574
2575         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2576                 /* Enable vlan filtering for VMDq */
2577                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2578         }
2579
2580         /* Configure DCB hw */
2581         ixgbe_configure_dcb(dev);
2582
2583         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2584                 err = ixgbe_fdir_configure(dev);
2585                 if (err)
2586                         goto error;
2587         }
2588
2589         /* Restore vf rate limit */
2590         if (vfinfo != NULL) {
2591                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
2592                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2593                                 if (vfinfo[vf].tx_rate[idx] != 0)
2594                                         ixgbe_set_vf_rate_limit(
2595                                                 dev, vf,
2596                                                 vfinfo[vf].tx_rate[idx],
2597                                                 1 << idx);
2598         }
2599
2600         ixgbe_restore_statistics_mapping(dev);
2601
2602         err = ixgbe_dev_rxtx_start(dev);
2603         if (err < 0) {
2604                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2605                 goto error;
2606         }
2607
2608         /* Skip link setup if loopback mode is enabled for 82599. */
2609         if (hw->mac.type == ixgbe_mac_82599EB &&
2610                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2611                 goto skip_link_setup;
2612
2613         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2614                 err = hw->mac.ops.setup_sfp(hw);
2615                 if (err)
2616                         goto error;
2617         }
2618
2619         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2620                 /* Turn on the copper */
2621                 ixgbe_set_phy_power(hw, true);
2622         } else {
2623                 /* Turn on the laser */
2624                 ixgbe_enable_tx_laser(hw);
2625         }
2626
2627         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2628         if (err)
2629                 goto error;
2630         dev->data->dev_link.link_status = link_up;
2631
2632         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2633         if (err)
2634                 goto error;
2635
2636         link_speeds = &dev->data->dev_conf.link_speeds;
2637         if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2638                         ETH_LINK_SPEED_10G)) {
2639                 PMD_INIT_LOG(ERR, "Invalid link setting");
2640                 goto error;
2641         }
2642
2643         speed = 0x0;
2644         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2645                 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
2646                                 IXGBE_LINK_SPEED_82599_AUTONEG :
2647                                 IXGBE_LINK_SPEED_82598_AUTONEG;
2648         } else {
2649                 if (*link_speeds & ETH_LINK_SPEED_10G)
2650                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2651                 if (*link_speeds & ETH_LINK_SPEED_1G)
2652                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2653                 if (*link_speeds & ETH_LINK_SPEED_100M)
2654                         speed |= IXGBE_LINK_SPEED_100_FULL;
2655         }
2656
2657         err = ixgbe_setup_link(hw, speed, link_up);
2658         if (err)
2659                 goto error;
2660
2661 skip_link_setup:
2662
2663         if (rte_intr_allow_others(intr_handle)) {
2664                 /* check if lsc interrupt is enabled */
2665                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2666                         ixgbe_dev_lsc_interrupt_setup(dev);
2667                 ixgbe_dev_macsec_interrupt_setup(dev);
2668         } else {
2669                 rte_intr_callback_unregister(intr_handle,
2670                                              ixgbe_dev_interrupt_handler, dev);
2671                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2672                         PMD_INIT_LOG(INFO, "lsc won't be enabled because"
2673                                      " of no interrupt multiplexing");
2674         }
2675
2676         /* check if rxq interrupt is enabled */
2677         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2678             rte_intr_dp_is_en(intr_handle))
2679                 ixgbe_dev_rxq_interrupt_setup(dev);
2680
2681         /* enable uio/vfio intr/eventfd mapping */
2682         rte_intr_enable(intr_handle);
2683
2684         /* resume enabled intr since hw reset */
2685         ixgbe_enable_intr(dev);
2686         ixgbe_l2_tunnel_conf(dev);
2687         ixgbe_filter_restore(dev);
2688
2689         return 0;
2690
2691 error:
2692         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2693         ixgbe_dev_clear_queues(dev);
2694         return -EIO;
2695 }
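/*
 * Usage sketch (illustrative; port_id and mbuf_pool are assumed variables):
 * this function is the PMD hook behind rte_eth_dev_start(). A minimal
 * application-side sequence that ends up here could look like:
 *
 *   struct rte_eth_conf port_conf = { 0 };
 *
 *   rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 512,
 *                          rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *   rte_eth_tx_queue_setup(port_id, 0, 512,
 *                          rte_eth_dev_socket_id(port_id), NULL);
 *   rte_eth_dev_start(port_id);
 *
 * Requesting ETH_LINK_SPEED_FIXED in port_conf.link_speeds would make this
 * function fail with -EINVAL, as checked above.
 */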
2696
2697 /*
2698  * Stop device: disable rx and tx functions to allow for reconfiguring.
2699  */
2700 static void
2701 ixgbe_dev_stop(struct rte_eth_dev *dev)
2702 {
2703         struct rte_eth_link link;
2704         struct ixgbe_hw *hw =
2705                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2706         struct ixgbe_vf_info *vfinfo =
2707                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2708         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2709         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2710         int vf;
2711
2712         PMD_INIT_FUNC_TRACE();
2713
2714         /* disable interrupts */
2715         ixgbe_disable_intr(hw);
2716
2717         /* reset the NIC */
2718         ixgbe_pf_reset_hw(hw);
2719         hw->adapter_stopped = 0;
2720
2721         /* stop adapter */
2722         ixgbe_stop_adapter(hw);
2723
2724         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2725                 vfinfo[vf].clear_to_send = false;
2726
2727         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2728                 /* Turn off the copper */
2729                 ixgbe_set_phy_power(hw, false);
2730         } else {
2731                 /* Turn off the laser */
2732                 ixgbe_disable_tx_laser(hw);
2733         }
2734
2735         ixgbe_dev_clear_queues(dev);
2736
2737         /* Clear stored conf */
2738         dev->data->scattered_rx = 0;
2739         dev->data->lro = 0;
2740
2741         /* Clear recorded link status */
2742         memset(&link, 0, sizeof(link));
2743         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2744
2745         if (!rte_intr_allow_others(intr_handle))
2746                 /* restore the default handler */
2747                 rte_intr_callback_register(intr_handle,
2748                                            ixgbe_dev_interrupt_handler,
2749                                            (void *)dev);
2750
2751         /* Clean datapath event and queue/vec mapping */
2752         rte_intr_efd_disable(intr_handle);
2753         if (intr_handle->intr_vec != NULL) {
2754                 rte_free(intr_handle->intr_vec);
2755                 intr_handle->intr_vec = NULL;
2756         }
2757 }
2758
2759 /*
2760  * Set device link up: enable tx.
2761  */
2762 static int
2763 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2764 {
2765         struct ixgbe_hw *hw =
2766                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2767         if (hw->mac.type == ixgbe_mac_82599EB) {
2768 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2769                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2770                         /* Not supported in bypass mode */
2771                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2772                                      "by device id 0x%x", hw->device_id);
2773                         return -ENOTSUP;
2774                 }
2775 #endif
2776         }
2777
2778         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2779                 /* Turn on the copper */
2780                 ixgbe_set_phy_power(hw, true);
2781         } else {
2782                 /* Turn on the laser */
2783                 ixgbe_enable_tx_laser(hw);
2784         }
2785
2786         return 0;
2787 }
2788
2789 /*
2790  * Set device link down: disable tx.
2791  */
2792 static int
2793 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2794 {
2795         struct ixgbe_hw *hw =
2796                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2797         if (hw->mac.type == ixgbe_mac_82599EB) {
2798 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2799                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2800                         /* Not supported in bypass mode */
2801                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2802                                      "by device id 0x%x", hw->device_id);
2803                         return -ENOTSUP;
2804                 }
2805 #endif
2806         }
2807
2808         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2809                 /* Turn off the copper */
2810                 ixgbe_set_phy_power(hw, false);
2811         } else {
2812                 /* Turn off the laser */
2813                 ixgbe_disable_tx_laser(hw);
2814         }
2815
2816         return 0;
2817 }
2818
2819 /*
2820  * Reset and stop device.
2821  */
2822 static void
2823 ixgbe_dev_close(struct rte_eth_dev *dev)
2824 {
2825         struct ixgbe_hw *hw =
2826                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2827
2828         PMD_INIT_FUNC_TRACE();
2829
2830         ixgbe_pf_reset_hw(hw);
2831
2832         ixgbe_dev_stop(dev);
2833         hw->adapter_stopped = 1;
2834
2835         ixgbe_dev_free_queues(dev);
2836
2837         ixgbe_disable_pcie_master(hw);
2838
2839         /* reprogram the RAR[0] in case user changed it. */
2840         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2841 }
2842
2843 static void
2844 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2845                            struct ixgbe_hw_stats *hw_stats,
2846                            struct ixgbe_macsec_stats *macsec_stats,
2847                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
2848                            uint64_t *total_qprc, uint64_t *total_qprdc)
2849 {
2850         uint32_t bprc, lxon, lxoff, total;
2851         uint32_t delta_gprc = 0;
2852         unsigned i;
2853         /* Workaround for RX byte count not including CRC bytes when CRC
2854          * strip is enabled. CRC bytes are removed from counters when crc_strip
2855          * is disabled.
2856          */
2857         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
2858                         IXGBE_HLREG0_RXCRCSTRP);
2859
2860         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2861         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2862         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2863         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2864
2865         for (i = 0; i < 8; i++) {
2866                 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2867
2868                 /* global total per queue */
2869                 hw_stats->mpc[i] += mp;
2870                 /* Running comprehensive total for stats display */
2871                 *total_missed_rx += hw_stats->mpc[i];
2872                 if (hw->mac.type == ixgbe_mac_82598EB) {
2873                         hw_stats->rnbc[i] +=
2874                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2875                         hw_stats->pxonrxc[i] +=
2876                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2877                         hw_stats->pxoffrxc[i] +=
2878                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2879                 } else {
2880                         hw_stats->pxonrxc[i] +=
2881                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2882                         hw_stats->pxoffrxc[i] +=
2883                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2884                         hw_stats->pxon2offc[i] +=
2885                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2886                 }
2887                 hw_stats->pxontxc[i] +=
2888                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2889                 hw_stats->pxofftxc[i] +=
2890                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2891         }
2892         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2893                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2894                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2895                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2896
2897                 delta_gprc += delta_qprc;
2898
2899                 hw_stats->qprc[i] += delta_qprc;
2900                 hw_stats->qptc[i] += delta_qptc;
2901
2902                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2903                 hw_stats->qbrc[i] +=
2904                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2905                 if (crc_strip == 0)
2906                         hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
2907
2908                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2909                 hw_stats->qbtc[i] +=
2910                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2911
2912                 hw_stats->qprdc[i] += delta_qprdc;
2913                 *total_qprdc += hw_stats->qprdc[i];
2914
2915                 *total_qprc += hw_stats->qprc[i];
2916                 *total_qbrc += hw_stats->qbrc[i];
2917         }
2918         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2919         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2920         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2921
2922         /*
2923          * An errata states that gprc actually counts good + missed packets:
2924          * Workaround: set gprc to the sum of the per-queue packet counts
2925          */
2926         hw_stats->gprc = *total_qprc;
2927
2928         if (hw->mac.type != ixgbe_mac_82598EB) {
2929                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
2930                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
2931                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
2932                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
2933                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
2934                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
2935                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
2936                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
2937         } else {
2938                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2939                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2940                 /* 82598 only has a counter in the high register */
2941                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2942                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2943                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2944         }
2945         uint64_t old_tpr = hw_stats->tpr;
2946
2947         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2948         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
2949
2950         if (crc_strip == 0)
2951                 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
2952
2953         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
2954         hw_stats->gptc += delta_gptc;
2955         hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
2956         hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
2957
2958         /*
2959          * Workaround: mprc hardware is incorrectly counting
2960          * broadcasts, so for now we subtract those.
2961          */
2962         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2963         hw_stats->bprc += bprc;
2964         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2965         if (hw->mac.type == ixgbe_mac_82598EB)
2966                 hw_stats->mprc -= bprc;
2967
2968         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2969         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2970         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2971         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2972         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2973         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2974
2975         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2976         hw_stats->lxontxc += lxon;
2977         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2978         hw_stats->lxofftxc += lxoff;
2979         total = lxon + lxoff;
2980
2981         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2982         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2983         hw_stats->gptc -= total;
2984         hw_stats->mptc -= total;
2985         hw_stats->ptc64 -= total;
2986         hw_stats->gotc -= total * ETHER_MIN_LEN;
2987
2988         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2989         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2990         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2991         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2992         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
2993         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
2994         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
2995         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2996         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2997         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2998         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2999         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3000         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3001         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3002         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3003         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3004         /* Only read FCoE counters on devices other than 82598 */
3005         if (hw->mac.type != ixgbe_mac_82598EB) {
3006                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3007                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3008                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3009                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3010                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3011         }
3012
3013         /* Flow Director Stats registers */
3014         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3015         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3016
3017         /* MACsec Stats registers */
3018         macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
3019         macsec_stats->out_pkts_encrypted +=
3020                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
3021         macsec_stats->out_pkts_protected +=
3022                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
3023         macsec_stats->out_octets_encrypted +=
3024                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
3025         macsec_stats->out_octets_protected +=
3026                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
3027         macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
3028         macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
3029         macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
3030         macsec_stats->in_pkts_unknownsci +=
3031                 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
3032         macsec_stats->in_octets_decrypted +=
3033                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
3034         macsec_stats->in_octets_validated +=
3035                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
3036         macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
3037         macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
3038         macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
3039         for (i = 0; i < 2; i++) {
3040                 macsec_stats->in_pkts_ok +=
3041                         IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
3042                 macsec_stats->in_pkts_invalid +=
3043                         IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
3044                 macsec_stats->in_pkts_notvalid +=
3045                         IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
3046         }
3047         macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
3048         macsec_stats->in_pkts_notusingsa +=
3049                 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3050 }
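/*
 * Numeric sketch of the CRC adjustment above (illustrative figures): when
 * CRC stripping is disabled (crc_strip == 0) the hardware byte counters
 * still include the 4-byte Ethernet CRC, so if a queue received 1000
 * packets in this interval (delta_qprc == 1000), qbrc[i] is reduced by
 * 1000 * ETHER_CRC_LEN = 4000 bytes; gorc gets the same treatment using
 * the summed delta_gprc. This keeps byte counts comparable regardless of
 * the CRC-strip setting.
 */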
3051
3052 /*
3053  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3054  */
3055 static void
3056 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3057 {
3058         struct ixgbe_hw *hw =
3059                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3060         struct ixgbe_hw_stats *hw_stats =
3061                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3062         struct ixgbe_macsec_stats *macsec_stats =
3063                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3064                                 dev->data->dev_private);
3065         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3066         unsigned i;
3067
3068         total_missed_rx = 0;
3069         total_qbrc = 0;
3070         total_qprc = 0;
3071         total_qprdc = 0;
3072
3073         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3074                         &total_qbrc, &total_qprc, &total_qprdc);
3075
3076         if (stats == NULL)
3077                 return;
3078
3079         /* Fill out the rte_eth_stats statistics structure */
3080         stats->ipackets = total_qprc;
3081         stats->ibytes = total_qbrc;
3082         stats->opackets = hw_stats->gptc;
3083         stats->obytes = hw_stats->gotc;
3084
3085         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3086                 stats->q_ipackets[i] = hw_stats->qprc[i];
3087                 stats->q_opackets[i] = hw_stats->qptc[i];
3088                 stats->q_ibytes[i] = hw_stats->qbrc[i];
3089                 stats->q_obytes[i] = hw_stats->qbtc[i];
3090                 stats->q_errors[i] = hw_stats->qprdc[i];
3091         }
3092
3093         /* Rx Errors */
3094         stats->imissed  = total_missed_rx;
3095         stats->ierrors  = hw_stats->crcerrs +
3096                           hw_stats->mspdc +
3097                           hw_stats->rlec +
3098                           hw_stats->ruc +
3099                           hw_stats->roc +
3100                           hw_stats->illerrc +
3101                           hw_stats->errbc +
3102                           hw_stats->rfc +
3103                           hw_stats->fccrc +
3104                           hw_stats->fclast;
3105
3106         /* Tx Errors */
3107         stats->oerrors  = 0;
3108 }
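/*
 * Usage sketch (illustrative; port_id is an assumed variable): this is the
 * backend of rte_eth_stats_get(). An application reads the aggregated
 * counters filled in above with:
 *
 *   struct rte_eth_stats stats;
 *
 *   rte_eth_stats_get(port_id, &stats);
 *   printf("rx %" PRIu64 " pkts, %" PRIu64 " missed, %" PRIu64 " errors\n",
 *          stats.ipackets, stats.imissed, stats.ierrors);
 */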
3109
3110 static void
3111 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3112 {
3113         struct ixgbe_hw_stats *stats =
3114                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3115
3116         /* HW registers are cleared on read */
3117         ixgbe_dev_stats_get(dev, NULL);
3118
3119         /* Reset software totals */
3120         memset(stats, 0, sizeof(*stats));
3121 }
3122
3123 /* This function calculates the number of xstats based on the current config */
3124 static unsigned
3125 ixgbe_xstats_calc_num(void) {
3126         return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3127                 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3128                 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3129 }
3130
3131 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3132         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3133 {
3134         const unsigned cnt_stats = ixgbe_xstats_calc_num();
3135         unsigned stat, i, count;
3136
3137         if (xstats_names != NULL) {
3138                 count = 0;
3139
3140                 /* Note: limit >= cnt_stats checked upstream
3141                  * in rte_eth_xstats_get_names()
3142                  */
3143
3144                 /* Extended stats from ixgbe_hw_stats */
3145                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3146                         snprintf(xstats_names[count].name,
3147                                 sizeof(xstats_names[count].name),
3148                                 "%s",
3149                                 rte_ixgbe_stats_strings[i].name);
3150                         count++;
3151                 }
3152
3153                 /* MACsec Stats */
3154                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3155                         snprintf(xstats_names[count].name,
3156                                 sizeof(xstats_names[count].name),
3157                                 "%s",
3158                                 rte_ixgbe_macsec_strings[i].name);
3159                         count++;
3160                 }
3161
3162                 /* RX Priority Stats */
3163                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3164                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3165                                 snprintf(xstats_names[count].name,
3166                                         sizeof(xstats_names[count].name),
3167                                         "rx_priority%u_%s", i,
3168                                         rte_ixgbe_rxq_strings[stat].name);
3169                                 count++;
3170                         }
3171                 }
3172
3173                 /* TX Priority Stats */
3174                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3175                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3176                                 snprintf(xstats_names[count].name,
3177                                         sizeof(xstats_names[count].name),
3178                                         "tx_priority%u_%s", i,
3179                                         rte_ixgbe_txq_strings[stat].name);
3180                                 count++;
3181                         }
3182                 }
3183         }
3184         return cnt_stats;
3185 }
3186
3187 static int ixgbe_dev_xstats_get_names_by_id(
3188         struct rte_eth_dev *dev,
3189         struct rte_eth_xstat_name *xstats_names,
3190         const uint64_t *ids,
3191         unsigned int limit)
3192 {
3193         if (!ids) {
3194                 const unsigned int cnt_stats = ixgbe_xstats_calc_num();
3195                 unsigned int stat, i, count;
3196
3197                 if (xstats_names != NULL) {
3198                         count = 0;
3199
3200                         /* Note: limit >= cnt_stats checked upstream
3201                          * in rte_eth_xstats_get_names()
3202                          */
3203
3204                         /* Extended stats from ixgbe_hw_stats */
3205                         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3206                                 snprintf(xstats_names[count].name,
3207                                         sizeof(xstats_names[count].name),
3208                                         "%s",
3209                                         rte_ixgbe_stats_strings[i].name);
3210                                 count++;
3211                         }
3212
3213                         /* MACsec Stats */
3214                         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3215                                 snprintf(xstats_names[count].name,
3216                                         sizeof(xstats_names[count].name),
3217                                         "%s",
3218                                         rte_ixgbe_macsec_strings[i].name);
3219                                 count++;
3220                         }
3221
3222                         /* RX Priority Stats */
3223                         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3224                                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3225                                         snprintf(xstats_names[count].name,
3226                                             sizeof(xstats_names[count].name),
3227                                             "rx_priority%u_%s", i,
3228                                             rte_ixgbe_rxq_strings[stat].name);
3229                                         count++;
3230                                 }
3231                         }
3232
3233                         /* TX Priority Stats */
3234                         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3235                                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3236                                         snprintf(xstats_names[count].name,
3237                                             sizeof(xstats_names[count].name),
3238                                             "tx_priority%u_%s", i,
3239                                             rte_ixgbe_txq_strings[stat].name);
3240                                         count++;
3241                                 }
3242                         }
3243                 }
3244                 return cnt_stats;
3245         }
3246
3247         uint16_t i;
3248         uint16_t size = ixgbe_xstats_calc_num();
3249         struct rte_eth_xstat_name xstats_names_copy[size];
3250
3251         ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
3252                         size);
3253
3254         for (i = 0; i < limit; i++) {
3255                 if (ids[i] >= size) {
3256                         PMD_INIT_LOG(ERR, "id value isn't valid");
3257                         return -1;
3258                 }
3259                 strcpy(xstats_names[i].name,
3260                                 xstats_names_copy[ids[i]].name);
3261         }
3262         return limit;
3263 }
3264
3265 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3266         struct rte_eth_xstat_name *xstats_names, unsigned limit)
3267 {
3268         unsigned i;
3269
3270         if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3271                 return -ENOMEM;
3272
3273         if (xstats_names != NULL)
3274                 for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
3275                         snprintf(xstats_names[i].name,
3276                                 sizeof(xstats_names[i].name),
3277                                 "%s", rte_ixgbevf_stats_strings[i].name);
3278         return IXGBEVF_NB_XSTATS;
3279 }
3280
3281 static int
3282 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3283                                          unsigned n)
3284 {
3285         struct ixgbe_hw *hw =
3286                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3287         struct ixgbe_hw_stats *hw_stats =
3288                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3289         struct ixgbe_macsec_stats *macsec_stats =
3290                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3291                                 dev->data->dev_private);
3292         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3293         unsigned i, stat, count = 0;
3294
3295         count = ixgbe_xstats_calc_num();
3296
3297         if (n < count)
3298                 return count;
3299
3300         total_missed_rx = 0;
3301         total_qbrc = 0;
3302         total_qprc = 0;
3303         total_qprdc = 0;
3304
3305         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3306                         &total_qbrc, &total_qprc, &total_qprdc);
3307
3308         /* If this is a reset, xstats is NULL, and we have cleared the
3309          * registers by reading them.
3310          */
3311         if (!xstats)
3312                 return 0;
3313
3314         /* Extended stats from ixgbe_hw_stats */
3315         count = 0;
3316         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3317                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3318                                 rte_ixgbe_stats_strings[i].offset);
3319                 xstats[count].id = count;
3320                 count++;
3321         }
3322
3323         /* MACsec Stats */
3324         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3325                 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3326                                 rte_ixgbe_macsec_strings[i].offset);
3327                 xstats[count].id = count;
3328                 count++;
3329         }
3330
3331         /* RX Priority Stats */
3332         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3333                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3334                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3335                                         rte_ixgbe_rxq_strings[stat].offset +
3336                                         (sizeof(uint64_t) * i));
3337                         xstats[count].id = count;
3338                         count++;
3339                 }
3340         }
3341
3342         /* TX Priority Stats */
3343         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3344                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3345                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3346                                         rte_ixgbe_txq_strings[stat].offset +
3347                                         (sizeof(uint64_t) * i));
3348                         xstats[count].id = count;
3349                         count++;
3350                 }
3351         }
3352         return count;
3353 }
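/*
 * Usage sketch (illustrative; the variable names are assumptions): the
 * callbacks above back rte_eth_xstats_get_names() and rte_eth_xstats_get().
 * The usual pattern is to size the arrays first by passing NULL, then fetch
 * names and values and match them through the id field:
 *
 *   int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *   struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *   struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *
 *   rte_eth_xstats_get_names(port_id, names, n);
 *   rte_eth_xstats_get(port_id, vals, n);
 *   for (int i = 0; i < n; i++)
 *           printf("%s: %" PRIu64 "\n",
 *                  names[vals[i].id].name, vals[i].value);
 */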
3354
3355 static int
3356 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3357                 uint64_t *values, unsigned int n)
3358 {
3359         if (!ids) {
3360                 struct ixgbe_hw *hw =
3361                                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3362                 struct ixgbe_hw_stats *hw_stats =
3363                                 IXGBE_DEV_PRIVATE_TO_STATS(
3364                                                 dev->data->dev_private);
3365                 struct ixgbe_macsec_stats *macsec_stats =
3366                                 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3367                                         dev->data->dev_private);
3368                 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3369                 unsigned int i, stat, count = 0;
3370
3371                 count = ixgbe_xstats_calc_num();
3372
3373                 if (!ids && n < count)
3374                         return count;
3375
3376                 total_missed_rx = 0;
3377                 total_qbrc = 0;
3378                 total_qprc = 0;
3379                 total_qprdc = 0;
3380
3381                 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
3382                                 &total_missed_rx, &total_qbrc, &total_qprc,
3383                                 &total_qprdc);
3384
3385                 /* If this is a reset, xstats is NULL, and we have cleared the
3386                  * registers by reading them.
3387                  */
3388                 if (!ids && !values)
3389                         return 0;
3390
3391                 /* Extended stats from ixgbe_hw_stats */
3392                 count = 0;
3393                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3394                         values[count] = *(uint64_t *)(((char *)hw_stats) +
3395                                         rte_ixgbe_stats_strings[i].offset);
3396                         count++;
3397                 }
3398
3399                 /* MACsec Stats */
3400                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3401                         values[count] = *(uint64_t *)(((char *)macsec_stats) +
3402                                         rte_ixgbe_macsec_strings[i].offset);
3403                         count++;
3404                 }
3405
3406                 /* RX Priority Stats */
3407                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3408                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3409                                 values[count] =
3410                                         *(uint64_t *)(((char *)hw_stats) +
3411                                         rte_ixgbe_rxq_strings[stat].offset +
3412                                         (sizeof(uint64_t) * i));
3413                                 count++;
3414                         }
3415                 }
3416
3417                 /* TX Priority Stats */
3418                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3419                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3420                                 values[count] =
3421                                         *(uint64_t *)(((char *)hw_stats) +
3422                                         rte_ixgbe_txq_strings[stat].offset +
3423                                         (sizeof(uint64_t) * i));
3424                                 count++;
3425                         }
3426                 }
3427                 return count;
3428         }
3429
3430         uint16_t i;
3431         uint16_t size = ixgbe_xstats_calc_num();
3432         uint64_t values_copy[size];
3433
3434         ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
3435
3436         for (i = 0; i < n; i++) {
3437                 if (ids[i] >= size) {
3438                         PMD_INIT_LOG(ERR, "id value isn't valid");
3439                         return -1;
3440                 }
3441                 values[i] = values_copy[ids[i]];
3442         }
3443         return n;
3444 }
3445
3446 static void
3447 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3448 {
3449         struct ixgbe_hw_stats *stats =
3450                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3451         struct ixgbe_macsec_stats *macsec_stats =
3452                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3453                                 dev->data->dev_private);
3454
3455         unsigned count = ixgbe_xstats_calc_num();
3456
3457         /* HW registers are cleared on read */
3458         ixgbe_dev_xstats_get(dev, NULL, count);
3459
3460         /* Reset software totals */
3461         memset(stats, 0, sizeof(*stats));
3462         memset(macsec_stats, 0, sizeof(*macsec_stats));
3463 }
3464
3465 static void
3466 ixgbevf_update_stats(struct rte_eth_dev *dev)
3467 {
3468         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3469         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3470                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3471
3472         /* Good Rx packets, including VF loopback */
3473         UPDATE_VF_STAT(IXGBE_VFGPRC,
3474             hw_stats->last_vfgprc, hw_stats->vfgprc);
3475
3476         /* Good Rx octets, including VF loopback */
3477         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3478             hw_stats->last_vfgorc, hw_stats->vfgorc);
3479
3480         /* Good Tx packets, including VF loopback */
3481         UPDATE_VF_STAT(IXGBE_VFGPTC,
3482             hw_stats->last_vfgptc, hw_stats->vfgptc);
3483
3484         /* Good Tx octets, including VF loopback */
3485         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3486             hw_stats->last_vfgotc, hw_stats->vfgotc);
3487
3488         /* Rx Multicast packets */
3489         UPDATE_VF_STAT(IXGBE_VFMPRC,
3490             hw_stats->last_vfmprc, hw_stats->vfmprc);
3491 }
3492
3493 static int
3494 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3495                        unsigned n)
3496 {
3497         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3498                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3499         unsigned i;
3500
3501         if (n < IXGBEVF_NB_XSTATS)
3502                 return IXGBEVF_NB_XSTATS;
3503
3504         ixgbevf_update_stats(dev);
3505
3506         if (!xstats)
3507                 return 0;
3508
3509         /* Extended stats */
3510         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3511                 xstats[i].id = i;
3512                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3513                         rte_ixgbevf_stats_strings[i].offset);
3514         }
3515
3516         return IXGBEVF_NB_XSTATS;
3517 }
3518
3519 static void
3520 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3521 {
3522         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3523                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3524
3525         ixgbevf_update_stats(dev);
3526
3527         if (stats == NULL)
3528                 return;
3529
3530         stats->ipackets = hw_stats->vfgprc;
3531         stats->ibytes = hw_stats->vfgorc;
3532         stats->opackets = hw_stats->vfgptc;
3533         stats->obytes = hw_stats->vfgotc;
3534 }
3535
3536 static void
3537 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3538 {
3539         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3540                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3541
3542         /* Sync HW register to the last stats */
3543         ixgbevf_dev_stats_get(dev, NULL);
3544
3545         /* reset HW current stats */
3546         hw_stats->vfgprc = 0;
3547         hw_stats->vfgorc = 0;
3548         hw_stats->vfgptc = 0;
3549         hw_stats->vfgotc = 0;
3550 }
3551
3552 static int
3553 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3554 {
3555         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3556         u16 eeprom_verh, eeprom_verl;
3557         u32 etrack_id;
3558         int ret;
3559
3560         ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3561         ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3562
3563         etrack_id = (eeprom_verh << 16) | eeprom_verl;
3564         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3565
3566         ret += 1; /* add the size of '\0' */
3567         if (fw_size < (u32)ret)
3568                 return ret;
3569         else
3570                 return 0;
3571 }
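/*
 * Example of the value produced above (illustrative EEPROM words): with
 * eeprom_verh = 0x8000 and eeprom_verl = 0x01ab, the reported eTrack ID is
 *
 *   etrack_id = (0x8000 << 16) | 0x01ab = 0x800001ab  ->  "0x800001ab"
 *
 * which an application retrieves through rte_eth_dev_fw_version_get(). As
 * with that API, a positive return value here means the supplied buffer was
 * too small and gives the size required, including the trailing '\0'.
 */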
3572
3573 static void
3574 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3575 {
3576         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3577         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3578         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3579
3580         dev_info->pci_dev = pci_dev;
3581         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3582         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3583         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3584                 /*
3585                  * When DCB/VT is off, maximum number of queues changes,
3586                  * except for 82598EB, which remains constant.
3587                  */
3588                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3589                                 hw->mac.type != ixgbe_mac_82598EB)
3590                         dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3591         }
3592         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3593         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3594         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3595         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3596         dev_info->max_vfs = pci_dev->max_vfs;
3597         if (hw->mac.type == ixgbe_mac_82598EB)
3598                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3599         else
3600                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3601         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3602         dev_info->rx_offload_capa =
3603                 DEV_RX_OFFLOAD_VLAN_STRIP |
3604                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3605                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3606                 DEV_RX_OFFLOAD_TCP_CKSUM;
3607
3608         /*
3609          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
3610          * mode.
3611          */
3612         if ((hw->mac.type == ixgbe_mac_82599EB ||
3613              hw->mac.type == ixgbe_mac_X540) &&
3614             !RTE_ETH_DEV_SRIOV(dev).active)
3615                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
3616
3617         if (hw->mac.type == ixgbe_mac_82599EB ||
3618             hw->mac.type == ixgbe_mac_X540)
3619                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
3620
3621         if (hw->mac.type == ixgbe_mac_X550 ||
3622             hw->mac.type == ixgbe_mac_X550EM_x ||
3623             hw->mac.type == ixgbe_mac_X550EM_a)
3624                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
3625
3626         dev_info->tx_offload_capa =
3627                 DEV_TX_OFFLOAD_VLAN_INSERT |
3628                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3629                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3630                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3631                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3632                 DEV_TX_OFFLOAD_TCP_TSO;
3633
3634         if (hw->mac.type == ixgbe_mac_82599EB ||
3635             hw->mac.type == ixgbe_mac_X540)
3636                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
3637
3638         if (hw->mac.type == ixgbe_mac_X550 ||
3639             hw->mac.type == ixgbe_mac_X550EM_x ||
3640             hw->mac.type == ixgbe_mac_X550EM_a)
3641                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
3642
3643         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3644                 .rx_thresh = {
3645                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3646                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3647                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3648                 },
3649                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3650                 .rx_drop_en = 0,
3651         };
3652
3653         dev_info->default_txconf = (struct rte_eth_txconf) {
3654                 .tx_thresh = {
3655                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3656                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3657                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3658                 },
3659                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3660                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3661                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3662                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3663         };
3664
3665         dev_info->rx_desc_lim = rx_desc_lim;
3666         dev_info->tx_desc_lim = tx_desc_lim;
3667
3668         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3669         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3670         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3671
3672         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3673         if (hw->mac.type == ixgbe_mac_X540 ||
3674             hw->mac.type == ixgbe_mac_X540_vf ||
3675             hw->mac.type == ixgbe_mac_X550 ||
3676             hw->mac.type == ixgbe_mac_X550_vf) {
3677                 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3678         }
3679 }
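/*
 * Usage sketch (illustrative; port_id and nb_rx_queues are assumed
 * variables): the limits and offload capabilities filled in above are what
 * an application sees from rte_eth_dev_info_get(), e.g. to validate a
 * requested queue count before configuring the port:
 *
 *   struct rte_eth_dev_info info;
 *
 *   rte_eth_dev_info_get(port_id, &info);
 *   if (nb_rx_queues > info.max_rx_queues)
 *           rte_exit(EXIT_FAILURE, "too many Rx queues requested\n");
 */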
3680
3681 static const uint32_t *
3682 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3683 {
3684         static const uint32_t ptypes[] = {
3685                 /* For non-vec functions,
3686                  * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3687                  * for vec functions,
3688                  * refers to _recv_raw_pkts_vec().
3689                  */
3690                 RTE_PTYPE_L2_ETHER,
3691                 RTE_PTYPE_L3_IPV4,
3692                 RTE_PTYPE_L3_IPV4_EXT,
3693                 RTE_PTYPE_L3_IPV6,
3694                 RTE_PTYPE_L3_IPV6_EXT,
3695                 RTE_PTYPE_L4_SCTP,
3696                 RTE_PTYPE_L4_TCP,
3697                 RTE_PTYPE_L4_UDP,
3698                 RTE_PTYPE_TUNNEL_IP,
3699                 RTE_PTYPE_INNER_L3_IPV6,
3700                 RTE_PTYPE_INNER_L3_IPV6_EXT,
3701                 RTE_PTYPE_INNER_L4_TCP,
3702                 RTE_PTYPE_INNER_L4_UDP,
3703                 RTE_PTYPE_UNKNOWN
3704         };
3705
3706         if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3707             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3708             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3709             dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3710                 return ptypes;
3711
3712 #if defined(RTE_ARCH_X86)
3713         if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3714             dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3715                 return ptypes;
3716 #endif
3717         return NULL;
3718 }
3719
3720 static void
3721 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3722                      struct rte_eth_dev_info *dev_info)
3723 {
3724         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3725         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3726
3727         dev_info->pci_dev = pci_dev;
3728         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3729         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3730         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3731         dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
3732         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3733         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3734         dev_info->max_vfs = pci_dev->max_vfs;
3735         if (hw->mac.type == ixgbe_mac_82598EB)
3736                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3737         else
3738                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3739         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
3740                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3741                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3742                                 DEV_RX_OFFLOAD_TCP_CKSUM;
3743         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
3744                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3745                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3746                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3747                                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3748                                 DEV_TX_OFFLOAD_TCP_TSO;
3749
3750         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3751                 .rx_thresh = {
3752                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3753                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3754                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3755                 },
3756                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3757                 .rx_drop_en = 0,
3758         };
3759
3760         dev_info->default_txconf = (struct rte_eth_txconf) {
3761                 .tx_thresh = {
3762                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3763                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3764                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3765                 },
3766                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3767                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3768                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3769                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3770         };
3771
3772         dev_info->rx_desc_lim = rx_desc_lim;
3773         dev_info->tx_desc_lim = tx_desc_lim;
3774 }
3775
3776 static int
3777 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3778                    int *link_up, int wait_to_complete)
3779 {
3780         /**
3781          * for a quick link status check, when wait_to_complete == 0,
3782          * skip the PF link status check
3783          */
3784         bool no_pflink_check = wait_to_complete == 0;
3785         struct ixgbe_mbx_info *mbx = &hw->mbx;
3786         struct ixgbe_mac_info *mac = &hw->mac;
3787         uint32_t links_reg, in_msg;
3788         int ret_val = 0;
3789
3790         /* If we were hit with a reset drop the link */
3791         if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
3792                 mac->get_link_status = true;
3793
3794         if (!mac->get_link_status)
3795                 goto out;
3796
3797         /* if link status is down no point in checking to see if pf is up */
3798         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3799         if (!(links_reg & IXGBE_LINKS_UP))
3800                 goto out;
3801
3802         /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
3803          * before the link status is correct
3804          */
3805         if (mac->type == ixgbe_mac_82599_vf) {
3806                 int i;
3807
3808                 for (i = 0; i < 5; i++) {
3809                         rte_delay_us(100);
3810                         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3811
3812                         if (!(links_reg & IXGBE_LINKS_UP))
3813                                 goto out;
3814                 }
3815         }
3816
3817         switch (links_reg & IXGBE_LINKS_SPEED_82599) {
3818         case IXGBE_LINKS_SPEED_10G_82599:
3819                 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3820                 if (hw->mac.type >= ixgbe_mac_X550) {
3821                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3822                                 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
3823                 }
3824                 break;
3825         case IXGBE_LINKS_SPEED_1G_82599:
3826                 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3827                 break;
3828         case IXGBE_LINKS_SPEED_100_82599:
3829                 *speed = IXGBE_LINK_SPEED_100_FULL;
3830                 if (hw->mac.type == ixgbe_mac_X550) {
3831                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3832                                 *speed = IXGBE_LINK_SPEED_5GB_FULL;
3833                 }
3834                 break;
3835         case IXGBE_LINKS_SPEED_10_X550EM_A:
3836                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3837                 /* Reserved in older MACs; only valid from X550 on */
3838                 if (hw->mac.type >= ixgbe_mac_X550)
3839                         *speed = IXGBE_LINK_SPEED_10_FULL;
3840                 break;
3841         default:
3842                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3843         }
3844
3845         if (no_pflink_check) {
3846                 if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
3847                         mac->get_link_status = true;
3848                 else
3849                         mac->get_link_status = false;
3850
3851                 goto out;
3852         }
3853         /* if the read failed it could just be a mailbox collision, best wait
3854          * until we are called again and don't report an error
3855          */
3856         if (mbx->ops.read(hw, &in_msg, 1, 0))
3857                 goto out;
3858
3859         if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
3860                 /* msg is not CTS; if it is a NACK we must have lost CTS status */
3861                 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
3862                         ret_val = -1;
3863                 goto out;
3864         }
3865
3866         /* the pf is talking, if we timed out in the past we reinit */
3867         if (!mbx->timeout) {
3868                 ret_val = -1;
3869                 goto out;
3870         }
3871
3872         /* if we passed all the tests above then the link is up and we no
3873          * longer need to check for link
3874          */
3875         mac->get_link_status = false;
3876
3877 out:
3878         *link_up = !mac->get_link_status;
3879         return ret_val;
3880 }
3881
3882 /* return 0 means link status changed, -1 means not changed */
3883 static int
3884 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
3885                             int wait_to_complete, int vf)
3886 {
3887         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3888         struct rte_eth_link link, old;
3889         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3890         struct ixgbe_interrupt *intr =
3891                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3892         int link_up;
3893         int diag;
3894         u32 speed = 0;
3895         int wait = 1;
3896         bool autoneg = false;
3897
3898         link.link_status = ETH_LINK_DOWN;
3899         link.link_speed = 0;
3900         link.link_duplex = ETH_LINK_HALF_DUPLEX;
3901         memset(&old, 0, sizeof(old));
3902         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
3903
3904         hw->mac.get_link_status = true;
3905
3906         if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
3907                 ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
3908                 speed = hw->phy.autoneg_advertised;
3909                 if (!speed)
3910                         ixgbe_get_link_capabilities(hw, &speed, &autoneg);
3911                 ixgbe_setup_link(hw, speed, true);
3912         }
3913
3914         /* do not wait to complete if the caller asked not to or the LSC interrupt is enabled */
3915         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
3916                 wait = 0;
3917
3918         if (vf)
3919                 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
3920         else
3921                 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
3922
3923         if (diag != 0) {
3924                 link.link_speed = ETH_SPEED_NUM_100M;
3925                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3926                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3927                 if (link.link_status == old.link_status)
3928                         return -1;
3929                 return 0;
3930         }
3931
3932         if (link_up == 0) {
3933                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3934                 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
3935                 if (link.link_status == old.link_status)
3936                         return -1;
3937                 return 0;
3938         }
3939         intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
3940         link.link_status = ETH_LINK_UP;
3941         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3942
3943         switch (link_speed) {
3944         default:
3945         case IXGBE_LINK_SPEED_UNKNOWN:
3946                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3947                 link.link_speed = ETH_SPEED_NUM_100M;
3948                 break;
3949
3950         case IXGBE_LINK_SPEED_100_FULL:
3951                 link.link_speed = ETH_SPEED_NUM_100M;
3952                 break;
3953
3954         case IXGBE_LINK_SPEED_1GB_FULL:
3955                 link.link_speed = ETH_SPEED_NUM_1G;
3956                 break;
3957
3958         case IXGBE_LINK_SPEED_10GB_FULL:
3959                 link.link_speed = ETH_SPEED_NUM_10G;
3960                 break;
3961         }
3962         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3963
3964         if (link.link_status == old.link_status)
3965                 return -1;
3966
3967         return 0;
3968 }
3969
3970 static int
3971 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3972 {
3973         return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
3974 }
3975
3976 static int
3977 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3978 {
3979         return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
3980 }
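/*
 * Illustrative sketch (not part of the driver): polling the link state
 * maintained by ixgbe_dev_link_update()/ixgbevf_dev_link_update() above.
 * rte_eth_link_get_nowait() corresponds to wait_to_complete == 0;
 * "port_id" is a hypothetical, already started port.
 */
static void
example_poll_link(uint16_t port_id)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status == ETH_LINK_UP)
		printf("port %u up, %u Mbps, %s\n", port_id,
		       (unsigned int)link.link_speed,
		       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
		       "full-duplex" : "half-duplex");
	else
		printf("port %u down\n", port_id);
}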
3981
3982 static void
3983 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
3984 {
3985         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3986         uint32_t fctrl;
3987
3988         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3989         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3990         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3991 }
3992
3993 static void
3994 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
3995 {
3996         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3997         uint32_t fctrl;
3998
3999         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4000         fctrl &= (~IXGBE_FCTRL_UPE);
4001         if (dev->data->all_multicast == 1)
4002                 fctrl |= IXGBE_FCTRL_MPE;
4003         else
4004                 fctrl &= (~IXGBE_FCTRL_MPE);
4005         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4006 }
4007
4008 static void
4009 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4010 {
4011         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4012         uint32_t fctrl;
4013
4014         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4015         fctrl |= IXGBE_FCTRL_MPE;
4016         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4017 }
4018
4019 static void
4020 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4021 {
4022         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4023         uint32_t fctrl;
4024
4025         if (dev->data->promiscuous == 1)
4026                 return; /* must remain in all_multicast mode */
4027
4028         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4029         fctrl &= (~IXGBE_FCTRL_MPE);
4030         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4031 }
4032
4033 /**
4034  * It clears the interrupt causes and enables the interrupt.
4035  * It will be called only once during NIC initialization.
4036  *
4037  * @param dev
4038  *  Pointer to struct rte_eth_dev.
4039  *
4040  * @return
4041  *  - On success, zero.
4042  *  - On failure, a negative value.
4043  */
4044 static int
4045 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
4046 {
4047         struct ixgbe_interrupt *intr =
4048                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4049
4050         ixgbe_dev_link_status_print(dev);
4051         intr->mask |= IXGBE_EICR_LSC;
4052
4053         return 0;
4054 }
4055
4056 /**
4057  * It clears the interrupt causes and enables the interrupt.
4058  * It will be called only once during NIC initialization.
4059  *
4060  * @param dev
4061  *  Pointer to struct rte_eth_dev.
4062  *
4063  * @return
4064  *  - On success, zero.
4065  *  - On failure, a negative value.
4066  */
4067 static int
4068 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4069 {
4070         struct ixgbe_interrupt *intr =
4071                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4072
4073         intr->mask |= IXGBE_EICR_RTX_QUEUE;
4074
4075         return 0;
4076 }
4077
4078 /**
4079  * It clears the interrupt causes and enables the interrupt.
4080  * It will be called only once during NIC initialization.
4081  *
4082  * @param dev
4083  *  Pointer to struct rte_eth_dev.
4084  *
4085  * @return
4086  *  - On success, zero.
4087  *  - On failure, a negative value.
4088  */
4089 static int
4090 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4091 {
4092         struct ixgbe_interrupt *intr =
4093                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4094
4095         intr->mask |= IXGBE_EICR_LINKSEC;
4096
4097         return 0;
4098 }
4099
4100 /*
4101  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
4102  *
4103  * @param dev
4104  *  Pointer to struct rte_eth_dev.
4105  *
4106  * @return
4107  *  - On success, zero.
4108  *  - On failure, a negative value.
4109  */
4110 static int
4111 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4112 {
4113         uint32_t eicr;
4114         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4115         struct ixgbe_interrupt *intr =
4116                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4117
4118         /* clear all cause mask */
4119         ixgbe_disable_intr(hw);
4120
4121         /* read-on-clear nic registers here */
4122         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4123         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4124
4125         intr->flags = 0;
4126
4127         /* set flag for async link update */
4128         if (eicr & IXGBE_EICR_LSC)
4129                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4130
4131         if (eicr & IXGBE_EICR_MAILBOX)
4132                 intr->flags |= IXGBE_FLAG_MAILBOX;
4133
4134         if (eicr & IXGBE_EICR_LINKSEC)
4135                 intr->flags |= IXGBE_FLAG_MACSEC;
4136
4137         if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
4138             hw->phy.type == ixgbe_phy_x550em_ext_t &&
4139             (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4140                 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4141
4142         return 0;
4143 }
4144
4145 /**
4146  * It gets and then prints the link status.
4147  *
4148  * @param dev
4149  *  Pointer to struct rte_eth_dev.
4150  *
4151  * @return
4152  *  - On success, zero.
4153  *  - On failure, a negative value.
4154  */
4155 static void
4156 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4157 {
4158         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4159         struct rte_eth_link link;
4160
4161         memset(&link, 0, sizeof(link));
4162         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4163         if (link.link_status) {
4164                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4165                                         (int)(dev->data->port_id),
4166                                         (unsigned)link.link_speed,
4167                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4168                                         "full-duplex" : "half-duplex");
4169         } else {
4170                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
4171                                 (int)(dev->data->port_id));
4172         }
4173         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4174                                 pci_dev->addr.domain,
4175                                 pci_dev->addr.bus,
4176                                 pci_dev->addr.devid,
4177                                 pci_dev->addr.function);
4178 }
4179
4180 /*
4181  * It executes link_update after an interrupt has occurred.
4182  *
4183  * @param dev
4184  *  Pointer to struct rte_eth_dev.
4185  *
4186  * @return
4187  *  - On success, zero.
4188  *  - On failure, a negative value.
4189  */
4190 static int
4191 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
4192                            struct rte_intr_handle *intr_handle)
4193 {
4194         struct ixgbe_interrupt *intr =
4195                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4196         int64_t timeout;
4197         struct rte_eth_link link;
4198         struct ixgbe_hw *hw =
4199                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4200
4201         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4202
4203         if (intr->flags & IXGBE_FLAG_MAILBOX) {
4204                 ixgbe_pf_mbx_process(dev);
4205                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
4206         }
4207
4208         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4209                 ixgbe_handle_lasi(hw);
4210                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4211         }
4212
4213         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4214                 /* get the link status before link update, for predicting later */
4215                 memset(&link, 0, sizeof(link));
4216                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4217
4218                 ixgbe_dev_link_update(dev, 0);
4219
4220                 /* link is likely to come up */
4221                 if (!link.link_status)
4222                         /* handle it 1 sec later, wait for it to be stable */
4223                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4224                 /* link is likely to go down */
4225                 else
4226                         /* handle it 4 sec later, wait for it to be stable */
4227                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4228
4229                 ixgbe_dev_link_status_print(dev);
4230                 if (rte_eal_alarm_set(timeout * 1000,
4231                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4232                         PMD_DRV_LOG(ERR, "Error setting alarm");
4233                 else {
4234                         /* remember original mask */
4235                         intr->mask_original = intr->mask;
4236                         /* only disable lsc interrupt */
4237                         intr->mask &= ~IXGBE_EIMS_LSC;
4238                 }
4239         }
4240
4241         PMD_DRV_LOG(DEBUG, "enable intr immediately");
4242         ixgbe_enable_intr(dev);
4243         rte_intr_enable(intr_handle);
4244
4245         return 0;
4246 }
4247
4248 /**
4249  * Interrupt handler registered as an alarm callback for delayed handling of
4250  * a specific interrupt, waiting for the NIC state to become stable. As the
4251  * ixgbe interrupt state is not stable right after the link has gone down,
4252  * it needs to wait 4 seconds before reading the stable status.
4253  *
4254  * @param handle
4255  *  Pointer to interrupt handle.
4256  * @param param
4257  *  The address of the parameter (struct rte_eth_dev *) registered before.
4258  *
4259  * @return
4260  *  void
4261  */
4262 static void
4263 ixgbe_dev_interrupt_delayed_handler(void *param)
4264 {
4265         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4266         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4267         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4268         struct ixgbe_interrupt *intr =
4269                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4270         struct ixgbe_hw *hw =
4271                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4272         uint32_t eicr;
4273
4274         ixgbe_disable_intr(hw);
4275
4276         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4277         if (eicr & IXGBE_EICR_MAILBOX)
4278                 ixgbe_pf_mbx_process(dev);
4279
4280         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4281                 ixgbe_handle_lasi(hw);
4282                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4283         }
4284
4285         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4286                 ixgbe_dev_link_update(dev, 0);
4287                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4288                 ixgbe_dev_link_status_print(dev);
4289                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
4290                                               NULL, NULL);
4291         }
4292
4293         if (intr->flags & IXGBE_FLAG_MACSEC) {
4294                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
4295                                               NULL, NULL);
4296                 intr->flags &= ~IXGBE_FLAG_MACSEC;
4297         }
4298
4299         /* restore original mask */
4300         intr->mask = intr->mask_original;
4301         intr->mask_original = 0;
4302
4303         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4304         ixgbe_enable_intr(dev);
4305         rte_intr_enable(intr_handle);
4306 }
4307
4308 /**
4309  * Interrupt handler triggered by the NIC for handling
4310  * a specific interrupt.
4311  *
4312  * @param handle
4313  *  Pointer to interrupt handle.
4314  * @param param
4315  *  The address of the parameter (struct rte_eth_dev *) registered before.
4316  *
4317  * @return
4318  *  void
4319  */
4320 static void
4321 ixgbe_dev_interrupt_handler(void *param)
4322 {
4323         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4324
4325         ixgbe_dev_interrupt_get_status(dev);
4326         ixgbe_dev_interrupt_action(dev, dev->intr_handle);
4327 }
4328
4329 static int
4330 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4331 {
4332         struct ixgbe_hw *hw;
4333
4334         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4335         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4336 }
4337
4338 static int
4339 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4340 {
4341         struct ixgbe_hw *hw;
4342
4343         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4344         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4345 }
4346
4347 static int
4348 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4349 {
4350         struct ixgbe_hw *hw;
4351         uint32_t mflcn_reg;
4352         uint32_t fccfg_reg;
4353         int rx_pause;
4354         int tx_pause;
4355
4356         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4357
4358         fc_conf->pause_time = hw->fc.pause_time;
4359         fc_conf->high_water = hw->fc.high_water[0];
4360         fc_conf->low_water = hw->fc.low_water[0];
4361         fc_conf->send_xon = hw->fc.send_xon;
4362         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4363
4364         /*
4365          * Return rx_pause status according to actual setting of
4366          * MFLCN register.
4367          */
4368         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4369         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4370                 rx_pause = 1;
4371         else
4372                 rx_pause = 0;
4373
4374         /*
4375          * Return tx_pause status according to actual setting of
4376          * FCCFG register.
4377          */
4378         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4379         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4380                 tx_pause = 1;
4381         else
4382                 tx_pause = 0;
4383
4384         if (rx_pause && tx_pause)
4385                 fc_conf->mode = RTE_FC_FULL;
4386         else if (rx_pause)
4387                 fc_conf->mode = RTE_FC_RX_PAUSE;
4388         else if (tx_pause)
4389                 fc_conf->mode = RTE_FC_TX_PAUSE;
4390         else
4391                 fc_conf->mode = RTE_FC_NONE;
4392
4393         return 0;
4394 }
4395
4396 static int
4397 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4398 {
4399         struct ixgbe_hw *hw;
4400         int err;
4401         uint32_t rx_buf_size;
4402         uint32_t max_high_water;
4403         uint32_t mflcn;
4404         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4405                 ixgbe_fc_none,
4406                 ixgbe_fc_rx_pause,
4407                 ixgbe_fc_tx_pause,
4408                 ixgbe_fc_full
4409         };
4410
4411         PMD_INIT_FUNC_TRACE();
4412
4413         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4414         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4415         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4416
4417         /*
4418          * Reserve at least one Ethernet frame for the watermark;
4419          * high_water/low_water are in kilobytes for ixgbe
4420          */
4421         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4422         if ((fc_conf->high_water > max_high_water) ||
4423                 (fc_conf->high_water < fc_conf->low_water)) {
4424                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4425                 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4426                 return -EINVAL;
4427         }
4428
4429         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4430         hw->fc.pause_time     = fc_conf->pause_time;
4431         hw->fc.high_water[0]  = fc_conf->high_water;
4432         hw->fc.low_water[0]   = fc_conf->low_water;
4433         hw->fc.send_xon       = fc_conf->send_xon;
4434         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
4435
4436         err = ixgbe_fc_enable(hw);
4437
4438         /* Not negotiated is not an error case */
4439         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
4440
4441                 /* check if we want to forward MAC frames - driver doesn't have native
4442                  * capability to do that, so we'll write the registers ourselves */
4443
4444                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4445
4446                 /* set or clear MFLCN.PMCF bit depending on configuration */
4447                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4448                         mflcn |= IXGBE_MFLCN_PMCF;
4449                 else
4450                         mflcn &= ~IXGBE_MFLCN_PMCF;
4451
4452                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
4453                 IXGBE_WRITE_FLUSH(hw);
4454
4455                 return 0;
4456         }
4457
4458         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
4459         return -EIO;
4460 }
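/*
 * Illustrative sketch (not part of the driver): configuring 802.3x flow
 * control from an application; rte_eth_dev_flow_ctrl_get()/set() dispatch
 * to ixgbe_flow_ctrl_get()/ixgbe_flow_ctrl_set() above. The values are
 * examples only and must satisfy the high/low water checks above.
 */
static int
example_enable_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_FC_FULL;	/* enable both Rx and Tx pause */
	fc_conf.pause_time = 0x680;	/* timer value placed in XOFF frames */
	fc_conf.send_xon = 1;

	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}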
4461
4462 /**
4463  *  ixgbe_pfc_enable_generic - Enable flow control
4464  *  @hw: pointer to hardware structure
4465  *  @tc_num: traffic class number
4466  *  Enable flow control according to the current settings.
4467  */
4468 static int
4469 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4470 {
4471         int ret_val = 0;
4472         uint32_t mflcn_reg, fccfg_reg;
4473         uint32_t reg;
4474         uint32_t fcrtl, fcrth;
4475         uint8_t i;
4476         uint8_t nb_rx_en;
4477
4478         /* Validate the water mark configuration */
4479         if (!hw->fc.pause_time) {
4480                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4481                 goto out;
4482         }
4483
4484         /* Low water mark of zero causes XOFF floods */
4485         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4486                  /* High/Low water cannot be 0 */
4487                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4488                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4489                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4490                         goto out;
4491                 }
4492
4493                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4494                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4495                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4496                         goto out;
4497                 }
4498         }
4499         /* Negotiate the fc mode to use */
4500         ixgbe_fc_autoneg(hw);
4501
4502         /* Disable any previous flow control settings */
4503         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4504         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4505
4506         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4507         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4508
4509         switch (hw->fc.current_mode) {
4510         case ixgbe_fc_none:
4511                 /*
4512                  * If more than one RX priority flow control is enabled,
4513                  * then TX pause cannot be disabled.
4514                  */
4515                 nb_rx_en = 0;
4516                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4517                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4518                         if (reg & IXGBE_FCRTH_FCEN)
4519                                 nb_rx_en++;
4520                 }
4521                 if (nb_rx_en > 1)
4522                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4523                 break;
4524         case ixgbe_fc_rx_pause:
4525                 /*
4526                  * Rx Flow control is enabled and Tx Flow control is
4527                  * disabled by software override. Since there really
4528                  * isn't a way to advertise that we are capable of RX
4529                  * Pause ONLY, we will advertise that we support both
4530                  * symmetric and asymmetric Rx PAUSE.  Later, we will
4531                  * disable the adapter's ability to send PAUSE frames.
4532                  */
4533                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4534                 /*
4535                  * If more than one RX priority flow control is enabled,
4536                  * then TX pause cannot be disabled.
4537                  */
4538                 nb_rx_en = 0;
4539                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4540                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4541                         if (reg & IXGBE_FCRTH_FCEN)
4542                                 nb_rx_en++;
4543                 }
4544                 if (nb_rx_en > 1)
4545                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4546                 break;
4547         case ixgbe_fc_tx_pause:
4548                 /*
4549                  * Tx Flow control is enabled, and Rx Flow control is
4550                  * disabled by software override.
4551                  */
4552                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4553                 break;
4554         case ixgbe_fc_full:
4555                 /* Flow control (both Rx and Tx) is enabled by SW override. */
4556                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4557                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4558                 break;
4559         default:
4560                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4561                 ret_val = IXGBE_ERR_CONFIG;
4562                 goto out;
4563         }
4564
4565         /* Set 802.3x based flow control settings. */
4566         mflcn_reg |= IXGBE_MFLCN_DPF;
4567         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4568         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4569
4570         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
4571         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4572                 hw->fc.high_water[tc_num]) {
4573                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4574                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4575                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4576         } else {
4577                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4578                 /*
4579                  * In order to prevent Tx hangs when the internal Tx
4580                  * switch is enabled we must set the high water mark
4581                  * to the maximum FCRTH value.  This allows the Tx
4582                  * switch to function even under heavy Rx workloads.
4583                  */
4584                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4585         }
4586         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4587
4588         /* Configure pause time (2 TCs per register) */
4589         reg = hw->fc.pause_time * 0x00010001;
4590         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4591                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4592
4593         /* Configure flow control refresh threshold value */
4594         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4595
4596 out:
4597         return ret_val;
4598 }
4599
4600 static int
4601 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4602 {
4603         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4604         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4605
4606         if (hw->mac.type != ixgbe_mac_82598EB) {
4607                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4608         }
4609         return ret_val;
4610 }
4611
4612 static int
4613 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4614 {
4615         int err;
4616         uint32_t rx_buf_size;
4617         uint32_t max_high_water;
4618         uint8_t tc_num;
4619         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4620         struct ixgbe_hw *hw =
4621                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4622         struct ixgbe_dcb_config *dcb_config =
4623                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4624
4625         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4626                 ixgbe_fc_none,
4627                 ixgbe_fc_rx_pause,
4628                 ixgbe_fc_tx_pause,
4629                 ixgbe_fc_full
4630         };
4631
4632         PMD_INIT_FUNC_TRACE();
4633
4634         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4635         tc_num = map[pfc_conf->priority];
4636         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4637         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4638         /*
4639          * Reserve at least one Ethernet frame for the watermark;
4640          * high_water/low_water are in kilobytes for ixgbe
4641          */
4642         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4643         if ((pfc_conf->fc.high_water > max_high_water) ||
4644             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4645                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4646                 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4647                 return -EINVAL;
4648         }
4649
4650         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4651         hw->fc.pause_time = pfc_conf->fc.pause_time;
4652         hw->fc.send_xon = pfc_conf->fc.send_xon;
4653         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
4654         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4655
4656         err = ixgbe_dcb_pfc_enable(dev, tc_num);
4657
4658         /* Not negotiated is not an error case */
4659         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4660                 return 0;
4661
4662         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4663         return -EIO;
4664 }
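/*
 * Illustrative sketch (not part of the driver): enabling priority flow
 * control for one user priority through the ethdev API backed by
 * ixgbe_priority_flow_ctrl_set() above. The watermarks are placeholder
 * values in KB and must pass the high/low water checks above.
 */
static int
example_enable_pfc(uint16_t port_id, uint8_t priority)
{
	struct rte_eth_pfc_conf pfc_conf;

	memset(&pfc_conf, 0, sizeof(pfc_conf));
	pfc_conf.priority = priority;
	pfc_conf.fc.mode = RTE_FC_FULL;
	pfc_conf.fc.pause_time = 0x680;
	pfc_conf.fc.send_xon = 1;
	pfc_conf.fc.high_water = 0x80;	/* example value */
	pfc_conf.fc.low_water = 0x40;	/* example value */

	return rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
}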
4665
4666 static int
4667 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
4668                           struct rte_eth_rss_reta_entry64 *reta_conf,
4669                           uint16_t reta_size)
4670 {
4671         uint16_t i, sp_reta_size;
4672         uint8_t j, mask;
4673         uint32_t reta, r;
4674         uint16_t idx, shift;
4675         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4676         uint32_t reta_reg;
4677
4678         PMD_INIT_FUNC_TRACE();
4679
4680         if (!ixgbe_rss_update_sp(hw->mac.type)) {
4681                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
4682                         "NIC.");
4683                 return -ENOTSUP;
4684         }
4685
4686         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4687         if (reta_size != sp_reta_size) {
4688                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4689                         "(%d) doesn't match the number the hardware can support "
4690                         "(%d)", reta_size, sp_reta_size);
4691                 return -EINVAL;
4692         }
4693
4694         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4695                 idx = i / RTE_RETA_GROUP_SIZE;
4696                 shift = i % RTE_RETA_GROUP_SIZE;
4697                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4698                                                 IXGBE_4_BIT_MASK);
4699                 if (!mask)
4700                         continue;
4701                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4702                 if (mask == IXGBE_4_BIT_MASK)
4703                         r = 0;
4704                 else
4705                         r = IXGBE_READ_REG(hw, reta_reg);
4706                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4707                         if (mask & (0x1 << j))
4708                                 reta |= reta_conf[idx].reta[shift + j] <<
4709                                                         (CHAR_BIT * j);
4710                         else
4711                                 reta |= r & (IXGBE_8_BIT_MASK <<
4712                                                 (CHAR_BIT * j));
4713                 }
4714                 IXGBE_WRITE_REG(hw, reta_reg, reta);
4715         }
4716
4717         return 0;
4718 }
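/*
 * Illustrative sketch (not part of the driver): spreading the redirection
 * table round-robin over nb_queues Rx queues via the ethdev API that lands
 * in ixgbe_dev_rss_reta_update() above. reta_size must equal the size the
 * device reports (dev_info.reta_size), as enforced above.
 */
static int
example_fill_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[reta_size / RTE_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_RETA_GROUP_SIZE);
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
			i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}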
4719
4720 static int
4721 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4722                          struct rte_eth_rss_reta_entry64 *reta_conf,
4723                          uint16_t reta_size)
4724 {
4725         uint16_t i, sp_reta_size;
4726         uint8_t j, mask;
4727         uint32_t reta;
4728         uint16_t idx, shift;
4729         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4730         uint32_t reta_reg;
4731
4732         PMD_INIT_FUNC_TRACE();
4733         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4734         if (reta_size != sp_reta_size) {
4735                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4736                         "(%d) doesn't match the number the hardware can support "
4737                         "(%d)", reta_size, sp_reta_size);
4738                 return -EINVAL;
4739         }
4740
4741         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4742                 idx = i / RTE_RETA_GROUP_SIZE;
4743                 shift = i % RTE_RETA_GROUP_SIZE;
4744                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4745                                                 IXGBE_4_BIT_MASK);
4746                 if (!mask)
4747                         continue;
4748
4749                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4750                 reta = IXGBE_READ_REG(hw, reta_reg);
4751                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4752                         if (mask & (0x1 << j))
4753                                 reta_conf[idx].reta[shift + j] =
4754                                         ((reta >> (CHAR_BIT * j)) &
4755                                                 IXGBE_8_BIT_MASK);
4756                 }
4757         }
4758
4759         return 0;
4760 }
4761
4762 static int
4763 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4764                                 uint32_t index, uint32_t pool)
4765 {
4766         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4767         uint32_t enable_addr = 1;
4768
4769         return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
4770                              pool, enable_addr);
4771 }
4772
4773 static void
4774 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
4775 {
4776         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4777
4778         ixgbe_clear_rar(hw, index);
4779 }
4780
4781 static void
4782 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4783 {
4784         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4785
4786         ixgbe_remove_rar(dev, 0);
4787
4788         ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
4789 }
4790
4791 static bool
4792 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
4793 {
4794         if (strcmp(dev->device->driver->name, drv->driver.name))
4795                 return false;
4796
4797         return true;
4798 }
4799
4800 bool
4801 is_ixgbe_supported(struct rte_eth_dev *dev)
4802 {
4803         return is_device_supported(dev, &rte_ixgbe_pmd);
4804 }
4805
4806 static int
4807 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4808 {
4809         uint32_t hlreg0;
4810         uint32_t maxfrs;
4811         struct ixgbe_hw *hw;
4812         struct rte_eth_dev_info dev_info;
4813         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4814         struct rte_eth_dev_data *dev_data = dev->data;
4815
4816         ixgbe_dev_info_get(dev, &dev_info);
4817
4818         /* check that mtu is within the allowed range */
4819         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
4820                 return -EINVAL;
4821
4822         /* If the device is started, refuse an MTU that would require scattered
4823          * Rx support when this feature has not been enabled before.
4824          */
4825         if (dev_data->dev_started && !dev_data->scattered_rx &&
4826             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
4827              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
4828                 PMD_INIT_LOG(ERR, "Stop port first.");
4829                 return -EINVAL;
4830         }
4831
4832         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4833         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4834
4835         /* switch to jumbo mode if needed */
4836         if (frame_size > ETHER_MAX_LEN) {
4837                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
4838                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4839         } else {
4840                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
4841                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4842         }
4843         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4844
4845         /* update max frame size */
4846         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4847
4848         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4849         maxfrs &= 0x0000FFFF;
4850         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
4851         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4852
4853         return 0;
4854 }
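/*
 * Illustrative sketch (not part of the driver): changing the MTU from an
 * application with rte_eth_dev_set_mtu(), which dispatches to
 * ixgbe_dev_mtu_set() above. While the port is started the request is
 * refused only if the new frame size would newly require scattered Rx
 * (see the check above). The MTU value is an example only.
 */
static int
example_set_jumbo_mtu(uint16_t port_id)
{
	uint16_t mtu = 9000;	/* must keep frame_size <= max_rx_pktlen */

	return rte_eth_dev_set_mtu(port_id, mtu);
}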
4855
4856 /*
4857  * Virtual Function operations
4858  */
4859 static void
4860 ixgbevf_intr_disable(struct ixgbe_hw *hw)
4861 {
4862         PMD_INIT_FUNC_TRACE();
4863
4864         /* Clear interrupt mask to stop from interrupts being generated */
4865         /* Clear interrupt mask to stop interrupts from being generated */
4866
4867         IXGBE_WRITE_FLUSH(hw);
4868 }
4869
4870 static void
4871 ixgbevf_intr_enable(struct ixgbe_hw *hw)
4872 {
4873         PMD_INIT_FUNC_TRACE();
4874
4875         /* VF enable interrupt autoclean */
4876         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
4877         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
4878         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
4879
4880         IXGBE_WRITE_FLUSH(hw);
4881 }
4882
4883 static int
4884 ixgbevf_dev_configure(struct rte_eth_dev *dev)
4885 {
4886         struct rte_eth_conf *conf = &dev->data->dev_conf;
4887         struct ixgbe_adapter *adapter =
4888                         (struct ixgbe_adapter *)dev->data->dev_private;
4889
4890         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
4891                      dev->data->port_id);
4892
4893         /*
4894          * The VF has no ability to enable/disable HW CRC stripping;
4895          * keep the behavior consistent with the host PF.
4896          */
4897 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
4898         if (!conf->rxmode.hw_strip_crc) {
4899                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
4900                 conf->rxmode.hw_strip_crc = 1;
4901         }
4902 #else
4903         if (conf->rxmode.hw_strip_crc) {
4904                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
4905                 conf->rxmode.hw_strip_crc = 0;
4906         }
4907 #endif
4908
4909         /*
4910          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
4911          * allocation or vector Rx preconditions, we will reset it.
4912          */
4913         adapter->rx_bulk_alloc_allowed = true;
4914         adapter->rx_vec_allowed = true;
4915
4916         return 0;
4917 }
4918
4919 static int
4920 ixgbevf_dev_start(struct rte_eth_dev *dev)
4921 {
4922         struct ixgbe_hw *hw =
4923                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4924         uint32_t intr_vector = 0;
4925         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4926         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4927
4928         int err, mask = 0;
4929
4930         PMD_INIT_FUNC_TRACE();
4931
4932         hw->mac.ops.reset_hw(hw);
4933         hw->mac.get_link_status = true;
4934
4935         /* negotiate mailbox API version to use with the PF. */
4936         ixgbevf_negotiate_api(hw);
4937
4938         ixgbevf_dev_tx_init(dev);
4939
4940         /* This can fail when allocating mbufs for descriptor rings */
4941         err = ixgbevf_dev_rx_init(dev);
4942         if (err) {
4943                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
4944                 ixgbe_dev_clear_queues(dev);
4945                 return err;
4946         }
4947
4948         /* Set vfta */
4949         ixgbevf_set_vfta_all(dev, 1);
4950
4951         /* Set HW strip */
4952         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
4953                 ETH_VLAN_EXTEND_MASK;
4954         ixgbevf_vlan_offload_set(dev, mask);
4955
4956         ixgbevf_dev_rxtx_start(dev);
4957
4958         /* check and configure queue intr-vector mapping */
4959         if (dev->data->dev_conf.intr_conf.rxq != 0) {
4960                 intr_vector = dev->data->nb_rx_queues;
4961                 if (rte_intr_efd_enable(intr_handle, intr_vector))
4962                         return -1;
4963         }
4964
4965         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
4966                 intr_handle->intr_vec =
4967                         rte_zmalloc("intr_vec",
4968                                     dev->data->nb_rx_queues * sizeof(int), 0);
4969                 if (intr_handle->intr_vec == NULL) {
4970                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
4971                                      " intr_vec", dev->data->nb_rx_queues);
4972                         return -ENOMEM;
4973                 }
4974         }
4975         ixgbevf_configure_msix(dev);
4976
4977         rte_intr_enable(intr_handle);
4978
4979         /* Re-enable interrupt for VF */
4980         ixgbevf_intr_enable(hw);
4981
4982         return 0;
4983 }
4984
4985 static void
4986 ixgbevf_dev_stop(struct rte_eth_dev *dev)
4987 {
4988         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4989         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4990         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4991
4992         PMD_INIT_FUNC_TRACE();
4993
4994         ixgbevf_intr_disable(hw);
4995
4996         hw->adapter_stopped = 1;
4997         ixgbe_stop_adapter(hw);
4998
4999         /*
5000          * Clear what we set, but keep shadow_vfta so it can be
5001          * restored after the device starts again
5002          */
5003         ixgbevf_set_vfta_all(dev, 0);
5004
5005         /* Clear stored conf */
5006         dev->data->scattered_rx = 0;
5007
5008         ixgbe_dev_clear_queues(dev);
5009
5010         /* Clean datapath event and queue/vec mapping */
5011         rte_intr_efd_disable(intr_handle);
5012         if (intr_handle->intr_vec != NULL) {
5013                 rte_free(intr_handle->intr_vec);
5014                 intr_handle->intr_vec = NULL;
5015         }
5016 }
5017
5018 static void
5019 ixgbevf_dev_close(struct rte_eth_dev *dev)
5020 {
5021         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5022
5023         PMD_INIT_FUNC_TRACE();
5024
5025         ixgbe_reset_hw(hw);
5026
5027         ixgbevf_dev_stop(dev);
5028
5029         ixgbe_dev_free_queues(dev);
5030
5031         /*
5032          * Remove the VF MAC address to ensure
5033          * that the VF traffic goes to the PF
5034          * after stop, close and detach of the VF
5035          */
5036         ixgbevf_remove_mac_addr(dev, 0);
5037 }
5038
5039 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5040 {
5041         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5042         struct ixgbe_vfta *shadow_vfta =
5043                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5044         int i = 0, j = 0, vfta = 0, mask = 1;
5045
5046         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5047                 vfta = shadow_vfta->vfta[i];
5048                 if (vfta) {
5049                         mask = 1;
5050                         for (j = 0; j < 32; j++) {
5051                                 if (vfta & mask)
5052                                         ixgbe_set_vfta(hw, (i<<5)+j, 0,
5053                                                        on, false);
5054                                 mask <<= 1;
5055                         }
5056                 }
5057         }
5058
5059 }
5060
5061 static int
5062 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5063 {
5064         struct ixgbe_hw *hw =
5065                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5066         struct ixgbe_vfta *shadow_vfta =
5067                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5068         uint32_t vid_idx = 0;
5069         uint32_t vid_bit = 0;
5070         int ret = 0;
5071
5072         PMD_INIT_FUNC_TRACE();
5073
5074         /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
5075         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5076         if (ret) {
5077                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5078                 return ret;
5079         }
5080         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5081         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5082
5083         /* Save what we set and restore it after device reset */
5084         if (on)
5085                 shadow_vfta->vfta[vid_idx] |= vid_bit;
5086         else
5087                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
5088
5089         return 0;
5090 }
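/*
 * Illustrative sketch (not part of the driver): adding a VLAN filter on a
 * VF port with rte_eth_dev_vlan_filter(), which calls
 * ixgbevf_vlan_filter_set() above. If the request is rejected over the PF
 * mailbox, the "Unable to set VF vlan" error above is logged.
 */
static int
example_add_vf_vlan(uint16_t port_id, uint16_t vlan_id)
{
	return rte_eth_dev_vlan_filter(port_id, vlan_id, 1 /* on */);
}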
5091
5092 static void
5093 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5094 {
5095         struct ixgbe_hw *hw =
5096                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5097         uint32_t ctrl;
5098
5099         PMD_INIT_FUNC_TRACE();
5100
5101         if (queue >= hw->mac.max_rx_queues)
5102                 return;
5103
5104         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5105         if (on)
5106                 ctrl |= IXGBE_RXDCTL_VME;
5107         else
5108                 ctrl &= ~IXGBE_RXDCTL_VME;
5109         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5110
5111         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5112 }
5113
5114 static void
5115 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5116 {
5117         struct ixgbe_hw *hw =
5118                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5119         uint16_t i;
5120         int on = 0;
5121
5122         /* The VF only supports the HW VLAN strip feature; others are not supported */
5123         if (mask & ETH_VLAN_STRIP_MASK) {
5124                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
5125
5126                 for (i = 0; i < hw->mac.max_rx_queues; i++)
5127                         ixgbevf_vlan_strip_queue_set(dev, i, on);
5128         }
5129 }
5130
5131 int
5132 ixgbe_vt_check(struct ixgbe_hw *hw)
5133 {
5134         uint32_t reg_val;
5135
5136         /* if Virtualization Technology is enabled */
5137         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
5138         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
5139                 PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
5140                 return -1;
5141         }
5142
5143         return 0;
5144 }
5145
5146 static uint32_t
5147 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
5148 {
5149         uint32_t vector = 0;
5150
5151         switch (hw->mac.mc_filter_type) {
5152         case 0:   /* use bits [47:36] of the address */
5153                 vector = ((uc_addr->addr_bytes[4] >> 4) |
5154                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
5155                 break;
5156         case 1:   /* use bits [46:35] of the address */
5157                 vector = ((uc_addr->addr_bytes[4] >> 3) |
5158                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
5159                 break;
5160         case 2:   /* use bits [45:34] of the address */
5161                 vector = ((uc_addr->addr_bytes[4] >> 2) |
5162                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
5163                 break;
5164         case 3:   /* use bits [43:32] of the address */
5165                 vector = ((uc_addr->addr_bytes[4]) |
5166                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
5167                 break;
5168         default:  /* Invalid mc_filter_type */
5169                 break;
5170         }
5171
5172         /* the vector can only be 12 bits wide or the boundary will be exceeded */
5173         vector &= 0xFFF;
5174         return vector;
5175 }
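/*
 * Worked example (illustrative, not part of the driver): for
 * mc_filter_type 0 and the hypothetical MAC address 00:1b:21:ab:cd:ef,
 * addr_bytes[4] = 0xcd and addr_bytes[5] = 0xef, so
 *   vector = ((0xcd >> 4) | (0xef << 4)) & 0xfff = (0x0c | 0xef0) & 0xfff
 *          = 0xefc
 * ixgbe_uc_hash_table_set() below then derives
 *   uta_idx   = (0xefc >> 5) & 0x7f = 0x77
 *   uta_shift =  0xefc & 0x1f       = 0x1c
 */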
5176
5177 static int
5178 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5179                         uint8_t on)
5180 {
5181         uint32_t vector;
5182         uint32_t uta_idx;
5183         uint32_t reg_val;
5184         uint32_t uta_shift;
5185         uint32_t rc;
5186         const uint32_t ixgbe_uta_idx_mask = 0x7F;
5187         const uint32_t ixgbe_uta_bit_shift = 5;
5188         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
5189         const uint32_t bit1 = 0x1;
5190
5191         struct ixgbe_hw *hw =
5192                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5193         struct ixgbe_uta_info *uta_info =
5194                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5195
5196         /* The UTA table only exists on 82599 hardware and newer */
5197         if (hw->mac.type < ixgbe_mac_82599EB)
5198                 return -ENOTSUP;
5199
5200         vector = ixgbe_uta_vector(hw, mac_addr);
5201         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
5202         uta_shift = vector & ixgbe_uta_bit_mask;
5203
5204         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
5205         if (rc == on)
5206                 return 0;
5207
5208         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
5209         if (on) {
5210                 uta_info->uta_in_use++;
5211                 reg_val |= (bit1 << uta_shift);
5212                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
5213         } else {
5214                 uta_info->uta_in_use--;
5215                 reg_val &= ~(bit1 << uta_shift);
5216                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
5217         }
5218
5219         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
5220
5221         if (uta_info->uta_in_use > 0)
5222                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
5223                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
5224         else
5225                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
5226
5227         return 0;
5228 }
5229
5230 static int
5231 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
5232 {
5233         int i;
5234         struct ixgbe_hw *hw =
5235                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5236         struct ixgbe_uta_info *uta_info =
5237                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5238
5239         /* The UTA table only exists on 82599 hardware and newer */
5240         if (hw->mac.type < ixgbe_mac_82599EB)
5241                 return -ENOTSUP;
5242
5243         if (on) {
5244                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5245                         uta_info->uta_shadow[i] = ~0;
5246                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
5247                 }
5248         } else {
5249                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5250                         uta_info->uta_shadow[i] = 0;
5251                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
5252                 }
5253         }
5254         return 0;
5255
5256 }
5257
5258 uint32_t
5259 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
5260 {
5261         uint32_t new_val = orig_val;
5262
5263         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
5264                 new_val |= IXGBE_VMOLR_AUPE;
5265         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
5266                 new_val |= IXGBE_VMOLR_ROMPE;
5267         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
5268                 new_val |= IXGBE_VMOLR_ROPE;
5269         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
5270                 new_val |= IXGBE_VMOLR_BAM;
5271         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
5272                 new_val |= IXGBE_VMOLR_MPE;
5273
5274         return new_val;
5275 }
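
/*
 * Minimal usage sketch for the helper above (illustrative only; "pool" is
 * assumed to be a VMDq pool index): accepting untagged and broadcast
 * traffic for a pool ORs AUPE and BAM into the current VMOLR value.
 *
 *     uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
 *     vmolr = ixgbe_convert_vm_rx_mask_to_val(ETH_VMDQ_ACCEPT_UNTAG |
 *                                             ETH_VMDQ_ACCEPT_BROADCAST,
 *                                             vmolr);
 *     IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
 */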
5276
5277 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
5278 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
5279 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
5280 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
5281 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5282         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5283         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5284
5285 static int
5286 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5287                       struct rte_eth_mirror_conf *mirror_conf,
5288                       uint8_t rule_id, uint8_t on)
5289 {
5290         uint32_t mr_ctl, vlvf;
5291         uint32_t mp_lsb = 0;
5292         uint32_t mv_msb = 0;
5293         uint32_t mv_lsb = 0;
5294         uint32_t mp_msb = 0;
5295         uint8_t i = 0;
5296         int reg_index = 0;
5297         uint64_t vlan_mask = 0;
5298
5299         const uint8_t pool_mask_offset = 32;
5300         const uint8_t vlan_mask_offset = 32;
5301         const uint8_t dst_pool_offset = 8;
5302         const uint8_t rule_mr_offset  = 4;
5303         const uint8_t mirror_rule_mask = 0x0F;
5304
5305         struct ixgbe_mirror_info *mr_info =
5306                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5307         struct ixgbe_hw *hw =
5308                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5309         uint8_t mirror_type = 0;
5310
5311         if (ixgbe_vt_check(hw) < 0)
5312                 return -ENOTSUP;
5313
5314         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5315                 return -EINVAL;
5316
5317         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5318                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5319                             mirror_conf->rule_type);
5320                 return -EINVAL;
5321         }
5322
5323         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5324                 mirror_type |= IXGBE_MRCTL_VLME;
5325                 /* Check if VLAN ID is valid and find the corresponding
5326                  * VLAN ID index in VLVF
5327                  */
5328                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5329                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5330                                 /* search vlan id related pool vlan filter
5331                                  * index
5332                                  */
5333                                 reg_index = ixgbe_find_vlvf_slot(
5334                                                 hw,
5335                                                 mirror_conf->vlan.vlan_id[i],
5336                                                 false);
5337                                 if (reg_index < 0)
5338                                         return -EINVAL;
5339                                 vlvf = IXGBE_READ_REG(hw,
5340                                                       IXGBE_VLVF(reg_index));
5341                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
5342                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5343                                       mirror_conf->vlan.vlan_id[i]))
5344                                         vlan_mask |= (1ULL << reg_index);
5345                                 else
5346                                         return -EINVAL;
5347                         }
5348                 }
5349
5350                 if (on) {
5351                         mv_lsb = vlan_mask & 0xFFFFFFFF;
5352                         mv_msb = vlan_mask >> vlan_mask_offset;
5353
5354                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
5355                                                 mirror_conf->vlan.vlan_mask;
5356                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5357                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5358                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5359                                                 mirror_conf->vlan.vlan_id[i];
5360                         }
5361                 } else {
5362                         mv_lsb = 0;
5363                         mv_msb = 0;
5364                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5365                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5366                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5367                 }
5368         }
5369
5370         /**
5371          * If pool mirroring is enabled, write the related pool mask register;
5372          * if it is disabled, clear the PFMRVM register.
5373          */
5374         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5375                 mirror_type |= IXGBE_MRCTL_VPME;
5376                 if (on) {
5377                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5378                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5379                         mr_info->mr_conf[rule_id].pool_mask =
5380                                         mirror_conf->pool_mask;
5381
5382                 } else {
5383                         mp_lsb = 0;
5384                         mp_msb = 0;
5385                         mr_info->mr_conf[rule_id].pool_mask = 0;
5386                 }
5387         }
5388         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5389                 mirror_type |= IXGBE_MRCTL_UPME;
5390         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5391                 mirror_type |= IXGBE_MRCTL_DPME;
5392
5393         /* read mirror control register and recalculate it */
5394         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5395
5396         if (on) {
5397                 mr_ctl |= mirror_type;
5398                 mr_ctl &= mirror_rule_mask;
5399                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
5400         } else {
5401                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
5402         }
5403
5404         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5405         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5406
5407         /* write mirror control register */
5408         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5409
5410         /* write pool mirror control register */
5411         if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
5412                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5413                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5414                                 mp_msb);
5415         }
5416         /* write VLAN mirror control register */
5417         if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
5418                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5419                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5420                                 mv_msb);
5421         }
5422
5423         return 0;
5424 }
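
/*
 * Application-level sketch of driving the rule above through the generic
 * ethdev mirror API of this release (the public entry point is assumed to
 * be rte_eth_mirror_rule_set(); the struct fields match those read by the
 * callback above): mirror all traffic of pool 2 into pool 0 as rule 0.
 *
 *     struct rte_eth_mirror_conf conf = { 0 };
 *
 *     conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP;
 *     conf.pool_mask = 1ULL << 2;
 *     conf.dst_pool  = 0;
 *     rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
 */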
5425
5426 static int
5427 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5428 {
5429         int mr_ctl = 0;
5430         uint32_t lsb_val = 0;
5431         uint32_t msb_val = 0;
5432         const uint8_t rule_mr_offset = 4;
5433
5434         struct ixgbe_hw *hw =
5435                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5436         struct ixgbe_mirror_info *mr_info =
5437                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5438
5439         if (ixgbe_vt_check(hw) < 0)
5440                 return -ENOTSUP;
5441
5442         memset(&mr_info->mr_conf[rule_id], 0,
5443                sizeof(struct rte_eth_mirror_conf));
5444
5445         /* clear PFVMCTL register */
5446         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5447
5448         /* clear pool mask register */
5449         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5450         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5451
5452         /* clear vlan mask register */
5453         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5454         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
5455
5456         return 0;
5457 }
5458
5459 static int
5460 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5461 {
5462         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5463         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5464         uint32_t mask;
5465         struct ixgbe_hw *hw =
5466                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5467
5468         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5469         mask |= (1 << IXGBE_MISC_VEC_ID);
5470         RTE_SET_USED(queue_id);
5471         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5472
5473         rte_intr_enable(intr_handle);
5474
5475         return 0;
5476 }
5477
5478 static int
5479 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5480 {
5481         uint32_t mask;
5482         struct ixgbe_hw *hw =
5483                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5484
5485         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5486         mask &= ~(1 << IXGBE_MISC_VEC_ID);
5487         RTE_SET_USED(queue_id);
5488         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5489
5490         return 0;
5491 }
5492
5493 static int
5494 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5495 {
5496         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5497         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5498         uint32_t mask;
5499         struct ixgbe_hw *hw =
5500                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5501         struct ixgbe_interrupt *intr =
5502                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5503
5504         if (queue_id < 16) {
5505                 ixgbe_disable_intr(hw);
5506                 intr->mask |= (1 << queue_id);
5507                 ixgbe_enable_intr(dev);
5508         } else if (queue_id < 32) {
5509                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5510                 mask |= (1 << queue_id);
5511                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5512         } else if (queue_id < 64) {
5513                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5514                 mask |= (1 << (queue_id - 32));
5515                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5516         }
5517         rte_intr_enable(intr_handle);
5518
5519         return 0;
5520 }
5521
5522 static int
5523 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5524 {
5525         uint32_t mask;
5526         struct ixgbe_hw *hw =
5527                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5528         struct ixgbe_interrupt *intr =
5529                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5530
5531         if (queue_id < 16) {
5532                 ixgbe_disable_intr(hw);
5533                 intr->mask &= ~(1 << queue_id);
5534                 ixgbe_enable_intr(dev);
5535         } else if (queue_id < 32) {
5536                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5537                 mask &= ~(1 << queue_id);
5538                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5539         } else if (queue_id < 64) {
5540                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5541                 mask &= ~(1 << (queue_id - 32));
5542                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5543         }
5544
5545         return 0;
5546 }
5547
5548 static void
5549 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5550                      uint8_t queue, uint8_t msix_vector)
5551 {
5552         uint32_t tmp, idx;
5553
5554         if (direction == -1) {
5555                 /* other causes */
5556                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5557                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
5558                 tmp &= ~0xFF;
5559                 tmp |= msix_vector;
5560                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
5561         } else {
5562                 /* rx or tx cause */
5563                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5564                 idx = ((16 * (queue & 1)) + (8 * direction));
5565                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
5566                 tmp &= ~(0xFF << idx);
5567                 tmp |= (msix_vector << idx);
5568                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
5569         }
5570 }
5571
5572 /**
5573  * set the IVAR registers, mapping interrupt causes to vectors
5574  * @param hw
5575  *  pointer to ixgbe_hw struct
5576  * @direction
5577  *  0 for Rx, 1 for Tx, -1 for other causes
5578  * @queue
5579  *  queue to map the corresponding interrupt to
5580  * @msix_vector
5581  *  the vector to map to the corresponding queue
5582  */
5583 static void
5584 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5585                    uint8_t queue, uint8_t msix_vector)
5586 {
5587         uint32_t tmp, idx;
5588
5589         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5590         if (hw->mac.type == ixgbe_mac_82598EB) {
5591                 if (direction == -1)
5592                         direction = 0;
5593                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
5594                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
5595                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
5596                 tmp |= (msix_vector << (8 * (queue & 0x3)));
5597                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
5598         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
5599                         (hw->mac.type == ixgbe_mac_X540)) {
5600                 if (direction == -1) {
5601                         /* other causes */
5602                         idx = ((queue & 1) * 8);
5603                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5604                         tmp &= ~(0xFF << idx);
5605                         tmp |= (msix_vector << idx);
5606                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
5607                 } else {
5608                         /* rx or tx causes */
5609                         idx = ((16 * (queue & 1)) + (8 * direction));
5610                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
5611                         tmp &= ~(0xFF << idx);
5612                         tmp |= (msix_vector << idx);
5613                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
5614                 }
5615         }
5616 }
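
/*
 * Worked example of the IVAR index arithmetic above (82599/X540 layout):
 * each 32-bit IVAR register holds four one-byte entries, Rx/Tx for an
 * even/odd queue pair.  Mapping Rx queue 5 to MSI-X vector 3:
 *
 *     register = IXGBE_IVAR(5 >> 1) = IXGBE_IVAR(2)
 *     idx      = 16 * (5 & 1) + 8 * 0 = 16        (Rx entry of the odd queue)
 *     entry    = (3 | IXGBE_IVAR_ALLOC_VAL) placed at bits [23:16]
 */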
5617
5618 static void
5619 ixgbevf_configure_msix(struct rte_eth_dev *dev)
5620 {
5621         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5622         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5623         struct ixgbe_hw *hw =
5624                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5625         uint32_t q_idx;
5626         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
5627
5628         /* Configure VF other cause ivar */
5629         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
5630
5631         /* Don't configure the MSI-X register if no mapping is done
5632          * between interrupt vector and event fd.
5633          */
5634         if (!rte_intr_dp_is_en(intr_handle))
5635                 return;
5636
5637         /* Configure all RX queues of VF */
5638         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
5639                 /* Force all queues to use vector 0,
5640                  * as IXGBE_VF_MAXMSIVECOTR = 1
5641                  */
5642                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
5643                 intr_handle->intr_vec[q_idx] = vector_idx;
5644         }
5645 }
5646
5647 /**
5648  * Sets up the hardware to properly generate MSI-X interrupts
5649  * @dev
5650  *  pointer to rte_eth_dev structure
5651  */
5652 static void
5653 ixgbe_configure_msix(struct rte_eth_dev *dev)
5654 {
5655         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5656         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5657         struct ixgbe_hw *hw =
5658                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5659         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
5660         uint32_t vec = IXGBE_MISC_VEC_ID;
5661         uint32_t mask;
5662         uint32_t gpie;
5663
5664         /* Don't configure the MSI-X register if no mapping is done
5665          * between interrupt vector and event fd
5666          */
5667         if (!rte_intr_dp_is_en(intr_handle))
5668                 return;
5669
5670         if (rte_intr_allow_others(intr_handle))
5671                 vec = base = IXGBE_RX_VEC_START;
5672
5673         /* setup GPIE for MSI-x mode */
5674         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5675         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5676                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
5677         /* auto clearing and auto setting corresponding bits in EIMS
5678          * when MSI-X interrupt is triggered
5679          */
5680         if (hw->mac.type == ixgbe_mac_82598EB) {
5681                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5682         } else {
5683                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5684                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5685         }
5686         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5687
5688         /* Populate the IVAR table and set the ITR values to the
5689          * corresponding register.
5690          */
5691         for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
5692              queue_id++) {
5693                 /* by default, 1:1 mapping */
5694                 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
5695                 intr_handle->intr_vec[queue_id] = vec;
5696                 if (vec < base + intr_handle->nb_efd - 1)
5697                         vec++;
5698         }
5699
5700         switch (hw->mac.type) {
5701         case ixgbe_mac_82598EB:
5702                 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
5703                                    IXGBE_MISC_VEC_ID);
5704                 break;
5705         case ixgbe_mac_82599EB:
5706         case ixgbe_mac_X540:
5707                 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
5708                 break;
5709         default:
5710                 break;
5711         }
5712         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
5713                         IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
5714
5715         /* set up to autoclear timer, and the vectors */
5716         mask = IXGBE_EIMS_ENABLE_MASK;
5717         mask &= ~(IXGBE_EIMS_OTHER |
5718                   IXGBE_EIMS_MAILBOX |
5719                   IXGBE_EIMS_LSC);
5720
5721         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5722 }
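
/*
 * Example of the 1:1 vector assignment loop above, assuming
 * rte_intr_allow_others() is true and IXGBE_RX_VEC_START equals 1:
 * with 4 Rx queues and intr_handle->nb_efd = 2 the loop produces
 *
 *     queue 0 -> vector 1
 *     queue 1 -> vector 2
 *     queue 2 -> vector 2   (vec stops incrementing at base + nb_efd - 1,
 *     queue 3 -> vector 2    so the remaining queues share the last vector)
 */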
5723
5724 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
5725         uint16_t queue_idx, uint16_t tx_rate)
5726 {
5727         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5728         uint32_t rf_dec, rf_int;
5729         uint32_t bcnrc_val;
5730         uint16_t link_speed = dev->data->dev_link.link_speed;
5731
5732         if (queue_idx >= hw->mac.max_tx_queues)
5733                 return -EINVAL;
5734
5735         if (tx_rate != 0) {
5736                 /* Calculate the rate factor values to set */
5737                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
5738                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
5739                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
5740
5741                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
5742                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
5743                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
5744                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
5745         } else {
5746                 bcnrc_val = 0;
5747         }
5748
5749         /*
5750          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
5751          * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
5752          * set as 0x4.
5753          */
5754         if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
5755                 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
5756                                 IXGBE_MAX_JUMBO_FRAME_SIZE))
5757                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5758                         IXGBE_MMW_SIZE_JUMBO_FRAME);
5759         else
5760                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5761                         IXGBE_MMW_SIZE_DEFAULT);
5762
5763         /* Set RTTBCNRC of queue X */
5764         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
5765         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
5766         IXGBE_WRITE_FLUSH(hw);
5767
5768         return 0;
5769 }
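
/*
 * Worked example of the rate-factor arithmetic above: limiting a queue to
 * 3000 Mbps on a 10000 Mbps link.  RF is a fixed-point divider
 * (link_speed / tx_rate) with a 14-bit fractional part:
 *
 *     rf_int = 10000 / 3000 = 3
 *     rf_dec = 10000 % 3000 = 1000
 *     rf_dec = (1000 << 14) / 3000 = 5461          (~0.333 as a 14-bit fraction)
 *     RTTBCNRC = IXGBE_RTTBCNRC_RS_ENA | (3 << 14) | 5461
 */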
5770
5771 static int
5772 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5773                      __attribute__((unused)) uint32_t index,
5774                      __attribute__((unused)) uint32_t pool)
5775 {
5776         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5777         int diag;
5778
5779         /*
5780          * On an 82599 VF, adding the same MAC address again is not an idempotent
5781          * operation. Trap this case to avoid exhausting the [very limited]
5782          * set of PF resources used to store VF MAC addresses.
5783          */
5784         if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5785                 return -1;
5786         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5787         if (diag != 0)
5788                 PMD_DRV_LOG(ERR, "Unable to add MAC address "
5789                             "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
5790                             mac_addr->addr_bytes[0],
5791                             mac_addr->addr_bytes[1],
5792                             mac_addr->addr_bytes[2],
5793                             mac_addr->addr_bytes[3],
5794                             mac_addr->addr_bytes[4],
5795                             mac_addr->addr_bytes[5],
5796                             diag);
5797         return diag;
5798 }
5799
5800 static void
5801 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
5802 {
5803         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5804         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
5805         struct ether_addr *mac_addr;
5806         uint32_t i;
5807         int diag;
5808
5809         /*
5810          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
5811          * not support the deletion of a given MAC address.
5812          * Instead, all MAC addresses must be deleted and then re-added,
5813          * with the exception of the one to be deleted.
5814          */
5815         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
5816
5817         /*
5818          * Re-add all MAC addresses, except the deleted one and the
5819          * permanent MAC address.
5820          */
5821         for (i = 0, mac_addr = dev->data->mac_addrs;
5822              i < hw->mac.num_rar_entries; i++, mac_addr++) {
5823                 /* Skip the deleted MAC address */
5824                 if (i == index)
5825                         continue;
5826                 /* Skip NULL MAC addresses */
5827                 if (is_zero_ether_addr(mac_addr))
5828                         continue;
5829                 /* Skip the permanent MAC address */
5830                 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5831                         continue;
5832                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5833                 if (diag != 0)
5834                         PMD_DRV_LOG(ERR,
5835                                     "Adding again MAC address "
5836                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
5837                                     "diag=%d",
5838                                     mac_addr->addr_bytes[0],
5839                                     mac_addr->addr_bytes[1],
5840                                     mac_addr->addr_bytes[2],
5841                                     mac_addr->addr_bytes[3],
5842                                     mac_addr->addr_bytes[4],
5843                                     mac_addr->addr_bytes[5],
5844                                     diag);
5845         }
5846 }
5847
5848 static void
5849 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
5850 {
5851         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5852
5853         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
5854 }
5855
5856 int
5857 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
5858                         struct rte_eth_syn_filter *filter,
5859                         bool add)
5860 {
5861         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5862         struct ixgbe_filter_info *filter_info =
5863                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5864         uint32_t syn_info;
5865         uint32_t synqf;
5866
5867         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
5868                 return -EINVAL;
5869
5870         syn_info = filter_info->syn_info;
5871
5872         if (add) {
5873                 if (syn_info & IXGBE_SYN_FILTER_ENABLE)
5874                         return -EINVAL;
5875                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
5876                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
5877
5878                 if (filter->hig_pri)
5879                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
5880                 else
5881                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
5882         } else {
5883                 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
5884                 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
5885                         return -ENOENT;
5886                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
5887         }
5888
5889         filter_info->syn_info = synqf;
5890         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
5891         IXGBE_WRITE_FLUSH(hw);
5892         return 0;
5893 }
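
/*
 * Usage sketch (application side, assuming the legacy filter_ctrl API of
 * this release): steer TCP SYN packets to Rx queue 4 with high priority.
 *
 *     struct rte_eth_syn_filter syn = {
 *             .hig_pri = 1,
 *             .queue   = 4,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
 *                             RTE_ETH_FILTER_ADD, &syn);
 */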
5894
5895 static int
5896 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
5897                         struct rte_eth_syn_filter *filter)
5898 {
5899         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5900         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
5901
5902         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
5903                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
5904                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
5905                 return 0;
5906         }
5907         return -ENOENT;
5908 }
5909
5910 static int
5911 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
5912                         enum rte_filter_op filter_op,
5913                         void *arg)
5914 {
5915         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5916         int ret;
5917
5918         MAC_TYPE_FILTER_SUP(hw->mac.type);
5919
5920         if (filter_op == RTE_ETH_FILTER_NOP)
5921                 return 0;
5922
5923         if (arg == NULL) {
5924                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
5925                             filter_op);
5926                 return -EINVAL;
5927         }
5928
5929         switch (filter_op) {
5930         case RTE_ETH_FILTER_ADD:
5931                 ret = ixgbe_syn_filter_set(dev,
5932                                 (struct rte_eth_syn_filter *)arg,
5933                                 TRUE);
5934                 break;
5935         case RTE_ETH_FILTER_DELETE:
5936                 ret = ixgbe_syn_filter_set(dev,
5937                                 (struct rte_eth_syn_filter *)arg,
5938                                 FALSE);
5939                 break;
5940         case RTE_ETH_FILTER_GET:
5941                 ret = ixgbe_syn_filter_get(dev,
5942                                 (struct rte_eth_syn_filter *)arg);
5943                 break;
5944         default:
5945                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
5946                 ret = -EINVAL;
5947                 break;
5948         }
5949
5950         return ret;
5951 }
5952
5953
5954 static inline enum ixgbe_5tuple_protocol
5955 convert_protocol_type(uint8_t protocol_value)
5956 {
5957         if (protocol_value == IPPROTO_TCP)
5958                 return IXGBE_FILTER_PROTOCOL_TCP;
5959         else if (protocol_value == IPPROTO_UDP)
5960                 return IXGBE_FILTER_PROTOCOL_UDP;
5961         else if (protocol_value == IPPROTO_SCTP)
5962                 return IXGBE_FILTER_PROTOCOL_SCTP;
5963         else
5964                 return IXGBE_FILTER_PROTOCOL_NONE;
5965 }
5966
5967 /* inject a 5-tuple filter to HW */
5968 static inline void
5969 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
5970                            struct ixgbe_5tuple_filter *filter)
5971 {
5972         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5973         int i;
5974         uint32_t ftqf, sdpqf;
5975         uint32_t l34timir = 0;
5976         uint8_t mask = 0xff;
5977
5978         i = filter->index;
5979
5980         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
5981                                 IXGBE_SDPQF_DSTPORT_SHIFT);
5982         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
5983
5984         ftqf = (uint32_t)(filter->filter_info.proto &
5985                 IXGBE_FTQF_PROTOCOL_MASK);
5986         ftqf |= (uint32_t)((filter->filter_info.priority &
5987                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
5988         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
5989                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
5990         if (filter->filter_info.dst_ip_mask == 0)
5991                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
5992         if (filter->filter_info.src_port_mask == 0)
5993                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
5994         if (filter->filter_info.dst_port_mask == 0)
5995                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
5996         if (filter->filter_info.proto_mask == 0)
5997                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
5998         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
5999         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
6000         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
6001
6002         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
6003         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
6004         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
6005         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
6006
6007         l34timir |= IXGBE_L34T_IMIR_RESERVE;
6008         l34timir |= (uint32_t)(filter->queue <<
6009                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
6010         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
6011 }
6012
6013 /*
6014  * add a 5tuple filter
6015  *
6016  * @param
6017  * dev: Pointer to struct rte_eth_dev.
6018  * index: the index the filter allocates.
6019  * filter: pointer to the filter that will be added.
6020  * rx_queue: the queue id the filter assigned to.
6021  *
6022  * @return
6023  *    - On success, zero.
6024  *    - On failure, a negative value.
6025  */
6026 static int
6027 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
6028                         struct ixgbe_5tuple_filter *filter)
6029 {
6030         struct ixgbe_filter_info *filter_info =
6031                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6032         int i, idx, shift;
6033
6034         /*
6035          * look for an unused 5tuple filter index,
6036          * and insert the filter into the list.
6037          */
6038         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
6039                 idx = i / (sizeof(uint32_t) * NBBY);
6040                 shift = i % (sizeof(uint32_t) * NBBY);
6041                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
6042                         filter_info->fivetuple_mask[idx] |= 1 << shift;
6043                         filter->index = i;
6044                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
6045                                           filter,
6046                                           entries);
6047                         break;
6048                 }
6049         }
6050         if (i >= IXGBE_MAX_FTQF_FILTERS) {
6051                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
6052                 return -ENOSYS;
6053         }
6054
6055         ixgbe_inject_5tuple_filter(dev, filter);
6056
6057         return 0;
6058 }
6059
6060 /*
6061  * remove a 5tuple filter
6062  *
6063  * @param
6064  * dev: Pointer to struct rte_eth_dev.
6065  * filter: pointer to the filter to be removed.
6066  */
6067 static void
6068 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
6069                         struct ixgbe_5tuple_filter *filter)
6070 {
6071         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6072         struct ixgbe_filter_info *filter_info =
6073                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6074         uint16_t index = filter->index;
6075
6076         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
6077                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
6078         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
6079         rte_free(filter);
6080
6081         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
6082         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
6083         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
6084         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
6085         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
6086 }
6087
6088 static int
6089 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
6090 {
6091         struct ixgbe_hw *hw;
6092         uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
6093         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
6094
6095         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6096
6097         if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
6098                 return -EINVAL;
6099
6100         /* Refuse an MTU that requires scattered packet support when this
6101          * feature has not been enabled before.
6102          */
6103         if (!rx_conf->enable_scatter &&
6104             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
6105              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
6106                 return -EINVAL;
6107
6108         /*
6109          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
6110          * request of the version 2.0 of the mailbox API.
6111          * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
6112          * of the mailbox API.
6113          * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
6114          * prior to 3.11.33 which contains the following change:
6115          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
6116          */
6117         ixgbevf_rlpml_set_vf(hw, max_frame);
6118
6119         /* update max frame size */
6120         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
6121         return 0;
6122 }
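
/*
 * Example of the frame-size arithmetic above: requesting an MTU of 1500
 * gives max_frame = 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518
 * bytes, and a jumbo MTU of 9000 gives 9018 bytes.  The jumbo case is
 * accepted only when scattered Rx is enabled or the configured mbuf data
 * room (min_rx_buf_size - RTE_PKTMBUF_HEADROOM) can hold the frame plus
 * two VLAN tags.
 */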
6123
6124 static inline struct ixgbe_5tuple_filter *
6125 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
6126                         struct ixgbe_5tuple_filter_info *key)
6127 {
6128         struct ixgbe_5tuple_filter *it;
6129
6130         TAILQ_FOREACH(it, filter_list, entries) {
6131                 if (memcmp(key, &it->filter_info,
6132                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
6133                         return it;
6134                 }
6135         }
6136         return NULL;
6137 }
6138
6139 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
6140 static inline int
6141 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
6142                         struct ixgbe_5tuple_filter_info *filter_info)
6143 {
6144         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
6145                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
6146                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
6147                 return -EINVAL;
6148
6149         switch (filter->dst_ip_mask) {
6150         case UINT32_MAX:
6151                 filter_info->dst_ip_mask = 0;
6152                 filter_info->dst_ip = filter->dst_ip;
6153                 break;
6154         case 0:
6155                 filter_info->dst_ip_mask = 1;
6156                 break;
6157         default:
6158                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
6159                 return -EINVAL;
6160         }
6161
6162         switch (filter->src_ip_mask) {
6163         case UINT32_MAX:
6164                 filter_info->src_ip_mask = 0;
6165                 filter_info->src_ip = filter->src_ip;
6166                 break;
6167         case 0:
6168                 filter_info->src_ip_mask = 1;
6169                 break;
6170         default:
6171                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
6172                 return -EINVAL;
6173         }
6174
6175         switch (filter->dst_port_mask) {
6176         case UINT16_MAX:
6177                 filter_info->dst_port_mask = 0;
6178                 filter_info->dst_port = filter->dst_port;
6179                 break;
6180         case 0:
6181                 filter_info->dst_port_mask = 1;
6182                 break;
6183         default:
6184                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
6185                 return -EINVAL;
6186         }
6187
6188         switch (filter->src_port_mask) {
6189         case UINT16_MAX:
6190                 filter_info->src_port_mask = 0;
6191                 filter_info->src_port = filter->src_port;
6192                 break;
6193         case 0:
6194                 filter_info->src_port_mask = 1;
6195                 break;
6196         default:
6197                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
6198                 return -EINVAL;
6199         }
6200
6201         switch (filter->proto_mask) {
6202         case UINT8_MAX:
6203                 filter_info->proto_mask = 0;
6204                 filter_info->proto =
6205                         convert_protocol_type(filter->proto);
6206                 break;
6207         case 0:
6208                 filter_info->proto_mask = 1;
6209                 break;
6210         default:
6211                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
6212                 return -EINVAL;
6213         }
6214
6215         filter_info->priority = (uint8_t)filter->priority;
6216         return 0;
6217 }
6218
6219 /*
6220  * add or delete a ntuple filter
6221  *
6222  * @param
6223  * dev: Pointer to struct rte_eth_dev.
6224  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6225  * add: if true, add filter, if false, remove filter
6226  *
6227  * @return
6228  *    - On success, zero.
6229  *    - On failure, a negative value.
6230  */
6231 int
6232 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
6233                         struct rte_eth_ntuple_filter *ntuple_filter,
6234                         bool add)
6235 {
6236         struct ixgbe_filter_info *filter_info =
6237                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6238         struct ixgbe_5tuple_filter_info filter_5tuple;
6239         struct ixgbe_5tuple_filter *filter;
6240         int ret;
6241
6242         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6243                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6244                 return -EINVAL;
6245         }
6246
6247         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6248         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6249         if (ret < 0)
6250                 return ret;
6251
6252         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6253                                          &filter_5tuple);
6254         if (filter != NULL && add) {
6255                 PMD_DRV_LOG(ERR, "filter exists.");
6256                 return -EEXIST;
6257         }
6258         if (filter == NULL && !add) {
6259                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6260                 return -ENOENT;
6261         }
6262
6263         if (add) {
6264                 filter = rte_zmalloc("ixgbe_5tuple_filter",
6265                                 sizeof(struct ixgbe_5tuple_filter), 0);
6266                 if (filter == NULL)
6267                         return -ENOMEM;
6268                 (void)rte_memcpy(&filter->filter_info,
6269                                  &filter_5tuple,
6270                                  sizeof(struct ixgbe_5tuple_filter_info));
6271                 filter->queue = ntuple_filter->queue;
6272                 ret = ixgbe_add_5tuple_filter(dev, filter);
6273                 if (ret < 0) {
6274                         rte_free(filter);
6275                         return ret;
6276                 }
6277         } else
6278                 ixgbe_remove_5tuple_filter(dev, filter);
6279
6280         return 0;
6281 }
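
/*
 * Usage sketch (application side, assuming the legacy filter_ctrl API):
 * match TCP traffic to one destination IP and port and steer it to
 * Rx queue 2.  A mask of UINT32_MAX/UINT16_MAX means "compare this field
 * exactly", a mask of 0 means "ignore it"; dst_ip_be and dst_port_be are
 * placeholders for values in network byte order.
 *
 *     struct rte_eth_ntuple_filter nt = {
 *             .flags         = RTE_5TUPLE_FLAGS,
 *             .dst_ip        = dst_ip_be,
 *             .dst_ip_mask   = UINT32_MAX,
 *             .dst_port      = dst_port_be,
 *             .dst_port_mask = UINT16_MAX,
 *             .proto         = IPPROTO_TCP,
 *             .proto_mask    = UINT8_MAX,
 *             .priority      = 1,
 *             .queue         = 2,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                             RTE_ETH_FILTER_ADD, &nt);
 */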
6282
6283 /*
6284  * get a ntuple filter
6285  *
6286  * @param
6287  * dev: Pointer to struct rte_eth_dev.
6288  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6289  *
6290  * @return
6291  *    - On success, zero.
6292  *    - On failure, a negative value.
6293  */
6294 static int
6295 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
6296                         struct rte_eth_ntuple_filter *ntuple_filter)
6297 {
6298         struct ixgbe_filter_info *filter_info =
6299                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6300         struct ixgbe_5tuple_filter_info filter_5tuple;
6301         struct ixgbe_5tuple_filter *filter;
6302         int ret;
6303
6304         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6305                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6306                 return -EINVAL;
6307         }
6308
6309         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6310         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6311         if (ret < 0)
6312                 return ret;
6313
6314         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6315                                          &filter_5tuple);
6316         if (filter == NULL) {
6317                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6318                 return -ENOENT;
6319         }
6320         ntuple_filter->queue = filter->queue;
6321         return 0;
6322 }
6323
6324 /*
6325  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
6326  * @dev: pointer to rte_eth_dev structure
6327  * @filter_op: operation to be taken.
6328  * @arg: a pointer to specific structure corresponding to the filter_op
6329  *
6330  * @return
6331  *    - On success, zero.
6332  *    - On failure, a negative value.
6333  */
6334 static int
6335 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
6336                                 enum rte_filter_op filter_op,
6337                                 void *arg)
6338 {
6339         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6340         int ret;
6341
6342         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
6343
6344         if (filter_op == RTE_ETH_FILTER_NOP)
6345                 return 0;
6346
6347         if (arg == NULL) {
6348                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6349                             filter_op);
6350                 return -EINVAL;
6351         }
6352
6353         switch (filter_op) {
6354         case RTE_ETH_FILTER_ADD:
6355                 ret = ixgbe_add_del_ntuple_filter(dev,
6356                         (struct rte_eth_ntuple_filter *)arg,
6357                         TRUE);
6358                 break;
6359         case RTE_ETH_FILTER_DELETE:
6360                 ret = ixgbe_add_del_ntuple_filter(dev,
6361                         (struct rte_eth_ntuple_filter *)arg,
6362                         FALSE);
6363                 break;
6364         case RTE_ETH_FILTER_GET:
6365                 ret = ixgbe_get_ntuple_filter(dev,
6366                         (struct rte_eth_ntuple_filter *)arg);
6367                 break;
6368         default:
6369                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6370                 ret = -EINVAL;
6371                 break;
6372         }
6373         return ret;
6374 }
6375
6376 int
6377 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6378                         struct rte_eth_ethertype_filter *filter,
6379                         bool add)
6380 {
6381         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6382         struct ixgbe_filter_info *filter_info =
6383                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6384         uint32_t etqf = 0;
6385         uint32_t etqs = 0;
6386         int ret;
6387         struct ixgbe_ethertype_filter ethertype_filter;
6388
6389         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6390                 return -EINVAL;
6391
6392         if (filter->ether_type == ETHER_TYPE_IPv4 ||
6393                 filter->ether_type == ETHER_TYPE_IPv6) {
6394                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
6395                         " ethertype filter.", filter->ether_type);
6396                 return -EINVAL;
6397         }
6398
6399         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6400                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6401                 return -EINVAL;
6402         }
6403         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6404                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
6405                 return -EINVAL;
6406         }
6407
6408         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6409         if (ret >= 0 && add) {
6410                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6411                             filter->ether_type);
6412                 return -EEXIST;
6413         }
6414         if (ret < 0 && !add) {
6415                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6416                             filter->ether_type);
6417                 return -ENOENT;
6418         }
6419
6420         if (add) {
6421                 etqf = IXGBE_ETQF_FILTER_EN;
6422                 etqf |= (uint32_t)filter->ether_type;
6423                 etqs |= (uint32_t)((filter->queue <<
6424                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
6425                                     IXGBE_ETQS_RX_QUEUE);
6426                 etqs |= IXGBE_ETQS_QUEUE_EN;
6427
6428                 ethertype_filter.ethertype = filter->ether_type;
6429                 ethertype_filter.etqf = etqf;
6430                 ethertype_filter.etqs = etqs;
6431                 ethertype_filter.conf = FALSE;
6432                 ret = ixgbe_ethertype_filter_insert(filter_info,
6433                                                     &ethertype_filter);
6434                 if (ret < 0) {
6435                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
6436                         return -ENOSPC;
6437                 }
6438         } else {
6439                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6440                 if (ret < 0)
6441                         return -ENOSYS;
6442         }
6443         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6444         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6445         IXGBE_WRITE_FLUSH(hw);
6446
6447         return 0;
6448 }
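
/*
 * Usage sketch (application side, assuming the legacy filter_ctrl API):
 * steer IEEE 1588/PTP frames (ether_type 0x88F7) to Rx queue 1.  MAC
 * compare and drop flags are rejected by this driver, so flags stays 0.
 *
 *     struct rte_eth_ethertype_filter et = {
 *             .ether_type = 0x88F7,
 *             .flags      = 0,
 *             .queue      = 1,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                             RTE_ETH_FILTER_ADD, &et);
 */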
6449
6450 static int
6451 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
6452                         struct rte_eth_ethertype_filter *filter)
6453 {
6454         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6455         struct ixgbe_filter_info *filter_info =
6456                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6457         uint32_t etqf, etqs;
6458         int ret;
6459
6460         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6461         if (ret < 0) {
6462                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6463                             filter->ether_type);
6464                 return -ENOENT;
6465         }
6466
6467         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
6468         if (etqf & IXGBE_ETQF_FILTER_EN) {
6469                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
6470                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
6471                 filter->flags = 0;
6472                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
6473                                IXGBE_ETQS_RX_QUEUE_SHIFT;
6474                 return 0;
6475         }
6476         return -ENOENT;
6477 }
6478
6479 /*
6480  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
6481  * @dev: pointer to rte_eth_dev structure
6482  * @filter_op: operation to be taken.
6483  * @arg: a pointer to specific structure corresponding to the filter_op
6484  */
6485 static int
6486 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
6487                                 enum rte_filter_op filter_op,
6488                                 void *arg)
6489 {
6490         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6491         int ret;
6492
6493         MAC_TYPE_FILTER_SUP(hw->mac.type);
6494
6495         if (filter_op == RTE_ETH_FILTER_NOP)
6496                 return 0;
6497
6498         if (arg == NULL) {
6499                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6500                             filter_op);
6501                 return -EINVAL;
6502         }
6503
6504         switch (filter_op) {
6505         case RTE_ETH_FILTER_ADD:
6506                 ret = ixgbe_add_del_ethertype_filter(dev,
6507                         (struct rte_eth_ethertype_filter *)arg,
6508                         TRUE);
6509                 break;
6510         case RTE_ETH_FILTER_DELETE:
6511                 ret = ixgbe_add_del_ethertype_filter(dev,
6512                         (struct rte_eth_ethertype_filter *)arg,
6513                         FALSE);
6514                 break;
6515         case RTE_ETH_FILTER_GET:
6516                 ret = ixgbe_get_ethertype_filter(dev,
6517                         (struct rte_eth_ethertype_filter *)arg);
6518                 break;
6519         default:
6520                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6521                 ret = -EINVAL;
6522                 break;
6523         }
6524         return ret;
6525 }
6526
6527 static int
6528 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
6529                      enum rte_filter_type filter_type,
6530                      enum rte_filter_op filter_op,
6531                      void *arg)
6532 {
6533         int ret = 0;
6534
6535         switch (filter_type) {
6536         case RTE_ETH_FILTER_NTUPLE:
6537                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
6538                 break;
6539         case RTE_ETH_FILTER_ETHERTYPE:
6540                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
6541                 break;
6542         case RTE_ETH_FILTER_SYN:
6543                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
6544                 break;
6545         case RTE_ETH_FILTER_FDIR:
6546                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
6547                 break;
6548         case RTE_ETH_FILTER_L2_TUNNEL:
6549                 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
6550                 break;
6551         case RTE_ETH_FILTER_GENERIC:
6552                 if (filter_op != RTE_ETH_FILTER_GET)
6553                         return -EINVAL;
6554                 *(const void **)arg = &ixgbe_flow_ops;
6555                 break;
6556         default:
6557                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
6558                                                         filter_type);
6559                 ret = -EINVAL;
6560                 break;
6561         }
6562
6563         return ret;
6564 }
6565
6566 static u8 *
6567 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
6568                         u8 **mc_addr_ptr, u32 *vmdq)
6569 {
6570         u8 *mc_addr;
6571
6572         *vmdq = 0;
6573         mc_addr = *mc_addr_ptr;
6574         *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
6575         return mc_addr;
6576 }
6577
6578 static int
6579 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
6580                           struct ether_addr *mc_addr_set,
6581                           uint32_t nb_mc_addr)
6582 {
6583         struct ixgbe_hw *hw;
6584         u8 *mc_addr_list;
6585
6586         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6587         mc_addr_list = (u8 *)mc_addr_set;
6588         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
6589                                          ixgbe_dev_addr_list_itr, TRUE);
6590 }
6591
6592 static uint64_t
6593 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
6594 {
6595         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6596         uint64_t systime_cycles;
6597
6598         switch (hw->mac.type) {
6599         case ixgbe_mac_X550:
6600         case ixgbe_mac_X550EM_x:
6601         case ixgbe_mac_X550EM_a:
6602                 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
6603                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6604                 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6605                                 * NSEC_PER_SEC;
6606                 break;
6607         default:
6608                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6609                 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6610                                 << 32;
6611         }
6612
6613         return systime_cycles;
6614 }
6615
6616 static uint64_t
6617 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6618 {
6619         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6620         uint64_t rx_tstamp_cycles;
6621
6622         switch (hw->mac.type) {
6623         case ixgbe_mac_X550:
6624         case ixgbe_mac_X550EM_x:
6625         case ixgbe_mac_X550EM_a:
6626                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
6627                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6628                 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6629                                 * NSEC_PER_SEC;
6630                 break;
6631         default:
6632                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
6633                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6634                 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6635                                 << 32;
6636         }
6637
6638         return rx_tstamp_cycles;
6639 }
6640
6641 static uint64_t
6642 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6643 {
6644         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6645         uint64_t tx_tstamp_cycles;
6646
6647         switch (hw->mac.type) {
6648         case ixgbe_mac_X550:
6649         case ixgbe_mac_X550EM_x:
6650         case ixgbe_mac_X550EM_a:
6651                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
6652                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6653                 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6654                                 * NSEC_PER_SEC;
6655                 break;
6656         default:
6657                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
6658                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6659                 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6660                                 << 32;
6661         }
6662
6663         return tx_tstamp_cycles;
6664 }
6665
6666 static void
6667 ixgbe_start_timecounters(struct rte_eth_dev *dev)
6668 {
6669         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6670         struct ixgbe_adapter *adapter =
6671                 (struct ixgbe_adapter *)dev->data->dev_private;
6672         struct rte_eth_link link;
6673         uint32_t incval = 0;
6674         uint32_t shift = 0;
6675
6676         /* Get current link speed. */
6677         memset(&link, 0, sizeof(link));
6678         ixgbe_dev_link_update(dev, 1);
6679         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
6680
6681         switch (link.link_speed) {
6682         case ETH_SPEED_NUM_100M:
6683                 incval = IXGBE_INCVAL_100;
6684                 shift = IXGBE_INCVAL_SHIFT_100;
6685                 break;
6686         case ETH_SPEED_NUM_1G:
6687                 incval = IXGBE_INCVAL_1GB;
6688                 shift = IXGBE_INCVAL_SHIFT_1GB;
6689                 break;
6690         case ETH_SPEED_NUM_10G:
6691         default:
6692                 incval = IXGBE_INCVAL_10GB;
6693                 shift = IXGBE_INCVAL_SHIFT_10GB;
6694                 break;
6695         }
6696
6697         switch (hw->mac.type) {
6698         case ixgbe_mac_X550:
6699         case ixgbe_mac_X550EM_x:
6700         case ixgbe_mac_X550EM_a:
6701                 /* Independent of link speed. */
6702                 incval = 1;
6703                 /* Cycles read will be interpreted as ns. */
6704                 shift = 0;
6705                 /* Fall-through */
6706         case ixgbe_mac_X540:
6707                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
6708                 break;
6709         case ixgbe_mac_82599EB:
6710                 incval >>= IXGBE_INCVAL_SHIFT_82599;
6711                 shift -= IXGBE_INCVAL_SHIFT_82599;
6712                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
6713                                 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
6714                 break;
6715         default:
6716                 /* Not supported. */
6717                 return;
6718         }
6719
6720         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
6721         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6722         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6723
6724         adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6725         adapter->systime_tc.cc_shift = shift;
6726         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
6727
6728         adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6729         adapter->rx_tstamp_tc.cc_shift = shift;
6730         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6731
6732         adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6733         adapter->tx_tstamp_tc.cc_shift = shift;
6734         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6735 }
6736
6737 static int
6738 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
6739 {
6740         struct ixgbe_adapter *adapter =
6741                         (struct ixgbe_adapter *)dev->data->dev_private;
6742
6743         adapter->systime_tc.nsec += delta;
6744         adapter->rx_tstamp_tc.nsec += delta;
6745         adapter->tx_tstamp_tc.nsec += delta;
6746
6747         return 0;
6748 }
6749
6750 static int
6751 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
6752 {
6753         uint64_t ns;
6754         struct ixgbe_adapter *adapter =
6755                         (struct ixgbe_adapter *)dev->data->dev_private;
6756
6757         ns = rte_timespec_to_ns(ts);
6758         /* Set the timecounters to a new value. */
6759         adapter->systime_tc.nsec = ns;
6760         adapter->rx_tstamp_tc.nsec = ns;
6761         adapter->tx_tstamp_tc.nsec = ns;
6762
6763         return 0;
6764 }
6765
6766 static int
6767 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
6768 {
6769         uint64_t ns, systime_cycles;
6770         struct ixgbe_adapter *adapter =
6771                         (struct ixgbe_adapter *)dev->data->dev_private;
6772
6773         systime_cycles = ixgbe_read_systime_cyclecounter(dev);
6774         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
6775         *ts = rte_ns_to_timespec(ns);
6776
6777         return 0;
6778 }
6779
6780 static int
6781 ixgbe_timesync_enable(struct rte_eth_dev *dev)
6782 {
6783         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6784         uint32_t tsync_ctl;
6785         uint32_t tsauxc;
6786
6787         /* Stop the timesync system time. */
6788         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
6789         /* Reset the timesync system time value. */
6790         IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
6791         IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
6792
6793         /* Enable system time for platforms where it isn't on by default. */
6794         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
6795         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
6796         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
6797
6798         ixgbe_start_timecounters(dev);
6799
6800         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6801         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
6802                         (ETHER_TYPE_1588 |
6803                          IXGBE_ETQF_FILTER_EN |
6804                          IXGBE_ETQF_1588));
6805
6806         /* Enable timestamping of received PTP packets. */
6807         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6808         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
6809         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6810
6811         /* Enable timestamping of transmitted PTP packets. */
6812         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6813         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
6814         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6815
6816         IXGBE_WRITE_FLUSH(hw);
6817
6818         return 0;
6819 }
6820
6821 static int
6822 ixgbe_timesync_disable(struct rte_eth_dev *dev)
6823 {
6824         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6825         uint32_t tsync_ctl;
6826
6827         /* Disable timestamping of transmitted PTP packets. */
6828         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6829         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
6830         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6831
6832         /* Disable timestamping of received PTP packets. */
6833         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6834         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
6835         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6836
6837         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6838         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
6839
6840         /* Stop incrementing the System Time registers. */
6841         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
6842
6843         return 0;
6844 }
6845
6846 static int
6847 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
6848                                  struct timespec *timestamp,
6849                                  uint32_t flags __rte_unused)
6850 {
6851         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6852         struct ixgbe_adapter *adapter =
6853                 (struct ixgbe_adapter *)dev->data->dev_private;
6854         uint32_t tsync_rxctl;
6855         uint64_t rx_tstamp_cycles;
6856         uint64_t ns;
6857
6858         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6859         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
6860                 return -EINVAL;
6861
6862         rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
6863         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
6864         *timestamp = rte_ns_to_timespec(ns);
6865
6866         return 0;
6867 }
6868
6869 static int
6870 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
6871                                  struct timespec *timestamp)
6872 {
6873         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6874         struct ixgbe_adapter *adapter =
6875                 (struct ixgbe_adapter *)dev->data->dev_private;
6876         uint32_t tsync_txctl;
6877         uint64_t tx_tstamp_cycles;
6878         uint64_t ns;
6879
6880         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6881         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
6882                 return -EINVAL;
6883
6884         tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
6885         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
6886         *timestamp = rte_ns_to_timespec(ns);
6887
6888         return 0;
6889 }
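/*
 * Usage sketch for the IEEE 1588 callbacks above (illustrative only; port id
 * 0 and the adjustment value are arbitrary, and the prototypes assumed are
 * the generic rte_eth_timesync_*() API of this release):
 *
 *     struct timespec ts;
 *
 *     rte_eth_timesync_enable(0);             reaches ixgbe_timesync_enable()
 *     rte_eth_timesync_read_time(0, &ts);
 *     rte_eth_timesync_adjust_time(0, 1000);  slew the timecounters by 1 us
 *     if (rte_eth_timesync_read_rx_timestamp(0, &ts, 0) == 0)
 *             printf("rx tstamp %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *
 * On X550 the SYSTIM/RXSTMP/TXSTMP registers already hold seconds and
 * nanoseconds, so the cyclecounter shift is 0; on 82599/X540 the raw cycle
 * value is scaled according to the per-speed TIMINCA increment before
 * rte_timecounter_update() converts it to nanoseconds.
 */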
6890
6891 static int
6892 ixgbe_get_reg_length(struct rte_eth_dev *dev)
6893 {
6894         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6895         int count = 0;
6896         int g_ind = 0;
6897         const struct reg_info *reg_group;
6898         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
6899                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
6900
6901         while ((reg_group = reg_set[g_ind++]))
6902                 count += ixgbe_regs_group_count(reg_group);
6903
6904         return count;
6905 }
6906
6907 static int
6908 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
6909 {
6910         int count = 0;
6911         int g_ind = 0;
6912         const struct reg_info *reg_group;
6913
6914         while ((reg_group = ixgbevf_regs[g_ind++]))
6915                 count += ixgbe_regs_group_count(reg_group);
6916
6917         return count;
6918 }
6919
6920 static int
6921 ixgbe_get_regs(struct rte_eth_dev *dev,
6922               struct rte_dev_reg_info *regs)
6923 {
6924         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6925         uint32_t *data = regs->data;
6926         int g_ind = 0;
6927         int count = 0;
6928         const struct reg_info *reg_group;
6929         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
6930                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
6931
6932         if (data == NULL) {
6933                 regs->length = ixgbe_get_reg_length(dev);
6934                 regs->width = sizeof(uint32_t);
6935                 return 0;
6936         }
6937
6938         /* Support only full register dump */
6939         if ((regs->length == 0) ||
6940             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
6941                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
6942                         hw->device_id;
6943                 while ((reg_group = reg_set[g_ind++]))
6944                         count += ixgbe_read_regs_group(dev, &data[count],
6945                                 reg_group);
6946                 return 0;
6947         }
6948
6949         return -ENOTSUP;
6950 }
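/*
 * Usage sketch for the register dump path (illustrative only; port id 0 is
 * arbitrary and the prototype assumed is rte_eth_dev_get_reg_info() from the
 * generic ethdev API). The first call with data == NULL returns the size,
 * the second call performs the full dump:
 *
 *     struct rte_dev_reg_info info = { .data = NULL };
 *
 *     rte_eth_dev_get_reg_info(0, &info);
 *     info.data = malloc((size_t)info.length * info.width);
 *     if (info.data != NULL && rte_eth_dev_get_reg_info(0, &info) == 0)
 *             printf("dumped %u registers, version 0x%x\n",
 *                    info.length, info.version);
 *     free(info.data);
 *
 * Partial dumps are rejected with -ENOTSUP above.
 */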
6951
6952 static int
6953 ixgbevf_get_regs(struct rte_eth_dev *dev,
6954                 struct rte_dev_reg_info *regs)
6955 {
6956         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6957         uint32_t *data = regs->data;
6958         int g_ind = 0;
6959         int count = 0;
6960         const struct reg_info *reg_group;
6961
6962         if (data == NULL) {
6963                 regs->length = ixgbevf_get_reg_length(dev);
6964                 regs->width = sizeof(uint32_t);
6965                 return 0;
6966         }
6967
6968         /* Support only full register dump */
6969         if ((regs->length == 0) ||
6970             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
6971                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
6972                         hw->device_id;
6973                 while ((reg_group = ixgbevf_regs[g_ind++]))
6974                         count += ixgbe_read_regs_group(dev, &data[count],
6975                                                       reg_group);
6976                 return 0;
6977         }
6978
6979         return -ENOTSUP;
6980 }
6981
6982 static int
6983 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
6984 {
6985         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6986
6987         /* Return unit is byte count */
6988         return hw->eeprom.word_size * 2;
6989 }
6990
6991 static int
6992 ixgbe_get_eeprom(struct rte_eth_dev *dev,
6993                 struct rte_dev_eeprom_info *in_eeprom)
6994 {
6995         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6996         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
6997         uint16_t *data = in_eeprom->data;
6998         int first, length;
6999
7000         first = in_eeprom->offset >> 1;
7001         length = in_eeprom->length >> 1;
7002         if ((first > hw->eeprom.word_size) ||
7003             ((first + length) > hw->eeprom.word_size))
7004                 return -EINVAL;
7005
7006         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7007
7008         return eeprom->ops.read_buffer(hw, first, length, data);
7009 }
7010
7011 static int
7012 ixgbe_set_eeprom(struct rte_eth_dev *dev,
7013                 struct rte_dev_eeprom_info *in_eeprom)
7014 {
7015         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7016         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7017         uint16_t *data = in_eeprom->data;
7018         int first, length;
7019
7020         first = in_eeprom->offset >> 1;
7021         length = in_eeprom->length >> 1;
7022         if ((first > hw->eeprom.word_size) ||
7023             ((first + length) > hw->eeprom.word_size))
7024                 return -EINVAL;
7025
7026         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7027
7028         return eeprom->ops.write_buffer(hw, first, length, data);
7029 }
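/*
 * Usage sketch for the EEPROM callbacks (illustrative only; port id 0 and
 * the 8-byte window are arbitrary). Offsets and lengths are in bytes but the
 * hardware is accessed in 16-bit words, hence the ">> 1" above:
 *
 *     uint16_t words[4];
 *     struct rte_dev_eeprom_info ee = {
 *             .data = words,
 *             .offset = 0,
 *             .length = sizeof(words),
 *     };
 *
 *     if (rte_eth_dev_get_eeprom_length(0) >= (int)sizeof(words) &&
 *         rte_eth_dev_get_eeprom(0, &ee) == 0)
 *             printf("eeprom word 0: 0x%04x\n", words[0]);
 */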
7030
7031 uint16_t
7032 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
7033         switch (mac_type) {
7034         case ixgbe_mac_X550:
7035         case ixgbe_mac_X550EM_x:
7036         case ixgbe_mac_X550EM_a:
7037                 return ETH_RSS_RETA_SIZE_512;
7038         case ixgbe_mac_X550_vf:
7039         case ixgbe_mac_X550EM_x_vf:
7040         case ixgbe_mac_X550EM_a_vf:
7041                 return ETH_RSS_RETA_SIZE_64;
7042         default:
7043                 return ETH_RSS_RETA_SIZE_128;
7044         }
7045 }
7046
7047 uint32_t
7048 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
7049         switch (mac_type) {
7050         case ixgbe_mac_X550:
7051         case ixgbe_mac_X550EM_x:
7052         case ixgbe_mac_X550EM_a:
7053                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
7054                         return IXGBE_RETA(reta_idx >> 2);
7055                 else
7056                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
7057         case ixgbe_mac_X550_vf:
7058         case ixgbe_mac_X550EM_x_vf:
7059         case ixgbe_mac_X550EM_a_vf:
7060                 return IXGBE_VFRETA(reta_idx >> 2);
7061         default:
7062                 return IXGBE_RETA(reta_idx >> 2);
7063         }
7064 }
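/*
 * Worked example of the mapping above (illustration only): each 32-bit RETA
 * register packs four 8-bit entries, hence "reta_idx >> 2". On X550, entry
 * 42 lives in byte (42 & 3) = 2 of IXGBE_RETA(42 >> 2) = IXGBE_RETA(10),
 * while entry 130 falls in the extended table: IXGBE_ERETA((130 - 128) >> 2)
 * = IXGBE_ERETA(0), byte 2.
 */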
7065
7066 uint32_t
7067 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
7068         switch (mac_type) {
7069         case ixgbe_mac_X550_vf:
7070         case ixgbe_mac_X550EM_x_vf:
7071         case ixgbe_mac_X550EM_a_vf:
7072                 return IXGBE_VFMRQC;
7073         default:
7074                 return IXGBE_MRQC;
7075         }
7076 }
7077
7078 uint32_t
7079 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
7080         switch (mac_type) {
7081         case ixgbe_mac_X550_vf:
7082         case ixgbe_mac_X550EM_x_vf:
7083         case ixgbe_mac_X550EM_a_vf:
7084                 return IXGBE_VFRSSRK(i);
7085         default:
7086                 return IXGBE_RSSRK(i);
7087         }
7088 }
7089
7090 bool
7091 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
7092         switch (mac_type) {
7093         case ixgbe_mac_82599_vf:
7094         case ixgbe_mac_X540_vf:
7095                 return 0;
7096         default:
7097                 return 1;
7098         }
7099 }
7100
7101 static int
7102 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
7103                         struct rte_eth_dcb_info *dcb_info)
7104 {
7105         struct ixgbe_dcb_config *dcb_config =
7106                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
7107         struct ixgbe_dcb_tc_config *tc;
7108         uint8_t i, j;
7109
7110         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
7111                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
7112         else
7113                 dcb_info->nb_tcs = 1;
7114
7115         if (dcb_config->vt_mode) { /* vt is enabled */
7116                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
7117                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
7118                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7119                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
7120                 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
7121                         for (j = 0; j < dcb_info->nb_tcs; j++) {
7122                                 dcb_info->tc_queue.tc_rxq[i][j].base =
7123                                                 i * dcb_info->nb_tcs + j;
7124                                 dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
7125                                 dcb_info->tc_queue.tc_txq[i][j].base =
7126                                                 i * dcb_info->nb_tcs + j;
7127                                 dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
7128                         }
7129                 }
7130         } else { /* vt is disabled */
7131                 struct rte_eth_dcb_rx_conf *rx_conf =
7132                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
7133                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7134                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
7135                 if (dcb_info->nb_tcs == ETH_4_TCS) {
7136                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7137                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
7138                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7139                         }
7140                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7141                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
7142                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
7143                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
7144                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
7145                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7146                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7147                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7148                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
7149                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7150                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
7151                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7152                         }
7153                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7154                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
7155                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
7156                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
7157                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
7158                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
7159                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
7160                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
7161                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
7162                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7163                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7164                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7165                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
7166                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
7167                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
7168                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
7169                 }
7170         }
7171         for (i = 0; i < dcb_info->nb_tcs; i++) {
7172                 tc = &dcb_config->tc_config[i];
7173                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
7174         }
7175         return 0;
7176 }
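/*
 * Usage sketch for the DCB query above (illustrative only; port id 0 and
 * user priority 5 are arbitrary, assuming the generic
 * rte_eth_dev_get_dcb_info() API):
 *
 *     struct rte_eth_dcb_info dcb;
 *
 *     if (rte_eth_dev_get_dcb_info(0, &dcb) == 0) {
 *             uint8_t tc = dcb.prio_tc[5];
 *             printf("prio 5 -> tc %u, rxq base %u (%u queues)\n",
 *                    tc, dcb.tc_queue.tc_rxq[0][tc].base,
 *                    dcb.tc_queue.tc_rxq[0][tc].nb_queue);
 *     }
 *
 * The fixed tx queue bases in the 4-TC and 8-TC branches reflect the
 * 82599/X540 DCB transmit layout when virtualization is disabled.
 */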
7177
7178 /* Update e-tag ether type */
7179 static int
7180 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
7181                             uint16_t ether_type)
7182 {
7183         uint32_t etag_etype;
7184
7185         if (hw->mac.type != ixgbe_mac_X550 &&
7186             hw->mac.type != ixgbe_mac_X550EM_x &&
7187             hw->mac.type != ixgbe_mac_X550EM_a) {
7188                 return -ENOTSUP;
7189         }
7190
7191         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7192         etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
7193         etag_etype |= ether_type;
7194         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7195         IXGBE_WRITE_FLUSH(hw);
7196
7197         return 0;
7198 }
7199
7200 /* Config l2 tunnel ether type */
7201 static int
7202 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
7203                                   struct rte_eth_l2_tunnel_conf *l2_tunnel)
7204 {
7205         int ret = 0;
7206         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7207         struct ixgbe_l2_tn_info *l2_tn_info =
7208                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7209
7210         if (l2_tunnel == NULL)
7211                 return -EINVAL;
7212
7213         switch (l2_tunnel->l2_tunnel_type) {
7214         case RTE_L2_TUNNEL_TYPE_E_TAG:
7215                 l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
7216                 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
7217                 break;
7218         default:
7219                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7220                 ret = -EINVAL;
7221                 break;
7222         }
7223
7224         return ret;
7225 }
7226
7227 /* Enable e-tag tunnel */
7228 static int
7229 ixgbe_e_tag_enable(struct ixgbe_hw *hw)
7230 {
7231         uint32_t etag_etype;
7232
7233         if (hw->mac.type != ixgbe_mac_X550 &&
7234             hw->mac.type != ixgbe_mac_X550EM_x &&
7235             hw->mac.type != ixgbe_mac_X550EM_a) {
7236                 return -ENOTSUP;
7237         }
7238
7239         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7240         etag_etype |= IXGBE_ETAG_ETYPE_VALID;
7241         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7242         IXGBE_WRITE_FLUSH(hw);
7243
7244         return 0;
7245 }
7246
7247 /* Enable l2 tunnel */
7248 static int
7249 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
7250                            enum rte_eth_tunnel_type l2_tunnel_type)
7251 {
7252         int ret = 0;
7253         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7254         struct ixgbe_l2_tn_info *l2_tn_info =
7255                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7256
7257         switch (l2_tunnel_type) {
7258         case RTE_L2_TUNNEL_TYPE_E_TAG:
7259                 l2_tn_info->e_tag_en = TRUE;
7260                 ret = ixgbe_e_tag_enable(hw);
7261                 break;
7262         default:
7263                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7264                 ret = -EINVAL;
7265                 break;
7266         }
7267
7268         return ret;
7269 }
7270
7271 /* Disable e-tag tunnel */
7272 static int
7273 ixgbe_e_tag_disable(struct ixgbe_hw *hw)
7274 {
7275         uint32_t etag_etype;
7276
7277         if (hw->mac.type != ixgbe_mac_X550 &&
7278             hw->mac.type != ixgbe_mac_X550EM_x &&
7279             hw->mac.type != ixgbe_mac_X550EM_a) {
7280                 return -ENOTSUP;
7281         }
7282
7283         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7284         etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
7285         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7286         IXGBE_WRITE_FLUSH(hw);
7287
7288         return 0;
7289 }
7290
7291 /* Disable l2 tunnel */
7292 static int
7293 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
7294                             enum rte_eth_tunnel_type l2_tunnel_type)
7295 {
7296         int ret = 0;
7297         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7298         struct ixgbe_l2_tn_info *l2_tn_info =
7299                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7300
7301         switch (l2_tunnel_type) {
7302         case RTE_L2_TUNNEL_TYPE_E_TAG:
7303                 l2_tn_info->e_tag_en = FALSE;
7304                 ret = ixgbe_e_tag_disable(hw);
7305                 break;
7306         default:
7307                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7308                 ret = -EINVAL;
7309                 break;
7310         }
7311
7312         return ret;
7313 }
7314
7315 static int
7316 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
7317                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7318 {
7319         int ret = 0;
7320         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7321         uint32_t i, rar_entries;
7322         uint32_t rar_low, rar_high;
7323
7324         if (hw->mac.type != ixgbe_mac_X550 &&
7325             hw->mac.type != ixgbe_mac_X550EM_x &&
7326             hw->mac.type != ixgbe_mac_X550EM_a) {
7327                 return -ENOTSUP;
7328         }
7329
7330         rar_entries = ixgbe_get_num_rx_addrs(hw);
7331
7332         for (i = 1; i < rar_entries; i++) {
7333                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7334                 rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
7335                 if ((rar_high & IXGBE_RAH_AV) &&
7336                     (rar_high & IXGBE_RAH_ADTYPE) &&
7337                     ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
7338                      l2_tunnel->tunnel_id)) {
7339                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
7340                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
7341
7342                         ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
7343
7344                         return ret;
7345                 }
7346         }
7347
7348         return ret;
7349 }
7350
7351 static int
7352 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
7353                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7354 {
7355         int ret = 0;
7356         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7357         uint32_t i, rar_entries;
7358         uint32_t rar_low, rar_high;
7359
7360         if (hw->mac.type != ixgbe_mac_X550 &&
7361             hw->mac.type != ixgbe_mac_X550EM_x &&
7362             hw->mac.type != ixgbe_mac_X550EM_a) {
7363                 return -ENOTSUP;
7364         }
7365
7366         /* One entry per tunnel; remove any potentially existing entry first. */
7367         ixgbe_e_tag_filter_del(dev, l2_tunnel);
7368
7369         rar_entries = ixgbe_get_num_rx_addrs(hw);
7370
7371         for (i = 1; i < rar_entries; i++) {
7372                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7373                 if (rar_high & IXGBE_RAH_AV) {
7374                         continue;
7375                 } else {
7376                         ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
7377                         rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
7378                         rar_low = l2_tunnel->tunnel_id;
7379
7380                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
7381                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
7382
7383                         return ret;
7384                 }
7385         }
7386
7387         PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rules is full."
7388                      " Please remove a rule before adding a new one.");
7389         return -EINVAL;
7390 }
7391
7392 static inline struct ixgbe_l2_tn_filter *
7393 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
7394                           struct ixgbe_l2_tn_key *key)
7395 {
7396         int ret;
7397
7398         ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
7399         if (ret < 0)
7400                 return NULL;
7401
7402         return l2_tn_info->hash_map[ret];
7403 }
7404
7405 static inline int
7406 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7407                           struct ixgbe_l2_tn_filter *l2_tn_filter)
7408 {
7409         int ret;
7410
7411         ret = rte_hash_add_key(l2_tn_info->hash_handle,
7412                                &l2_tn_filter->key);
7413
7414         if (ret < 0) {
7415                 PMD_DRV_LOG(ERR,
7416                             "Failed to insert L2 tunnel filter"
7417                             " to hash table %d!",
7418                             ret);
7419                 return ret;
7420         }
7421
7422         l2_tn_info->hash_map[ret] = l2_tn_filter;
7423
7424         TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7425
7426         return 0;
7427 }
7428
7429 static inline int
7430 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7431                           struct ixgbe_l2_tn_key *key)
7432 {
7433         int ret;
7434         struct ixgbe_l2_tn_filter *l2_tn_filter;
7435
7436         ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
7437
7438         if (ret < 0) {
7439                 PMD_DRV_LOG(ERR,
7440                             "No such L2 tunnel filter to delete %d!",
7441                             ret);
7442                 return ret;
7443         }
7444
7445         l2_tn_filter = l2_tn_info->hash_map[ret];
7446         l2_tn_info->hash_map[ret] = NULL;
7447
7448         TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7449         rte_free(l2_tn_filter);
7450
7451         return 0;
7452 }
7453
7454 /* Add l2 tunnel filter */
7455 int
7456 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
7457                                struct rte_eth_l2_tunnel_conf *l2_tunnel,
7458                                bool restore)
7459 {
7460         int ret;
7461         struct ixgbe_l2_tn_info *l2_tn_info =
7462                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7463         struct ixgbe_l2_tn_key key;
7464         struct ixgbe_l2_tn_filter *node;
7465
7466         if (!restore) {
7467                 key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7468                 key.tn_id = l2_tunnel->tunnel_id;
7469
7470                 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
7471
7472                 if (node) {
7473                         PMD_DRV_LOG(ERR,
7474                                     "The L2 tunnel filter already exists!");
7475                         return -EINVAL;
7476                 }
7477
7478                 node = rte_zmalloc("ixgbe_l2_tn",
7479                                    sizeof(struct ixgbe_l2_tn_filter),
7480                                    0);
7481                 if (!node)
7482                         return -ENOMEM;
7483
7484                 (void)rte_memcpy(&node->key,
7485                                  &key,
7486                                  sizeof(struct ixgbe_l2_tn_key));
7487                 node->pool = l2_tunnel->pool;
7488                 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
7489                 if (ret < 0) {
7490                         rte_free(node);
7491                         return ret;
7492                 }
7493         }
7494
7495         switch (l2_tunnel->l2_tunnel_type) {
7496         case RTE_L2_TUNNEL_TYPE_E_TAG:
7497                 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
7498                 break;
7499         default:
7500                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7501                 ret = -EINVAL;
7502                 break;
7503         }
7504
7505         if ((!restore) && (ret < 0))
7506                 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7507
7508         return ret;
7509 }
7510
7511 /* Delete l2 tunnel filter */
7512 int
7513 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
7514                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
7515 {
7516         int ret;
7517         struct ixgbe_l2_tn_info *l2_tn_info =
7518                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7519         struct ixgbe_l2_tn_key key;
7520
7521         key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7522         key.tn_id = l2_tunnel->tunnel_id;
7523         ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7524         if (ret < 0)
7525                 return ret;
7526
7527         switch (l2_tunnel->l2_tunnel_type) {
7528         case RTE_L2_TUNNEL_TYPE_E_TAG:
7529                 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
7530                 break;
7531         default:
7532                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7533                 ret = -EINVAL;
7534                 break;
7535         }
7536
7537         return ret;
7538 }
7539
7540 /**
7541  * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
7542  * @dev: pointer to rte_eth_dev structure
7543  * @filter_op: operation to be performed.
7544  * @arg: a pointer to specific structure corresponding to the filter_op
7545  */
7546 static int
7547 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
7548                                   enum rte_filter_op filter_op,
7549                                   void *arg)
7550 {
7551         int ret;
7552
7553         if (filter_op == RTE_ETH_FILTER_NOP)
7554                 return 0;
7555
7556         if (arg == NULL) {
7557                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
7558                             filter_op);
7559                 return -EINVAL;
7560         }
7561
7562         switch (filter_op) {
7563         case RTE_ETH_FILTER_ADD:
7564                 ret = ixgbe_dev_l2_tunnel_filter_add
7565                         (dev,
7566                          (struct rte_eth_l2_tunnel_conf *)arg,
7567                          FALSE);
7568                 break;
7569         case RTE_ETH_FILTER_DELETE:
7570                 ret = ixgbe_dev_l2_tunnel_filter_del
7571                         (dev,
7572                          (struct rte_eth_l2_tunnel_conf *)arg);
7573                 break;
7574         default:
7575                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
7576                 ret = -EINVAL;
7577                 break;
7578         }
7579         return ret;
7580 }
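/*
 * Usage sketch: this handler is reached through rte_eth_dev_filter_ctrl()
 * with RTE_ETH_FILTER_L2_TUNNEL (illustrative only; port id 0, tunnel id
 * 1000 and pool 1 are arbitrary):
 *
 *     struct rte_eth_l2_tunnel_conf conf = {
 *             .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *             .tunnel_id = 1000,
 *             .pool = 1,
 *     };
 *
 *     rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_L2_TUNNEL,
 *                             RTE_ETH_FILTER_ADD, &conf);
 *
 * The ADD path records the filter in the software hash before programming a
 * RAR entry, which lets ixgbe_l2_tn_filter_restore() replay it after reset.
 */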
7581
7582 static int
7583 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
7584 {
7585         int ret = 0;
7586         uint32_t ctrl;
7587         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7588
7589         if (hw->mac.type != ixgbe_mac_X550 &&
7590             hw->mac.type != ixgbe_mac_X550EM_x &&
7591             hw->mac.type != ixgbe_mac_X550EM_a) {
7592                 return -ENOTSUP;
7593         }
7594
7595         ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
7596         ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
7597         if (en)
7598                 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
7599         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
7600
7601         return ret;
7602 }
7603
7604 /* Enable l2 tunnel forwarding */
7605 static int
7606 ixgbe_dev_l2_tunnel_forwarding_enable
7607         (struct rte_eth_dev *dev,
7608          enum rte_eth_tunnel_type l2_tunnel_type)
7609 {
7610         struct ixgbe_l2_tn_info *l2_tn_info =
7611                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7612         int ret = 0;
7613
7614         switch (l2_tunnel_type) {
7615         case RTE_L2_TUNNEL_TYPE_E_TAG:
7616                 l2_tn_info->e_tag_fwd_en = TRUE;
7617                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
7618                 break;
7619         default:
7620                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7621                 ret = -EINVAL;
7622                 break;
7623         }
7624
7625         return ret;
7626 }
7627
7628 /* Disable l2 tunnel forwarding */
7629 static int
7630 ixgbe_dev_l2_tunnel_forwarding_disable
7631         (struct rte_eth_dev *dev,
7632          enum rte_eth_tunnel_type l2_tunnel_type)
7633 {
7634         struct ixgbe_l2_tn_info *l2_tn_info =
7635                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7636         int ret = 0;
7637
7638         switch (l2_tunnel_type) {
7639         case RTE_L2_TUNNEL_TYPE_E_TAG:
7640                 l2_tn_info->e_tag_fwd_en = FALSE;
7641                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
7642                 break;
7643         default:
7644                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7645                 ret = -EINVAL;
7646                 break;
7647         }
7648
7649         return ret;
7650 }
7651
7652 static int
7653 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
7654                              struct rte_eth_l2_tunnel_conf *l2_tunnel,
7655                              bool en)
7656 {
7657         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
7658         int ret = 0;
7659         uint32_t vmtir, vmvir;
7660         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7661
7662         if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
7663                 PMD_DRV_LOG(ERR,
7664                             "VF id %u should be less than %u",
7665                             l2_tunnel->vf_id,
7666                             pci_dev->max_vfs);
7667                 return -EINVAL;
7668         }
7669
7670         if (hw->mac.type != ixgbe_mac_X550 &&
7671             hw->mac.type != ixgbe_mac_X550EM_x &&
7672             hw->mac.type != ixgbe_mac_X550EM_a) {
7673                 return -ENOTSUP;
7674         }
7675
7676         if (en)
7677                 vmtir = l2_tunnel->tunnel_id;
7678         else
7679                 vmtir = 0;
7680
7681         IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
7682
7683         vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
7684         vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
7685         if (en)
7686                 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
7687         IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
7688
7689         return ret;
7690 }
7691
7692 /* Enable l2 tunnel tag insertion */
7693 static int
7694 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
7695                                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
7696 {
7697         int ret = 0;
7698
7699         switch (l2_tunnel->l2_tunnel_type) {
7700         case RTE_L2_TUNNEL_TYPE_E_TAG:
7701                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
7702                 break;
7703         default:
7704                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7705                 ret = -EINVAL;
7706                 break;
7707         }
7708
7709         return ret;
7710 }
7711
7712 /* Disable l2 tunnel tag insertion */
7713 static int
7714 ixgbe_dev_l2_tunnel_insertion_disable
7715         (struct rte_eth_dev *dev,
7716          struct rte_eth_l2_tunnel_conf *l2_tunnel)
7717 {
7718         int ret = 0;
7719
7720         switch (l2_tunnel->l2_tunnel_type) {
7721         case RTE_L2_TUNNEL_TYPE_E_TAG:
7722                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
7723                 break;
7724         default:
7725                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7726                 ret = -EINVAL;
7727                 break;
7728         }
7729
7730         return ret;
7731 }
7732
7733 static int
7734 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
7735                              bool en)
7736 {
7737         int ret = 0;
7738         uint32_t qde;
7739         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7740
7741         if (hw->mac.type != ixgbe_mac_X550 &&
7742             hw->mac.type != ixgbe_mac_X550EM_x &&
7743             hw->mac.type != ixgbe_mac_X550EM_a) {
7744                 return -ENOTSUP;
7745         }
7746
7747         qde = IXGBE_READ_REG(hw, IXGBE_QDE);
7748         if (en)
7749                 qde |= IXGBE_QDE_STRIP_TAG;
7750         else
7751                 qde &= ~IXGBE_QDE_STRIP_TAG;
7752         qde &= ~IXGBE_QDE_READ;
7753         qde |= IXGBE_QDE_WRITE;
7754         IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
7755
7756         return ret;
7757 }
7758
7759 /* Enable l2 tunnel tag stripping */
7760 static int
7761 ixgbe_dev_l2_tunnel_stripping_enable
7762         (struct rte_eth_dev *dev,
7763          enum rte_eth_tunnel_type l2_tunnel_type)
7764 {
7765         int ret = 0;
7766
7767         switch (l2_tunnel_type) {
7768         case RTE_L2_TUNNEL_TYPE_E_TAG:
7769                 ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
7770                 break;
7771         default:
7772                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7773                 ret = -EINVAL;
7774                 break;
7775         }
7776
7777         return ret;
7778 }
7779
7780 /* Disable l2 tunnel tag stripping */
7781 static int
7782 ixgbe_dev_l2_tunnel_stripping_disable
7783         (struct rte_eth_dev *dev,
7784          enum rte_eth_tunnel_type l2_tunnel_type)
7785 {
7786         int ret = 0;
7787
7788         switch (l2_tunnel_type) {
7789         case RTE_L2_TUNNEL_TYPE_E_TAG:
7790                 ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
7791                 break;
7792         default:
7793                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7794                 ret = -EINVAL;
7795                 break;
7796         }
7797
7798         return ret;
7799 }
7800
7801 /* Enable/disable l2 tunnel offload functions */
7802 static int
7803 ixgbe_dev_l2_tunnel_offload_set
7804         (struct rte_eth_dev *dev,
7805          struct rte_eth_l2_tunnel_conf *l2_tunnel,
7806          uint32_t mask,
7807          uint8_t en)
7808 {
7809         int ret = 0;
7810
7811         if (l2_tunnel == NULL)
7812                 return -EINVAL;
7813
7814         ret = -EINVAL;
7815         if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
7816                 if (en)
7817                         ret = ixgbe_dev_l2_tunnel_enable(
7818                                 dev,
7819                                 l2_tunnel->l2_tunnel_type);
7820                 else
7821                         ret = ixgbe_dev_l2_tunnel_disable(
7822                                 dev,
7823                                 l2_tunnel->l2_tunnel_type);
7824         }
7825
7826         if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
7827                 if (en)
7828                         ret = ixgbe_dev_l2_tunnel_insertion_enable(
7829                                 dev,
7830                                 l2_tunnel);
7831                 else
7832                         ret = ixgbe_dev_l2_tunnel_insertion_disable(
7833                                 dev,
7834                                 l2_tunnel);
7835         }
7836
7837         if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
7838                 if (en)
7839                         ret = ixgbe_dev_l2_tunnel_stripping_enable(
7840                                 dev,
7841                                 l2_tunnel->l2_tunnel_type);
7842                 else
7843                         ret = ixgbe_dev_l2_tunnel_stripping_disable(
7844                                 dev,
7845                                 l2_tunnel->l2_tunnel_type);
7846         }
7847
7848         if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
7849                 if (en)
7850                         ret = ixgbe_dev_l2_tunnel_forwarding_enable(
7851                                 dev,
7852                                 l2_tunnel->l2_tunnel_type);
7853                 else
7854                         ret = ixgbe_dev_l2_tunnel_forwarding_disable(
7855                                 dev,
7856                                 l2_tunnel->l2_tunnel_type);
7857         }
7858
7859         return ret;
7860 }
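/*
 * Note that several offload bits may be requested in one call, but ret only
 * reflects the last operation attempted. Usage sketch enabling E-tag
 * stripping and forwarding together (illustrative only; port id 0 assumed):
 *
 *     struct rte_eth_l2_tunnel_conf conf = {
 *             .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *     };
 *     uint32_t mask = ETH_L2_TUNNEL_STRIPPING_MASK |
 *                     ETH_L2_TUNNEL_FORWARDING_MASK;
 *
 *     rte_eth_dev_l2_tunnel_offload_set(0, &conf, mask, 1);
 */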
7861
7862 static int
7863 ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
7864                         uint16_t port)
7865 {
7866         IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
7867         IXGBE_WRITE_FLUSH(hw);
7868
7869         return 0;
7870 }
7871
7872 /* There is only one register for the VxLAN UDP port, so several ports
7873  * cannot be offloaded at once; adding a new port simply updates that register.
7874  */
7875 static int
7876 ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
7877                      uint16_t port)
7878 {
7879         if (port == 0) {
7880                 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
7881                 return -EINVAL;
7882         }
7883
7884         return ixgbe_update_vxlan_port(hw, port);
7885 }
7886
7887 /* The VxLAN UDP port cannot really be deleted. Since there is a register
7888  * for the VxLAN UDP port, it must always hold a value,
7889  * so deletion resets it to the original value 0.
7890  */
7891 static int
7892 ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
7893                      uint16_t port)
7894 {
7895         uint16_t cur_port;
7896
7897         cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
7898
7899         if (cur_port != port) {
7900                 PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
7901                 return -EINVAL;
7902         }
7903
7904         return ixgbe_update_vxlan_port(hw, 0);
7905 }
7906
7907 /* Add UDP tunneling port */
7908 static int
7909 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
7910                               struct rte_eth_udp_tunnel *udp_tunnel)
7911 {
7912         int ret = 0;
7913         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7914
7915         if (hw->mac.type != ixgbe_mac_X550 &&
7916             hw->mac.type != ixgbe_mac_X550EM_x &&
7917             hw->mac.type != ixgbe_mac_X550EM_a) {
7918                 return -ENOTSUP;
7919         }
7920
7921         if (udp_tunnel == NULL)
7922                 return -EINVAL;
7923
7924         switch (udp_tunnel->prot_type) {
7925         case RTE_TUNNEL_TYPE_VXLAN:
7926                 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
7927                 break;
7928
7929         case RTE_TUNNEL_TYPE_GENEVE:
7930         case RTE_TUNNEL_TYPE_TEREDO:
7931                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
7932                 ret = -EINVAL;
7933                 break;
7934
7935         default:
7936                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7937                 ret = -EINVAL;
7938                 break;
7939         }
7940
7941         return ret;
7942 }
7943
7944 /* Remove UDP tunneling port */
7945 static int
7946 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
7947                               struct rte_eth_udp_tunnel *udp_tunnel)
7948 {
7949         int ret = 0;
7950         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7951
7952         if (hw->mac.type != ixgbe_mac_X550 &&
7953             hw->mac.type != ixgbe_mac_X550EM_x &&
7954             hw->mac.type != ixgbe_mac_X550EM_a) {
7955                 return -ENOTSUP;
7956         }
7957
7958         if (udp_tunnel == NULL)
7959                 return -EINVAL;
7960
7961         switch (udp_tunnel->prot_type) {
7962         case RTE_TUNNEL_TYPE_VXLAN:
7963                 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
7964                 break;
7965         case RTE_TUNNEL_TYPE_GENEVE:
7966         case RTE_TUNNEL_TYPE_TEREDO:
7967                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
7968                 ret = -EINVAL;
7969                 break;
7970         default:
7971                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7972                 ret = -EINVAL;
7973                 break;
7974         }
7975
7976         return ret;
7977 }
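/*
 * Usage sketch for the single VxLAN UDP port (illustrative only; port id 0
 * is arbitrary and 4789 is the IANA-assigned VxLAN port):
 *
 *     struct rte_eth_udp_tunnel t = {
 *             .udp_port = 4789,
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *
 *     rte_eth_dev_udp_tunnel_port_add(0, &t);
 *     ...
 *     rte_eth_dev_udp_tunnel_port_delete(0, &t);
 *
 * Since VXLANCTRL is a single register, "add" overwrites the previous value
 * and "delete" succeeds only when the port matches, resetting it to 0.
 */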
7978
7979 static void
7980 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
7981 {
7982         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7983
7984         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
7985 }
7986
7987 static void
7988 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
7989 {
7990         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7991
7992         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
7993 }
7994
7995 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
7996 {
7997         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7998         u32 in_msg = 0;
7999
8000         if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
8001                 return;
8002
8003         /* PF reset VF event */
8004         if (in_msg == IXGBE_PF_CONTROL_MSG)
8005                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
8006                                               NULL, NULL);
8007 }
8008
8009 static int
8010 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
8011 {
8012         uint32_t eicr;
8013         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8014         struct ixgbe_interrupt *intr =
8015                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8016         ixgbevf_intr_disable(hw);
8017
8018         /* read-on-clear nic registers here */
8019         eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
8020         intr->flags = 0;
8021
8022         /* only one misc vector supported - mailbox */
8023         eicr &= IXGBE_VTEICR_MASK;
8024         if (eicr == IXGBE_MISC_VEC_ID)
8025                 intr->flags |= IXGBE_FLAG_MAILBOX;
8026
8027         return 0;
8028 }
8029
8030 static int
8031 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
8032 {
8033         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8034         struct ixgbe_interrupt *intr =
8035                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8036
8037         if (intr->flags & IXGBE_FLAG_MAILBOX) {
8038                 ixgbevf_mbx_process(dev);
8039                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
8040         }
8041
8042         ixgbevf_intr_enable(hw);
8043
8044         return 0;
8045 }
8046
8047 static void
8048 ixgbevf_dev_interrupt_handler(void *param)
8049 {
8050         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
8051
8052         ixgbevf_dev_interrupt_get_status(dev);
8053         ixgbevf_dev_interrupt_action(dev);
8054 }
8055
8056 /**
8057  *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
8058  *  @hw: pointer to hardware structure
8059  *
8060  *  Stops the transmit data path and waits for the HW to internally empty
8061  *  the Tx security block
8062  **/
8063 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
8064 {
8065 #define IXGBE_MAX_SECTX_POLL 40
8066
8067         int i;
8068         int sectxreg;
8069
8070         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8071         sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
8072         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8073         for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
8074                 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
8075                 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
8076                         break;
8077                 /* Use interrupt-safe sleep just in case */
8078                 usec_delay(1000);
8079         }
8080
8081         /* For informational purposes only */
8082         if (i >= IXGBE_MAX_SECTX_POLL)
8083                 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
8084                          "path fully disabled.  Continuing with init.");
8085
8086         return IXGBE_SUCCESS;
8087 }
8088
8089 /**
8090  *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
8091  *  @hw: pointer to hardware structure
8092  *
8093  *  Enables the transmit data path.
8094  **/
8095 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
8096 {
8097         uint32_t sectxreg;
8098
8099         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8100         sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
8101         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8102         IXGBE_WRITE_FLUSH(hw);
8103
8104         return IXGBE_SUCCESS;
8105 }
8106
8107 /* restore n-tuple filter */
8108 static inline void
8109 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
8110 {
8111         struct ixgbe_filter_info *filter_info =
8112                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8113         struct ixgbe_5tuple_filter *node;
8114
8115         TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
8116                 ixgbe_inject_5tuple_filter(dev, node);
8117         }
8118 }
8119
8120 /* restore ethernet type filter */
8121 static inline void
8122 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
8123 {
8124         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8125         struct ixgbe_filter_info *filter_info =
8126                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8127         int i;
8128
8129         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8130                 if (filter_info->ethertype_mask & (1 << i)) {
8131                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
8132                                         filter_info->ethertype_filters[i].etqf);
8133                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
8134                                         filter_info->ethertype_filters[i].etqs);
8135                         IXGBE_WRITE_FLUSH(hw);
8136                 }
8137         }
8138 }
8139
8140 /* restore SYN filter */
8141 static inline void
8142 ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
8143 {
8144         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8145         struct ixgbe_filter_info *filter_info =
8146                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8147         uint32_t synqf;
8148
8149         synqf = filter_info->syn_info;
8150
8151         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
8152                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
8153                 IXGBE_WRITE_FLUSH(hw);
8154         }
8155 }
8156
8157 /* restore L2 tunnel filter */
8158 static inline void
8159 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
8160 {
8161         struct ixgbe_l2_tn_info *l2_tn_info =
8162                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8163         struct ixgbe_l2_tn_filter *node;
8164         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8165
8166         TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
8167                 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
8168                 l2_tn_conf.tunnel_id      = node->key.tn_id;
8169                 l2_tn_conf.pool           = node->pool;
8170                 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
8171         }
8172 }
8173
8174 static int
8175 ixgbe_filter_restore(struct rte_eth_dev *dev)
8176 {
8177         ixgbe_ntuple_filter_restore(dev);
8178         ixgbe_ethertype_filter_restore(dev);
8179         ixgbe_syn_filter_restore(dev);
8180         ixgbe_fdir_filter_restore(dev);
8181         ixgbe_l2_tn_filter_restore(dev);
8182
8183         return 0;
8184 }
8185
8186 static void
8187 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
8188 {
8189         struct ixgbe_l2_tn_info *l2_tn_info =
8190                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8191         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8192
8193         if (l2_tn_info->e_tag_en)
8194                 (void)ixgbe_e_tag_enable(hw);
8195
8196         if (l2_tn_info->e_tag_fwd_en)
8197                 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
8198
8199         (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
8200 }
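
/*
 * Illustrative sketch (not part of the upstream code):
 * ixgbe_l2_tunnel_conf() and ixgbe_filter_restore() are meant to be invoked
 * together once the port is brought (back) up, so that configuration and
 * filters cached in software are re-applied to hardware after a reset.  The
 * caller below is a simplified, hypothetical stand-in for that start-up path.
 *
 *     static void example_after_port_start(struct rte_eth_dev *dev)
 *     {
 *             ixgbe_l2_tunnel_conf(dev);      (re-apply E-tag configuration)
 *             ixgbe_filter_restore(dev);      (re-inject cached flow filters)
 *     }
 */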
8201
8202 /* remove all the n-tuple filters */
8203 void
8204 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
8205 {
8206         struct ixgbe_filter_info *filter_info =
8207                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8208         struct ixgbe_5tuple_filter *p_5tuple;
8209
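        /*
         * Drain with TAILQ_FIRST: each remove call unlinks (and frees) the
         * node, so iterating with TAILQ_FOREACH would not be safe here.
         */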
8210         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
8211                 ixgbe_remove_5tuple_filter(dev, p_5tuple);
8212 }
8213
8214 /* remove all the ether type filters */
8215 void
8216 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
8217 {
8218         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8219         struct ixgbe_filter_info *filter_info =
8220                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8221         int i;
8222
8223         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8224                 if (filter_info->ethertype_mask & (1 << i) &&
8225                     !filter_info->ethertype_filters[i].conf) {
8226                         (void)ixgbe_ethertype_filter_remove(filter_info,
8227                                                             (uint8_t)i);
8228                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
8229                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
8230                         IXGBE_WRITE_FLUSH(hw);
8231                 }
8232         }
8233 }
8234
8235 /* remove the SYN filter */
8236 void
8237 ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
8238 {
8239         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8240         struct ixgbe_filter_info *filter_info =
8241                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8242
8243         if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
8244                 filter_info->syn_info = 0;
8245
8246                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
8247                 IXGBE_WRITE_FLUSH(hw);
8248         }
8249 }
8250
8251 /* remove all the L2 tunnel filters */
8252 int
8253 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
8254 {
8255         struct ixgbe_l2_tn_info *l2_tn_info =
8256                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8257         struct ixgbe_l2_tn_filter *l2_tn_filter;
8258         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8259         int ret = 0;
8260
8261         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
8262                 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
8263                 l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
8264                 l2_tn_conf.pool           = l2_tn_filter->pool;
8265                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
8266                 if (ret < 0)
8267                         return ret;
8268         }
8269
8270         return 0;
8271 }
8272
8273 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
8274 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
8275 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
8276 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
8277 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
8278 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
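
/*
 * The registration macros above hook the PF (net_ixgbe) and VF (net_ixgbe_vf)
 * drivers into the EAL PCI bus, export their PCI ID tables for device
 * matching, and declare which kernel modules the devices may be bound to
 * (igb_uio, uio_pci_generic or vfio-pci for the PF; igb_uio or vfio-pci for
 * the VF).
 */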