dpdk.git: drivers/net/ixgbe/ixgbe_ethdev.c (commit f74ff642b9699a71cc637057555b93affa5c035c)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_ethdev_pci.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_security_driver.h>
65
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "ixgbe_regs.h"
76
77 /*
78  * High threshold controlling when to start sending XOFF frames. Must be at
79  * least 8 bytes less than the receive packet buffer size. This value is in
80  * units of 1024 bytes.
81  */
82 #define IXGBE_FC_HI    0x80
83
84 /*
85  * Low threshold controlling when to start sending XON frames. This value is
86  * in units of 1024 bytes.
87  */
88 #define IXGBE_FC_LO    0x40
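/*
 * For example: with the defaults above, XOFF frames start being sent once
 * 0x80 * 1024 = 128 KB of the receive packet buffer is in use, and XON frames
 * are sent once usage falls back below 0x40 * 1024 = 64 KB.
 */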
89
90 /* Default minimum inter-interrupt interval for EITR configuration */
91 #define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E
92
93 /* Timer value included in XOFF frames. */
94 #define IXGBE_FC_PAUSE 0x680
95
96 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
97 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
98 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
99
100 #define IXGBE_MMW_SIZE_DEFAULT        0x4
101 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
102 #define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
103
104 /*
105  *  Default values for RX/TX configuration
106  */
107 #define IXGBE_DEFAULT_RX_FREE_THRESH  32
108 #define IXGBE_DEFAULT_RX_PTHRESH      8
109 #define IXGBE_DEFAULT_RX_HTHRESH      8
110 #define IXGBE_DEFAULT_RX_WTHRESH      0
111
112 #define IXGBE_DEFAULT_TX_FREE_THRESH  32
113 #define IXGBE_DEFAULT_TX_PTHRESH      32
114 #define IXGBE_DEFAULT_TX_HTHRESH      0
115 #define IXGBE_DEFAULT_TX_WTHRESH      0
116 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
117
118 /* Bit shift and mask */
119 #define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
120 #define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
121 #define IXGBE_8_BIT_WIDTH  CHAR_BIT
122 #define IXGBE_8_BIT_MASK   UINT8_MAX
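/*
 * For example: with CHAR_BIT == 8, IXGBE_4_BIT_WIDTH is 4 and IXGBE_4_BIT_MASK
 * expands to RTE_LEN2MASK(4, uint8_t) == 0x0F (the low nibble), while
 * IXGBE_8_BIT_MASK is 0xFF.
 */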
123
124 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
125
126 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
127
128 #define IXGBE_HKEY_MAX_INDEX 10
129
130 /* Additional timesync values. */
131 #define NSEC_PER_SEC             1000000000L
132 #define IXGBE_INCVAL_10GB        0x66666666
133 #define IXGBE_INCVAL_1GB         0x40000000
134 #define IXGBE_INCVAL_100         0x50000000
135 #define IXGBE_INCVAL_SHIFT_10GB  28
136 #define IXGBE_INCVAL_SHIFT_1GB   24
137 #define IXGBE_INCVAL_SHIFT_100   21
138 #define IXGBE_INCVAL_SHIFT_82599 7
139 #define IXGBE_INCPER_SHIFT_82599 24
140
141 #define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
142
143 #define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
144 #define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
145 #define DEFAULT_ETAG_ETYPE                     0x893f
146 #define IXGBE_ETAG_ETYPE                       0x00005084
147 #define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
148 #define IXGBE_ETAG_ETYPE_VALID                 0x80000000
149 #define IXGBE_RAH_ADTYPE                       0x40000000
150 #define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
151 #define IXGBE_VMVIR_TAGA_MASK                  0x18000000
152 #define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
153 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
154 #define IXGBE_QDE_STRIP_TAG                    0x00000004
155 #define IXGBE_VTEICR_MASK                      0x07
156
157 #define IXGBE_EXVET_VET_EXT_SHIFT              16
158 #define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
159
160 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
161 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
162 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
163 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
164 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
165 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
166 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
167 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
168 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
169 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
170 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
171 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
172 static void ixgbe_dev_close(struct rte_eth_dev *dev);
173 static int  ixgbe_dev_reset(struct rte_eth_dev *dev);
174 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
175 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
176 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
177 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
178 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
179                                 int wait_to_complete);
180 static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
181                                 struct rte_eth_stats *stats);
182 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
183                                 struct rte_eth_xstat *xstats, unsigned n);
184 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
185                                   struct rte_eth_xstat *xstats, unsigned n);
186 static int
187 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
188                 uint64_t *values, unsigned int n);
189 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
190 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
191 static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
192         struct rte_eth_xstat_name *xstats_names,
193         unsigned int size);
194 static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
195         struct rte_eth_xstat_name *xstats_names, unsigned limit);
196 static int ixgbe_dev_xstats_get_names_by_id(
197         struct rte_eth_dev *dev,
198         struct rte_eth_xstat_name *xstats_names,
199         const uint64_t *ids,
200         unsigned int limit);
201 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
202                                              uint16_t queue_id,
203                                              uint8_t stat_idx,
204                                              uint8_t is_rx);
205 static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
206                                  size_t fw_size);
207 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
208                                struct rte_eth_dev_info *dev_info);
209 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
210 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
211                                  struct rte_eth_dev_info *dev_info);
212 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
213
214 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
215                 uint16_t vlan_id, int on);
216 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
217                                enum rte_vlan_type vlan_type,
218                                uint16_t tpid_id);
219 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
220                 uint16_t queue, bool on);
221 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
222                 int on);
223 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
224 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
225 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
226 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
227 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
228
229 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
230 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
231 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
232                                struct rte_eth_fc_conf *fc_conf);
233 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
234                                struct rte_eth_fc_conf *fc_conf);
235 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
236                 struct rte_eth_pfc_conf *pfc_conf);
237 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
238                         struct rte_eth_rss_reta_entry64 *reta_conf,
239                         uint16_t reta_size);
240 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
241                         struct rte_eth_rss_reta_entry64 *reta_conf,
242                         uint16_t reta_size);
243 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
244 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
245 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
246 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
247 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
248 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
249                                       struct rte_intr_handle *handle);
250 static void ixgbe_dev_interrupt_handler(void *param);
251 static void ixgbe_dev_interrupt_delayed_handler(void *param);
252 static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
253                          uint32_t index, uint32_t pool);
254 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
255 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
256                                            struct ether_addr *mac_addr);
257 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
258 static bool is_device_supported(struct rte_eth_dev *dev,
259                                 struct rte_pci_driver *drv);
260
261 /* For Virtual Function support */
262 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
263 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
264 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
265 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
266 static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
267                                    int wait_to_complete);
268 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
269 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
270 static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
271 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
272 static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
273 static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
274                 struct rte_eth_stats *stats);
275 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
276 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
277                 uint16_t vlan_id, int on);
278 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
279                 uint16_t queue, int on);
280 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
281 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
282 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
283                                             uint16_t queue_id);
284 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
285                                              uint16_t queue_id);
286 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
287                                  uint8_t queue, uint8_t msix_vector);
288 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
289 static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
290 static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
291
292 /* For Eth VMDQ APIs support */
293 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
294                 ether_addr * mac_addr, uint8_t on);
295 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
296 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
297                 struct rte_eth_mirror_conf *mirror_conf,
298                 uint8_t rule_id, uint8_t on);
299 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
300                 uint8_t rule_id);
301 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
302                                           uint16_t queue_id);
303 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
304                                            uint16_t queue_id);
305 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
306                                uint8_t queue, uint8_t msix_vector);
307 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
308
309 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
310                                 struct ether_addr *mac_addr,
311                                 uint32_t index, uint32_t pool);
312 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
313 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
314                                              struct ether_addr *mac_addr);
315 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
316                         struct rte_eth_syn_filter *filter);
317 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
318                         enum rte_filter_op filter_op,
319                         void *arg);
320 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
321                         struct ixgbe_5tuple_filter *filter);
322 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
323                         struct ixgbe_5tuple_filter *filter);
324 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
325                                 enum rte_filter_op filter_op,
326                                 void *arg);
327 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
328                         struct rte_eth_ntuple_filter *filter);
329 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
330                                 enum rte_filter_op filter_op,
331                                 void *arg);
332 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
333                         struct rte_eth_ethertype_filter *filter);
334 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
335                      enum rte_filter_type filter_type,
336                      enum rte_filter_op filter_op,
337                      void *arg);
338 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
339
340 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
341                                       struct ether_addr *mc_addr_set,
342                                       uint32_t nb_mc_addr);
343 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
344                                    struct rte_eth_dcb_info *dcb_info);
345
346 static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
347 static int ixgbe_get_regs(struct rte_eth_dev *dev,
348                             struct rte_dev_reg_info *regs);
349 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
350 static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
351                                 struct rte_dev_eeprom_info *eeprom);
352 static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
353                                 struct rte_dev_eeprom_info *eeprom);
354
355 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
356 static int ixgbevf_get_regs(struct rte_eth_dev *dev,
357                                 struct rte_dev_reg_info *regs);
358
359 static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
360 static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
361 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
362                                             struct timespec *timestamp,
363                                             uint32_t flags);
364 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
365                                             struct timespec *timestamp);
366 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
367 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
368                                    struct timespec *timestamp);
369 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
370                                    const struct timespec *timestamp);
371 static void ixgbevf_dev_interrupt_handler(void *param);
372
373 static int ixgbe_dev_l2_tunnel_eth_type_conf
374         (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
375 static int ixgbe_dev_l2_tunnel_offload_set
376         (struct rte_eth_dev *dev,
377          struct rte_eth_l2_tunnel_conf *l2_tunnel,
378          uint32_t mask,
379          uint8_t en);
380 static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
381                                              enum rte_filter_op filter_op,
382                                              void *arg);
383
384 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
385                                          struct rte_eth_udp_tunnel *udp_tunnel);
386 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
387                                          struct rte_eth_udp_tunnel *udp_tunnel);
388 static int ixgbe_filter_restore(struct rte_eth_dev *dev);
389 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
390
391 /*
392  * Macros to update VF statistics for registers that are not cleared on read
393  */
394 #define UPDATE_VF_STAT(reg, last, cur)                          \
395 {                                                               \
396         uint32_t latest = IXGBE_READ_REG(hw, reg);              \
397         cur += (latest - last) & UINT_MAX;                      \
398         last = latest;                                          \
399 }
400
401 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
402 {                                                                \
403         u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
404         u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
405         u64 latest = ((new_msb << 32) | new_lsb);                \
406         cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
407         last = latest;                                           \
408 }
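/*
 * Example of the wrap-around handling above (values chosen for illustration):
 * for the 36-bit counters, last = 0xFFFFFFFF0 and latest = 0x10 after a
 * rollover gives (0x1000000000 + 0x10 - 0xFFFFFFFF0) & 0xFFFFFFFFF = 0x20,
 * i.e. the 32 events that actually occurred since the previous read.
 */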
409
410 #define IXGBE_SET_HWSTRIP(h, q) do {\
411                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
412                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
413                 (h)->bitmap[idx] |= 1 << bit;\
414         } while (0)
415
416 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\
417                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
418                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
419                 (h)->bitmap[idx] &= ~(1 << bit);\
420         } while (0)
421
422 #define IXGBE_GET_HWSTRIP(h, q, r) do {\
423                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
424                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
425                 (r) = (h)->bitmap[idx] >> bit & 1;\
426         } while (0)
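/*
 * Worked example (assuming the bitmap words are 32-bit, so
 * sizeof((h)->bitmap[0]) * NBBY == 32): for queue 37, idx = 37 / 32 = 1 and
 * bit = 37 % 32 = 5, so IXGBE_SET_HWSTRIP(h, 37) sets bit 5 of h->bitmap[1].
 */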
427
428 /*
429  * The set of PCI devices this driver supports
430  */
431 static const struct rte_pci_id pci_id_ixgbe_map[] = {
432         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
433         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
434         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
435         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
436         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
437         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
438         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
439         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
440         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
441         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
442         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
443         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
444         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
445         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
446         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
447         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
448         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
449         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
450         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
451         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
452         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
453         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
454         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
455         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
456         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
457         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
458         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
459         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
460         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
461         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
462         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
463         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
464         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
465         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
466         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
467         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
468         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
469         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
470         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
471         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
472         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
473         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
474         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
475         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
476         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
477         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
478         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
479         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
480 #ifdef RTE_LIBRTE_IXGBE_BYPASS
481         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
482 #endif
483         { .vendor_id = 0, /* sentinel */ },
484 };
485
486 /*
487  * The set of PCI devices this driver supports (82599/X540/X550 VF devices)
488  */
489 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
490         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
491         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
492         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
493         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
494         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
495         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
496         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
497         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
498         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
499         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
500         { .vendor_id = 0, /* sentinel */ },
501 };
502
503 static const struct rte_eth_desc_lim rx_desc_lim = {
504         .nb_max = IXGBE_MAX_RING_DESC,
505         .nb_min = IXGBE_MIN_RING_DESC,
506         .nb_align = IXGBE_RXD_ALIGN,
507 };
508
509 static const struct rte_eth_desc_lim tx_desc_lim = {
510         .nb_max = IXGBE_MAX_RING_DESC,
511         .nb_min = IXGBE_MIN_RING_DESC,
512         .nb_align = IXGBE_TXD_ALIGN,
513         .nb_seg_max = IXGBE_TX_MAX_SEG,
514         .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
515 };
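/*
 * Note (a sketch of how these limits are consumed, based on the generic ethdev
 * API): they are reported to applications through dev_infos_get() /
 * rte_eth_dev_info_get(), so a ring size passed to
 * rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() is expected to lie within
 * [nb_min, nb_max] and be a multiple of nb_align.
 */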
516
517 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
518         .dev_configure        = ixgbe_dev_configure,
519         .dev_start            = ixgbe_dev_start,
520         .dev_stop             = ixgbe_dev_stop,
521         .dev_set_link_up    = ixgbe_dev_set_link_up,
522         .dev_set_link_down  = ixgbe_dev_set_link_down,
523         .dev_close            = ixgbe_dev_close,
524         .dev_reset            = ixgbe_dev_reset,
525         .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
526         .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
527         .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
528         .allmulticast_disable = ixgbe_dev_allmulticast_disable,
529         .link_update          = ixgbe_dev_link_update,
530         .stats_get            = ixgbe_dev_stats_get,
531         .xstats_get           = ixgbe_dev_xstats_get,
532         .xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
533         .stats_reset          = ixgbe_dev_stats_reset,
534         .xstats_reset         = ixgbe_dev_xstats_reset,
535         .xstats_get_names     = ixgbe_dev_xstats_get_names,
536         .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
537         .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
538         .fw_version_get       = ixgbe_fw_version_get,
539         .dev_infos_get        = ixgbe_dev_info_get,
540         .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
541         .mtu_set              = ixgbe_dev_mtu_set,
542         .vlan_filter_set      = ixgbe_vlan_filter_set,
543         .vlan_tpid_set        = ixgbe_vlan_tpid_set,
544         .vlan_offload_set     = ixgbe_vlan_offload_set,
545         .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
546         .rx_queue_start       = ixgbe_dev_rx_queue_start,
547         .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
548         .tx_queue_start       = ixgbe_dev_tx_queue_start,
549         .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
550         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
551         .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
552         .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
553         .rx_queue_release     = ixgbe_dev_rx_queue_release,
554         .rx_queue_count       = ixgbe_dev_rx_queue_count,
555         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
556         .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
557         .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
558         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
559         .tx_queue_release     = ixgbe_dev_tx_queue_release,
560         .dev_led_on           = ixgbe_dev_led_on,
561         .dev_led_off          = ixgbe_dev_led_off,
562         .flow_ctrl_get        = ixgbe_flow_ctrl_get,
563         .flow_ctrl_set        = ixgbe_flow_ctrl_set,
564         .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
565         .mac_addr_add         = ixgbe_add_rar,
566         .mac_addr_remove      = ixgbe_remove_rar,
567         .mac_addr_set         = ixgbe_set_default_mac_addr,
568         .uc_hash_table_set    = ixgbe_uc_hash_table_set,
569         .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
570         .mirror_rule_set      = ixgbe_mirror_rule_set,
571         .mirror_rule_reset    = ixgbe_mirror_rule_reset,
572         .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
573         .reta_update          = ixgbe_dev_rss_reta_update,
574         .reta_query           = ixgbe_dev_rss_reta_query,
575         .rss_hash_update      = ixgbe_dev_rss_hash_update,
576         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
577         .filter_ctrl          = ixgbe_dev_filter_ctrl,
578         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
579         .rxq_info_get         = ixgbe_rxq_info_get,
580         .txq_info_get         = ixgbe_txq_info_get,
581         .timesync_enable      = ixgbe_timesync_enable,
582         .timesync_disable     = ixgbe_timesync_disable,
583         .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
584         .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
585         .get_reg              = ixgbe_get_regs,
586         .get_eeprom_length    = ixgbe_get_eeprom_length,
587         .get_eeprom           = ixgbe_get_eeprom,
588         .set_eeprom           = ixgbe_set_eeprom,
589         .get_dcb_info         = ixgbe_dev_get_dcb_info,
590         .timesync_adjust_time = ixgbe_timesync_adjust_time,
591         .timesync_read_time   = ixgbe_timesync_read_time,
592         .timesync_write_time  = ixgbe_timesync_write_time,
593         .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
594         .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
595         .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
596         .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
597         .tm_ops_get           = ixgbe_tm_ops_get,
598 };
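/*
 * Simplified call-path sketch (for orientation only): the generic ethdev layer
 * dispatches through this table, e.g.
 *
 *     rte_eth_dev_start(port_id)
 *         -> (*dev->dev_ops->dev_start)(dev)          i.e. ixgbe_dev_start(dev)
 *     rte_eth_stats_get(port_id, &stats)
 *         -> (*dev->dev_ops->stats_get)(dev, &stats)
 */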
599
600 /*
601  * dev_ops for the virtual function; only the bare necessities for basic VF
602  * operation are implemented
603  */
604 static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
605         .dev_configure        = ixgbevf_dev_configure,
606         .dev_start            = ixgbevf_dev_start,
607         .dev_stop             = ixgbevf_dev_stop,
608         .link_update          = ixgbevf_dev_link_update,
609         .stats_get            = ixgbevf_dev_stats_get,
610         .xstats_get           = ixgbevf_dev_xstats_get,
611         .stats_reset          = ixgbevf_dev_stats_reset,
612         .xstats_reset         = ixgbevf_dev_stats_reset,
613         .xstats_get_names     = ixgbevf_dev_xstats_get_names,
614         .dev_close            = ixgbevf_dev_close,
615         .dev_reset            = ixgbevf_dev_reset,
616         .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
617         .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
618         .dev_infos_get        = ixgbevf_dev_info_get,
619         .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
620         .mtu_set              = ixgbevf_dev_set_mtu,
621         .vlan_filter_set      = ixgbevf_vlan_filter_set,
622         .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
623         .vlan_offload_set     = ixgbevf_vlan_offload_set,
624         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
625         .rx_queue_release     = ixgbe_dev_rx_queue_release,
626         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
627         .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
628         .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
629         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
630         .tx_queue_release     = ixgbe_dev_tx_queue_release,
631         .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
632         .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
633         .mac_addr_add         = ixgbevf_add_mac_addr,
634         .mac_addr_remove      = ixgbevf_remove_mac_addr,
635         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
636         .rxq_info_get         = ixgbe_rxq_info_get,
637         .txq_info_get         = ixgbe_txq_info_get,
638         .mac_addr_set         = ixgbevf_set_default_mac_addr,
639         .get_reg              = ixgbevf_get_regs,
640         .reta_update          = ixgbe_dev_rss_reta_update,
641         .reta_query           = ixgbe_dev_rss_reta_query,
642         .rss_hash_update      = ixgbe_dev_rss_hash_update,
643         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
644 };
645
646 /* Statistics names and their offsets within the stats structure */
647 struct rte_ixgbe_xstats_name_off {
648         char name[RTE_ETH_XSTATS_NAME_SIZE];
649         unsigned offset;
650 };
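/*
 * Usage sketch (illustrative): a counter is read by adding the recorded offset
 * to the base of the hardware stats structure, e.g.
 *
 *     uint64_t val = *(uint64_t *)(((char *)hw_stats) +
 *                                  rte_ixgbe_stats_strings[i].offset);
 */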
651
652 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
653         {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
654         {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
655         {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
656         {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
657         {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
658         {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
659         {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
660         {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
661         {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
662         {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
663         {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
664         {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
665         {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
666         {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
667         {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
668                 prc1023)},
669         {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
670                 prc1522)},
671         {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
672         {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
673         {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
674         {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
675         {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
676         {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
677         {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
678         {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
679         {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
680         {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
681         {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
682         {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
683         {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
684         {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
685         {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
686         {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
687         {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
688                 ptc1023)},
689         {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
690                 ptc1522)},
691         {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
692         {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
693         {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
694         {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
695
696         {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
697                 fdirustat_add)},
698         {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
699                 fdirustat_remove)},
700         {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
701                 fdirfstat_fadd)},
702         {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
703                 fdirfstat_fremove)},
704         {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
705                 fdirmatch)},
706         {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
707                 fdirmiss)},
708
709         {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
710         {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
711         {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
712                 fclast)},
713         {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
714         {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
715         {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
716         {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
717         {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
718                 fcoe_noddp)},
719         {"rx_fcoe_no_direct_data_placement_ext_buff",
720                 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
721
722         {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
723                 lxontxc)},
724         {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
725                 lxonrxc)},
726         {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
727                 lxofftxc)},
728         {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
729                 lxoffrxc)},
730         {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
731 };
732
733 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
734                            sizeof(rte_ixgbe_stats_strings[0]))
735
736 /* MACsec statistics */
737 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
738         {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
739                 out_pkts_untagged)},
740         {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
741                 out_pkts_encrypted)},
742         {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
743                 out_pkts_protected)},
744         {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
745                 out_octets_encrypted)},
746         {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
747                 out_octets_protected)},
748         {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
749                 in_pkts_untagged)},
750         {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
751                 in_pkts_badtag)},
752         {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
753                 in_pkts_nosci)},
754         {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
755                 in_pkts_unknownsci)},
756         {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
757                 in_octets_decrypted)},
758         {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
759                 in_octets_validated)},
760         {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
761                 in_pkts_unchecked)},
762         {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
763                 in_pkts_delayed)},
764         {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
765                 in_pkts_late)},
766         {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
767                 in_pkts_ok)},
768         {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
769                 in_pkts_invalid)},
770         {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
771                 in_pkts_notvalid)},
772         {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
773                 in_pkts_unusedsa)},
774         {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
775                 in_pkts_notusingsa)},
776 };
777
778 #define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
779                            sizeof(rte_ixgbe_macsec_strings[0]))
780
781 /* Per-queue statistics */
782 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
783         {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
784         {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
785         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
786         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
787 };
788
789 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
790                            sizeof(rte_ixgbe_rxq_strings[0]))
791 #define IXGBE_NB_RXQ_PRIO_VALUES 8
792
793 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
794         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
795         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
796         {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
797                 pxon2offc)},
798 };
799
800 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
801                            sizeof(rte_ixgbe_txq_strings[0]))
802 #define IXGBE_NB_TXQ_PRIO_VALUES 8
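/*
 * Note (illustrative): the *_PRIO_VALUES count of 8 reflects that the hardware
 * keeps one instance of each of these counters per priority / packet buffer
 * (0-7), so each name above expands into eight exported xstats entries.
 */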
803
804 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
805         {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
806 };
807
808 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
809                 sizeof(rte_ixgbevf_stats_strings[0]))
810
811 /**
812  * Atomically reads the link status information from global
813  * structure rte_eth_dev.
814  *
815  * @param dev    Pointer to the rte_eth_dev structure to read from.
816  * @param link   Pointer to an rte_eth_link buffer that receives the current
817  *               link status.
818  *
819  * @return
820  *   - On success, zero.
821  *   - On failure, negative value.
822  */
823 static inline int
824 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
825                                 struct rte_eth_link *link)
826 {
827         struct rte_eth_link *dst = link;
828         struct rte_eth_link *src = &(dev->data->dev_link);
829
830         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
831                                         *(uint64_t *)src) == 0)
832                 return -1;
833
834         return 0;
835 }
836
837 /**
838  * Atomically writes the link status information into global
839  * structure rte_eth_dev.
840  *
841  * @param dev    Pointer to the rte_eth_dev structure to write to.
842  * @param link   Pointer to the rte_eth_link buffer holding the link status
843  *               to be stored.
844  *
845  * @return
846  *   - On success, zero.
847  *   - On failure, negative value.
848  */
849 static inline int
850 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
851                                 struct rte_eth_link *link)
852 {
853         struct rte_eth_link *dst = &(dev->data->dev_link);
854         struct rte_eth_link *src = link;
855
856         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
857                                         *(uint64_t *)src) == 0)
858                 return -1;
859
860         return 0;
861 }
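/*
 * Note (illustrative): both helpers above copy the link structure as a single
 * 64-bit value via rte_atomic64_cmpset(), so a reader never observes a torn
 * (half-updated) speed/duplex/status combination while the link state is
 * being updated concurrently.
 */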
862
863 /*
864  * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
865  */
866 static inline int
867 ixgbe_is_sfp(struct ixgbe_hw *hw)
868 {
869         switch (hw->phy.type) {
870         case ixgbe_phy_sfp_avago:
871         case ixgbe_phy_sfp_ftl:
872         case ixgbe_phy_sfp_intel:
873         case ixgbe_phy_sfp_unknown:
874         case ixgbe_phy_sfp_passive_tyco:
875         case ixgbe_phy_sfp_passive_unknown:
876                 return 1;
877         default:
878                 return 0;
879         }
880 }
881
882 static inline int32_t
883 ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
884 {
885         uint32_t ctrl_ext;
886         int32_t status;
887
888         status = ixgbe_reset_hw(hw);
889
890         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
891         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
892         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
893         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
894         IXGBE_WRITE_FLUSH(hw);
895
896         if (status == IXGBE_ERR_SFP_NOT_PRESENT)
897                 status = IXGBE_SUCCESS;
898         return status;
899 }
900
901 static inline void
902 ixgbe_enable_intr(struct rte_eth_dev *dev)
903 {
904         struct ixgbe_interrupt *intr =
905                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
906         struct ixgbe_hw *hw =
907                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
908
909         IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
910         IXGBE_WRITE_FLUSH(hw);
911 }
912
913 /*
914  * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
915  */
916 static void
917 ixgbe_disable_intr(struct ixgbe_hw *hw)
918 {
919         PMD_INIT_FUNC_TRACE();
920
921         if (hw->mac.type == ixgbe_mac_82598EB) {
922                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
923         } else {
924                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
925                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
926                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
927         }
928         IXGBE_WRITE_FLUSH(hw);
929 }
930
931 /*
932  * This function resets queue statistics mapping registers.
933  * From Niantic datasheet, Initialization of Statistics section:
934  * "...if software requires the queue counters, the RQSMR and TQSM registers
935  * must be re-programmed following a device reset."
936  */
937 static void
938 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
939 {
940         uint32_t i;
941
942         for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
943                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
944                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
945         }
946 }
947
948
949 static int
950 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
951                                   uint16_t queue_id,
952                                   uint8_t stat_idx,
953                                   uint8_t is_rx)
954 {
955 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
956 #define NB_QMAP_FIELDS_PER_QSM_REG 4
957 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
958
959         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
960         struct ixgbe_stat_mapping_registers *stat_mappings =
961                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
962         uint32_t qsmr_mask = 0;
963         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
964         uint32_t q_map;
965         uint8_t n, offset;
966
967         if ((hw->mac.type != ixgbe_mac_82599EB) &&
968                 (hw->mac.type != ixgbe_mac_X540) &&
969                 (hw->mac.type != ixgbe_mac_X550) &&
970                 (hw->mac.type != ixgbe_mac_X550EM_x) &&
971                 (hw->mac.type != ixgbe_mac_X550EM_a))
972                 return -ENOSYS;
973
974         PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
975                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
976                      queue_id, stat_idx);
977
978         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
979         if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
980                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
981                 return -EIO;
982         }
983         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
984
985         /* Now clear any previous stat_idx set */
986         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
987         if (!is_rx)
988                 stat_mappings->tqsm[n] &= ~clearing_mask;
989         else
990                 stat_mappings->rqsmr[n] &= ~clearing_mask;
991
992         q_map = (uint32_t)stat_idx;
993         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
994         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
995         if (!is_rx)
996                 stat_mappings->tqsm[n] |= qsmr_mask;
997         else
998                 stat_mappings->rqsmr[n] |= qsmr_mask;
999
1000         PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
1001                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
1002                      queue_id, stat_idx);
1003         PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
1004                      is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
1005
1006         /* Now write the mapping in the appropriate register */
1007         if (is_rx) {
1008                 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
1009                              stat_mappings->rqsmr[n], n);
1010                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
1011         } else {
1012                 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
1013                              stat_mappings->tqsm[n], n);
1014                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
1015         }
1016         return 0;
1017 }
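/*
 * Worked example: mapping RX queue_id 10 to stat_idx 3 gives n = 10 / 4 = 2
 * and offset = 10 % 4 = 2, so the index is written at bit position
 * 8 * 2 = 16 of RQSMR[2] after the previously programmed value is cleared.
 * The shadow copy kept in stat_mappings lets the same value be rewritten
 * after a device reset (see ixgbe_restore_statistics_mapping()).
 */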
1018
1019 static void
1020 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
1021 {
1022         struct ixgbe_stat_mapping_registers *stat_mappings =
1023                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
1024         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1025         int i;
1026
1027         /* write whatever was in stat mapping table to the NIC */
1028         for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
1029                 /* rx */
1030                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
1031
1032                 /* tx */
1033                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
1034         }
1035 }
1036
1037 static void
1038 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
1039 {
1040         uint8_t i;
1041         struct ixgbe_dcb_tc_config *tc;
1042         uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
1043
1044         dcb_config->num_tcs.pg_tcs = dcb_max_tc;
1045         dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
1046         for (i = 0; i < dcb_max_tc; i++) {
1047                 tc = &dcb_config->tc_config[i];
1048                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
1049                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
1050                                  (uint8_t)(100/dcb_max_tc + (i & 1));
1051                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
1052                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
1053                                  (uint8_t)(100/dcb_max_tc + (i & 1));
1054                 tc->pfc = ixgbe_dcb_pfc_disabled;
1055         }
1056
1057         /* Initialize default user to priority mapping, UPx->TC0 */
1058         tc = &dcb_config->tc_config[0];
1059         tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
1060         tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
1061         for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
1062                 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
1063                 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
1064         }
1065         dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
1066         dcb_config->pfc_mode_enable = false;
1067         dcb_config->vt_mode = true;
1068         dcb_config->round_robin_enable = false;
1069         /* support all DCB capabilities in 82599 */
1070         dcb_config->support.capabilities = 0xFF;
1071
1072         /* We only support 4 TCs for X540 and X550 */
1073         if (hw->mac.type == ixgbe_mac_X540 ||
1074                 hw->mac.type == ixgbe_mac_X550 ||
1075                 hw->mac.type == ixgbe_mac_X550EM_x ||
1076                 hw->mac.type == ixgbe_mac_X550EM_a) {
1077                 dcb_config->num_tcs.pg_tcs = 4;
1078                 dcb_config->num_tcs.pfc_tcs = 4;
1079         }
1080 }
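/*
 * Note on the arithmetic above: with 8 traffic classes, 100 / 8 truncates to
 * 12 and the "+ (i & 1)" term alternates 12/13 percent shares (four of each),
 * so the per-TC bandwidth percentages still sum to exactly 100.
 */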
1081
1082 /*
1083  * Ensure that all locks are released before first NVM or PHY access
1084  */
1085 static void
1086 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
1087 {
1088         uint16_t mask;
1089
1090         /*
1091          * The PHY lock should not fail at this early stage. If it does, it is
1092          * because the application previously exited without releasing the lock,
1093          * so force the release of the faulty lock. The common lock is released
1094          * automatically by the swfw_sync function.
1095          */
1096         mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
1097         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1098                 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
1099         }
1100         ixgbe_release_swfw_semaphore(hw, mask);
1101
1102         /*
1103          * These locks are trickier since they are common to all ports; but the
1104          * swfw_sync retries last long enough (1 s) to make it almost certain
1105          * that a failure to take the lock is due to the semaphore being held
1106          * improperly.
1107          */
1108         mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
1109         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1110                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
1111         }
1112         ixgbe_release_swfw_semaphore(hw, mask);
1113 }
1114
1115 /*
1116  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
1117  * It returns 0 on success.
1118  */
1119 static int
1120 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
1121 {
1122         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1123         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1124         struct ixgbe_hw *hw =
1125                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1126         struct ixgbe_vfta *shadow_vfta =
1127                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1128         struct ixgbe_hwstrip *hwstrip =
1129                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1130         struct ixgbe_dcb_config *dcb_config =
1131                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
1132         struct ixgbe_filter_info *filter_info =
1133                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1134         struct ixgbe_bw_conf *bw_conf =
1135                 IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
1136         uint32_t ctrl_ext;
1137         uint16_t csum;
1138         int diag, i;
1139
1140         PMD_INIT_FUNC_TRACE();
1141
1142         eth_dev->dev_ops = &ixgbe_eth_dev_ops;
1143         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1144         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1145         eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
1146
1147         /*
1148          * For secondary processes, we don't initialise any further as primary
1149          * has already done this work. Only check we don't need a different
1150          * RX and TX function.
1151          */
1152         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1153                 struct ixgbe_tx_queue *txq;
1154                 /* TX queue function in primary, set by last queue initialized.
1155                  * Tx queues may not have been initialized by the primary process yet.
1156                  */
1157                 if (eth_dev->data->tx_queues) {
1158                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1159                         ixgbe_set_tx_function(eth_dev, txq);
1160                 } else {
1161                         /* Use default TX function if we get here */
1162                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1163                                      "Using default TX function.");
1164                 }
1165
1166                 ixgbe_set_rx_function(eth_dev);
1167
1168                 return 0;
1169         }
1170
1171         /* Initialize security_ctx only for the primary process */
1172         eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
1173         if (eth_dev->security_ctx == NULL)
1174                 return -ENOMEM;
1175
1176         rte_eth_copy_pci_info(eth_dev, pci_dev);
1177
1178         /* Vendor and Device ID need to be set before init of shared code */
1179         hw->device_id = pci_dev->id.device_id;
1180         hw->vendor_id = pci_dev->id.vendor_id;
1181         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1182         hw->allow_unsupported_sfp = 1;
1183
1184         /* Initialize the shared code (base driver) */
1185 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1186         diag = ixgbe_bypass_init_shared_code(hw);
1187 #else
1188         diag = ixgbe_init_shared_code(hw);
1189 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1190
1191         if (diag != IXGBE_SUCCESS) {
1192                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1193                 return -EIO;
1194         }
1195
1196         /* pick up the PCI bus settings for reporting later */
1197         ixgbe_get_bus_info(hw);
1198
1199         /* Unlock any pending hardware semaphore */
1200         ixgbe_swfw_lock_reset(hw);
1201
1202         /* Initialize DCB configuration */
1203         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1204         ixgbe_dcb_init(hw, dcb_config);
1205         /* Set default Hardware Flow Control settings */
1206         hw->fc.requested_mode = ixgbe_fc_full;
1207         hw->fc.current_mode = ixgbe_fc_full;
1208         hw->fc.pause_time = IXGBE_FC_PAUSE;
1209         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1210                 hw->fc.low_water[i] = IXGBE_FC_LO;
1211                 hw->fc.high_water[i] = IXGBE_FC_HI;
1212         }
1213         hw->fc.send_xon = 1;
1214
1215         /* Make sure we have a good EEPROM before we read from it */
1216         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1217         if (diag != IXGBE_SUCCESS) {
1218                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1219                 return -EIO;
1220         }
1221
1222 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1223         diag = ixgbe_bypass_init_hw(hw);
1224 #else
1225         diag = ixgbe_init_hw(hw);
1226 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1227
1228         /*
1229          * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1230          * is called too soon after the kernel driver unbinding/binding occurs.
1231          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1232          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1233          * also called. See ixgbe_identify_phy_82599(). The reason for the
1234          * failure is not known, and it only occurs when virtualisation features
1235          * are disabled in the BIOS. A delay of 100ms was found to be enough by
1236          * trial-and-error, and is doubled to be safe.
1237          */
1238         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1239                 rte_delay_ms(200);
1240                 diag = ixgbe_init_hw(hw);
1241         }
1242
1243         if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
1244                 diag = IXGBE_SUCCESS;
1245
1246         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1247                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1248                              "LOM.  Please be aware there may be issues associated "
1249                              "with your hardware.");
1250                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1251                              "please contact your Intel or hardware representative "
1252                              "who provided you with this hardware.");
1253         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1254                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1255         if (diag) {
1256                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1257                 return -EIO;
1258         }
1259
1260         /* Reset the hw statistics */
1261         ixgbe_dev_stats_reset(eth_dev);
1262
1263         /* disable interrupt */
1264         ixgbe_disable_intr(hw);
1265
1266         /* reset mappings for queue statistics hw counters */
1267         ixgbe_reset_qstat_mappings(hw);
1268
1269         /* Allocate memory for storing MAC addresses */
1270         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1271                                                hw->mac.num_rar_entries, 0);
1272         if (eth_dev->data->mac_addrs == NULL) {
1273                 PMD_INIT_LOG(ERR,
1274                              "Failed to allocate %u bytes needed to store "
1275                              "MAC addresses",
1276                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1277                 return -ENOMEM;
1278         }
1279         /* Copy the permanent MAC address */
1280         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1281                         &eth_dev->data->mac_addrs[0]);
1282
1283         /* Allocate memory for storing hash filter MAC addresses */
1284         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1285                                                     IXGBE_VMDQ_NUM_UC_MAC, 0);
1286         if (eth_dev->data->hash_mac_addrs == NULL) {
1287                 PMD_INIT_LOG(ERR,
1288                              "Failed to allocate %d bytes needed to store MAC addresses",
1289                              ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1290                 return -ENOMEM;
1291         }
1292
1293         /* initialize the vfta */
1294         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1295
1296         /* initialize the hw strip bitmap */
1297         memset(hwstrip, 0, sizeof(*hwstrip));
1298
1299         /* initialize PF if max_vfs is not zero */
1300         ixgbe_pf_host_init(eth_dev);
1301
1302         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1303         /* let hardware know driver is loaded */
1304         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1305         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1306         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1307         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1308         IXGBE_WRITE_FLUSH(hw);
1309
1310         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1311                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1312                              (int) hw->mac.type, (int) hw->phy.type,
1313                              (int) hw->phy.sfp_type);
1314         else
1315                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1316                              (int) hw->mac.type, (int) hw->phy.type);
1317
1318         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1319                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1320                      pci_dev->id.device_id);
1321
1322         rte_intr_callback_register(intr_handle,
1323                                    ixgbe_dev_interrupt_handler, eth_dev);
1324
1325         /* enable uio/vfio intr/eventfd mapping */
1326         rte_intr_enable(intr_handle);
1327
1328         /* enable support intr */
1329         ixgbe_enable_intr(eth_dev);
1330
1331         /* initialize filter info */
1332         memset(filter_info, 0,
1333                sizeof(struct ixgbe_filter_info));
1334
1335         /* initialize 5tuple filter list */
1336         TAILQ_INIT(&filter_info->fivetuple_list);
1337
1338         /* initialize flow director filter list & hash */
1339         ixgbe_fdir_filter_init(eth_dev);
1340
1341         /* initialize l2 tunnel filter list & hash */
1342         ixgbe_l2_tn_filter_init(eth_dev);
1343
1344         /* initialize flow filter lists */
1345         ixgbe_filterlist_init();
1346
1347         /* initialize bandwidth configuration info */
1348         memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
1349
1350         /* initialize Traffic Manager configuration */
1351         ixgbe_tm_conf_init(eth_dev);
1352
1353         return 0;
1354 }
1355
1356 static int
1357 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1358 {
1359         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1360         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1361         struct ixgbe_hw *hw;
1362
1363         PMD_INIT_FUNC_TRACE();
1364
1365         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1366                 return -EPERM;
1367
1368         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1369
1370         if (hw->adapter_stopped == 0)
1371                 ixgbe_dev_close(eth_dev);
1372
1373         eth_dev->dev_ops = NULL;
1374         eth_dev->rx_pkt_burst = NULL;
1375         eth_dev->tx_pkt_burst = NULL;
1376
1377         /* Unlock any pending hardware semaphore */
1378         ixgbe_swfw_lock_reset(hw);
1379
1380         /* disable uio intr before callback unregister */
1381         rte_intr_disable(intr_handle);
1382         rte_intr_callback_unregister(intr_handle,
1383                                      ixgbe_dev_interrupt_handler, eth_dev);
1384
1385         /* uninitialize PF if max_vfs is not zero */
1386         ixgbe_pf_host_uninit(eth_dev);
1387
1388         rte_free(eth_dev->data->mac_addrs);
1389         eth_dev->data->mac_addrs = NULL;
1390
1391         rte_free(eth_dev->data->hash_mac_addrs);
1392         eth_dev->data->hash_mac_addrs = NULL;
1393
1394         /* remove all the fdir filters & hash */
1395         ixgbe_fdir_filter_uninit(eth_dev);
1396
1397         /* remove all the L2 tunnel filters & hash */
1398         ixgbe_l2_tn_filter_uninit(eth_dev);
1399
1400         /* Remove all ntuple filters of the device */
1401         ixgbe_ntuple_filter_uninit(eth_dev);
1402
1403         /* clear all the filters list */
1404         ixgbe_filterlist_flush();
1405
1406         /* Remove all Traffic Manager configuration */
1407         ixgbe_tm_conf_uninit(eth_dev);
1408
1409         rte_free(eth_dev->security_ctx);
1410
1411         return 0;
1412 }
1413
1414 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
1415 {
1416         struct ixgbe_filter_info *filter_info =
1417                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1418         struct ixgbe_5tuple_filter *p_5tuple;
1419
1420         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
1421                 TAILQ_REMOVE(&filter_info->fivetuple_list,
1422                              p_5tuple,
1423                              entries);
1424                 rte_free(p_5tuple);
1425         }
1426         memset(filter_info->fivetuple_mask, 0,
1427                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1428
1429         return 0;
1430 }
1431
1432 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
1433 {
1434         struct ixgbe_hw_fdir_info *fdir_info =
1435                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1436         struct ixgbe_fdir_filter *fdir_filter;
1437
1438         if (fdir_info->hash_map)
1439                 rte_free(fdir_info->hash_map);
1440         if (fdir_info->hash_handle)
1441                 rte_hash_free(fdir_info->hash_handle);
1442
1443         while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1444                 TAILQ_REMOVE(&fdir_info->fdir_list,
1445                              fdir_filter,
1446                              entries);
1447                 rte_free(fdir_filter);
1448         }
1449
1450         return 0;
1451 }
1452
1453 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
1454 {
1455         struct ixgbe_l2_tn_info *l2_tn_info =
1456                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1457         struct ixgbe_l2_tn_filter *l2_tn_filter;
1458
1459         if (l2_tn_info->hash_map)
1460                 rte_free(l2_tn_info->hash_map);
1461         if (l2_tn_info->hash_handle)
1462                 rte_hash_free(l2_tn_info->hash_handle);
1463
1464         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
1465                 TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
1466                              l2_tn_filter,
1467                              entries);
1468                 rte_free(l2_tn_filter);
1469         }
1470
1471         return 0;
1472 }
1473
1474 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
1475 {
1476         struct ixgbe_hw_fdir_info *fdir_info =
1477                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1478         char fdir_hash_name[RTE_HASH_NAMESIZE];
1479         struct rte_hash_parameters fdir_hash_params = {
1480                 .name = fdir_hash_name,
1481                 .entries = IXGBE_MAX_FDIR_FILTER_NUM,
1482                 .key_len = sizeof(union ixgbe_atr_input),
1483                 .hash_func = rte_hash_crc,
1484                 .hash_func_init_val = 0,
1485                 .socket_id = rte_socket_id(),
1486         };
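        /*
         * The rte_hash created below maps a flow director key (union
         * ixgbe_atr_input) to a slot index; hash_map holds the
         * ixgbe_fdir_filter pointer for each slot, while fdir_list keeps the
         * filters in a list for full traversal.
         */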
1487
1488         TAILQ_INIT(&fdir_info->fdir_list);
1489         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1490                  "fdir_%s", eth_dev->device->name);
1491         fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
1492         if (!fdir_info->hash_handle) {
1493                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1494                 return -EINVAL;
1495         }
1496         fdir_info->hash_map = rte_zmalloc("ixgbe",
1497                                           sizeof(struct ixgbe_fdir_filter *) *
1498                                           IXGBE_MAX_FDIR_FILTER_NUM,
1499                                           0);
1500         if (!fdir_info->hash_map) {
1501                 PMD_INIT_LOG(ERR,
1502                              "Failed to allocate memory for fdir hash map!");
1503                 return -ENOMEM;
1504         }
1505         fdir_info->mask_added = FALSE;
1506
1507         return 0;
1508 }
1509
1510 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
1511 {
1512         struct ixgbe_l2_tn_info *l2_tn_info =
1513                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1514         char l2_tn_hash_name[RTE_HASH_NAMESIZE];
1515         struct rte_hash_parameters l2_tn_hash_params = {
1516                 .name = l2_tn_hash_name,
1517                 .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
1518                 .key_len = sizeof(struct ixgbe_l2_tn_key),
1519                 .hash_func = rte_hash_crc,
1520                 .hash_func_init_val = 0,
1521                 .socket_id = rte_socket_id(),
1522         };
1523
1524         TAILQ_INIT(&l2_tn_info->l2_tn_list);
1525         snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
1526                  "l2_tn_%s", eth_dev->device->name);
1527         l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
1528         if (!l2_tn_info->hash_handle) {
1529                 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
1530                 return -EINVAL;
1531         }
1532         l2_tn_info->hash_map = rte_zmalloc("ixgbe",
1533                                    sizeof(struct ixgbe_l2_tn_filter *) *
1534                                    IXGBE_MAX_L2_TN_FILTER_NUM,
1535                                    0);
1536         if (!l2_tn_info->hash_map) {
1537                 PMD_INIT_LOG(ERR,
1538                         "Failed to allocate memory for L2 TN hash map!");
1539                 return -ENOMEM;
1540         }
1541         l2_tn_info->e_tag_en = FALSE;
1542         l2_tn_info->e_tag_fwd_en = FALSE;
1543         l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
1544
1545         return 0;
1546 }
1547 /*
1548  * Negotiate mailbox API version with the PF.
1549  * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
1550  * Then we try to negotiate starting with the most recent one.
1551  * If all negotiation attempts fail, then we will proceed with
1552  * the default one (ixgbe_mbox_api_10).
1553  */
1554 static void
1555 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1556 {
1557         int32_t i;
1558
1559         /* start with highest supported, proceed down */
1560         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1561                 ixgbe_mbox_api_12,
1562                 ixgbe_mbox_api_11,
1563                 ixgbe_mbox_api_10,
1564         };
1565
1566         for (i = 0;
1567                         i != RTE_DIM(sup_ver) &&
1568                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1569                         i++)
1570                 ;
1571 }
1572
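/*
 * Build a locally administered MAC address: a fixed 00:09:C0 prefix with the
 * locally-administered bit (ETHER_LOCAL_ADMIN_ADDR, assumed to be 0x02) forced
 * on, followed by three random bytes, e.g. an address of the form
 * 02:09:C0:xx:xx:xx.
 */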
1573 static void
1574 generate_random_mac_addr(struct ether_addr *mac_addr)
1575 {
1576         uint64_t random;
1577
1578         /* Set Organizationally Unique Identifier (OUI) prefix. */
1579         mac_addr->addr_bytes[0] = 0x00;
1580         mac_addr->addr_bytes[1] = 0x09;
1581         mac_addr->addr_bytes[2] = 0xC0;
1582         /* Force indication of locally assigned MAC address. */
1583         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1584         /* Generate the last 3 bytes of the MAC address with a random number. */
1585         random = rte_rand();
1586         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1587 }
1588
1589 /*
1590  * Virtual Function device init
1591  */
1592 static int
1593 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1594 {
1595         int diag;
1596         uint32_t tc, tcs;
1597         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1598         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1599         struct ixgbe_hw *hw =
1600                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1601         struct ixgbe_vfta *shadow_vfta =
1602                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1603         struct ixgbe_hwstrip *hwstrip =
1604                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1605         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1606
1607         PMD_INIT_FUNC_TRACE();
1608
1609         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1610         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1611         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1612
1613         /* for secondary processes, we don't initialise any further as primary
1614          * has already done this work. Only check we don't need a different
1615          * RX and TX function.
1616          */
1617         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1618                 struct ixgbe_tx_queue *txq;
1619                 /* TX queue function in primary, set by last queue initialized.
1620                  * Tx queues may not have been initialized by the primary process yet.
1621                  */
1622                 if (eth_dev->data->tx_queues) {
1623                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1624                         ixgbe_set_tx_function(eth_dev, txq);
1625                 } else {
1626                         /* Use default TX function if we get here */
1627                         PMD_INIT_LOG(NOTICE,
1628                                      "No TX queues configured yet. Using default TX function.");
1629                 }
1630
1631                 ixgbe_set_rx_function(eth_dev);
1632
1633                 return 0;
1634         }
1635
1636         rte_eth_copy_pci_info(eth_dev, pci_dev);
1637
1638         hw->device_id = pci_dev->id.device_id;
1639         hw->vendor_id = pci_dev->id.vendor_id;
1640         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1641
1642         /* initialize the vfta */
1643         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1644
1645         /* initialize the hw strip bitmap*/
1646         /* initialize the hw strip bitmap */
1647
1648         /* Initialize the shared code (base driver) */
1649         diag = ixgbe_init_shared_code(hw);
1650         if (diag != IXGBE_SUCCESS) {
1651                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1652                 return -EIO;
1653         }
1654
1655         /* init_mailbox_params */
1656         hw->mbx.ops.init_params(hw);
1657
1658         /* Reset the hw statistics */
1659         ixgbevf_dev_stats_reset(eth_dev);
1660
1661         /* Disable the interrupts for VF */
1662         ixgbevf_intr_disable(hw);
1663
1664         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1665         diag = hw->mac.ops.reset_hw(hw);
1666
1667         /*
1668          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1669          * the underlying PF driver has not assigned a MAC address to the VF.
1670          * In this case, assign a random MAC address.
1671          */
1672         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1673                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1674                 return diag;
1675         }
1676
1677         /* negotiate mailbox API version to use with the PF. */
1678         ixgbevf_negotiate_api(hw);
1679
1680         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1681         ixgbevf_get_queues(hw, &tcs, &tc);
1682
1683         /* Allocate memory for storing MAC addresses */
1684         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1685                                                hw->mac.num_rar_entries, 0);
1686         if (eth_dev->data->mac_addrs == NULL) {
1687                 PMD_INIT_LOG(ERR,
1688                              "Failed to allocate %u bytes needed to store "
1689                              "MAC addresses",
1690                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1691                 return -ENOMEM;
1692         }
1693
1694         /* Generate a random MAC address, if none was assigned by PF. */
1695         if (is_zero_ether_addr(perm_addr)) {
1696                 generate_random_mac_addr(perm_addr);
1697                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1698                 if (diag) {
1699                         rte_free(eth_dev->data->mac_addrs);
1700                         eth_dev->data->mac_addrs = NULL;
1701                         return diag;
1702                 }
1703                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1704                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1705                              "%02x:%02x:%02x:%02x:%02x:%02x",
1706                              perm_addr->addr_bytes[0],
1707                              perm_addr->addr_bytes[1],
1708                              perm_addr->addr_bytes[2],
1709                              perm_addr->addr_bytes[3],
1710                              perm_addr->addr_bytes[4],
1711                              perm_addr->addr_bytes[5]);
1712         }
1713
1714         /* Copy the permanent MAC address */
1715         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1716
1717         /* reset the hardware with the new settings */
1718         diag = hw->mac.ops.start_hw(hw);
1719         switch (diag) {
1720         case  0:
1721                 break;
1722
1723         default:
1724                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1725                 return -EIO;
1726         }
1727
1728         rte_intr_callback_register(intr_handle,
1729                                    ixgbevf_dev_interrupt_handler, eth_dev);
1730         rte_intr_enable(intr_handle);
1731         ixgbevf_intr_enable(hw);
1732
1733         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1734                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1735                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1736
1737         return 0;
1738 }
1739
1740 /* Virtual Function device uninit */
1741
1742 static int
1743 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1744 {
1745         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1746         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1747         struct ixgbe_hw *hw;
1748
1749         PMD_INIT_FUNC_TRACE();
1750
1751         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1752                 return -EPERM;
1753
1754         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1755
1756         if (hw->adapter_stopped == 0)
1757                 ixgbevf_dev_close(eth_dev);
1758
1759         eth_dev->dev_ops = NULL;
1760         eth_dev->rx_pkt_burst = NULL;
1761         eth_dev->tx_pkt_burst = NULL;
1762
1763         /* Disable the interrupts for VF */
1764         ixgbevf_intr_disable(hw);
1765
1766         rte_free(eth_dev->data->mac_addrs);
1767         eth_dev->data->mac_addrs = NULL;
1768
1769         rte_intr_disable(intr_handle);
1770         rte_intr_callback_unregister(intr_handle,
1771                                      ixgbevf_dev_interrupt_handler, eth_dev);
1772
1773         return 0;
1774 }
1775
1776 static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1777         struct rte_pci_device *pci_dev)
1778 {
1779         return rte_eth_dev_pci_generic_probe(pci_dev,
1780                 sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
1781 }
1782
1783 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
1784 {
1785         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
1786 }
1787
1788 static struct rte_pci_driver rte_ixgbe_pmd = {
1789         .id_table = pci_id_ixgbe_map,
1790         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
1791                      RTE_PCI_DRV_IOVA_AS_VA,
1792         .probe = eth_ixgbe_pci_probe,
1793         .remove = eth_ixgbe_pci_remove,
1794 };
1795
1796 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1797         struct rte_pci_device *pci_dev)
1798 {
1799         return rte_eth_dev_pci_generic_probe(pci_dev,
1800                 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
1801 }
1802
1803 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
1804 {
1805         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
1806 }
1807
1808 /*
1809  * virtual function driver struct
1810  */
1811 static struct rte_pci_driver rte_ixgbevf_pmd = {
1812         .id_table = pci_id_ixgbevf_map,
1813         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
1814         .probe = eth_ixgbevf_pci_probe,
1815         .remove = eth_ixgbevf_pci_remove,
1816 };
1817
1818 static int
1819 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1820 {
1821         struct ixgbe_hw *hw =
1822                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1823         struct ixgbe_vfta *shadow_vfta =
1824                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1825         uint32_t vfta;
1826         uint32_t vid_idx;
1827         uint32_t vid_bit;
1828
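        /*
         * The VFTA is an array of IXGBE_VFTA_SIZE (128) 32-bit words, one bit
         * per VLAN ID. Worked example: for vlan_id = 100, vid_idx =
         * (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 0x10, so
         * bit 4 of VFTA[3] is the filter bit for VLAN 100.
         */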
1829         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1830         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1831         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1832         if (on)
1833                 vfta |= vid_bit;
1834         else
1835                 vfta &= ~vid_bit;
1836         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1837
1838         /* update local VFTA copy */
1839         shadow_vfta->vfta[vid_idx] = vfta;
1840
1841         return 0;
1842 }
1843
1844 static void
1845 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1846 {
1847         if (on)
1848                 ixgbe_vlan_hw_strip_enable(dev, queue);
1849         else
1850                 ixgbe_vlan_hw_strip_disable(dev, queue);
1851 }
1852
1853 static int
1854 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1855                     enum rte_vlan_type vlan_type,
1856                     uint16_t tpid)
1857 {
1858         struct ixgbe_hw *hw =
1859                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1860         int ret = 0;
1861         uint32_t reg;
1862         uint32_t qinq;
1863
1864         qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1865         qinq &= IXGBE_DMATXCTL_GDV;
1866
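        /*
         * The GDV bit of DMATXCTL indicates whether double VLAN (QinQ) is
         * enabled; which TPID register is written in the cases below depends
         * on that mode.
         */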
1867         switch (vlan_type) {
1868         case ETH_VLAN_TYPE_INNER:
1869                 if (qinq) {
1870                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1871                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1872                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1873                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1874                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1875                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1876                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1877                 } else {
1878                         ret = -ENOTSUP;
1879                         PMD_DRV_LOG(ERR, "Inner VLAN type is not supported"
1880                                     " when double VLAN (QinQ) is disabled");
1881                 }
1882                 break;
1883         case ETH_VLAN_TYPE_OUTER:
1884                 if (qinq) {
1885                         /* Only the high 16 bits are valid */
1886                         IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1887                                         IXGBE_EXVET_VET_EXT_SHIFT);
1888                 } else {
1889                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1890                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1891                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1892                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1893                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1894                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1895                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1896                 }
1897
1898                 break;
1899         default:
1900                 ret = -EINVAL;
1901                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1902                 break;
1903         }
1904
1905         return ret;
1906 }
1907
1908 void
1909 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1910 {
1911         struct ixgbe_hw *hw =
1912                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1913         uint32_t vlnctrl;
1914
1915         PMD_INIT_FUNC_TRACE();
1916
1917         /* Filter Table Disable */
1918         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1919         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1920
1921         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1922 }
1923
1924 void
1925 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1926 {
1927         struct ixgbe_hw *hw =
1928                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1929         struct ixgbe_vfta *shadow_vfta =
1930                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1931         uint32_t vlnctrl;
1932         uint16_t i;
1933
1934         PMD_INIT_FUNC_TRACE();
1935
1936         /* Filter Table Enable */
1937         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1938         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1939         vlnctrl |= IXGBE_VLNCTRL_VFE;
1940
1941         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1942
1943         /* write whatever is in local vfta copy */
1944         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1945                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1946 }
1947
1948 static void
1949 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1950 {
1951         struct ixgbe_hwstrip *hwstrip =
1952                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1953         struct ixgbe_rx_queue *rxq;
1954
1955         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1956                 return;
1957
1958         if (on)
1959                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1960         else
1961                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1962
1963         if (queue >= dev->data->nb_rx_queues)
1964                 return;
1965
1966         rxq = dev->data->rx_queues[queue];
1967
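        /*
         * PKT_RX_VLAN_STRIPPED marks mbufs whose VLAN header was removed by
         * hardware with the TCI saved in mbuf->vlan_tci; when stripping is off
         * only PKT_RX_VLAN_PKT is reported for tagged packets.
         */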
1968         if (on)
1969                 rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
1970         else
1971                 rxq->vlan_flags = PKT_RX_VLAN_PKT;
1972 }
1973
1974 static void
1975 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1976 {
1977         struct ixgbe_hw *hw =
1978                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1979         uint32_t ctrl;
1980
1981         PMD_INIT_FUNC_TRACE();
1982
1983         if (hw->mac.type == ixgbe_mac_82598EB) {
1984                 /* No queue level support */
1985                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue level hw strip");
1986                 return;
1987         }
1988
1989         /* On other 10G NICs, the VLAN strip can be set up per queue in RXDCTL */
1990         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1991         ctrl &= ~IXGBE_RXDCTL_VME;
1992         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1993
1994         /* record this setting for HW strip per queue */
1995         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1996 }
1997
1998 static void
1999 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
2000 {
2001         struct ixgbe_hw *hw =
2002                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2003         uint32_t ctrl;
2004
2005         PMD_INIT_FUNC_TRACE();
2006
2007         if (hw->mac.type == ixgbe_mac_82598EB) {
2008                 /* No queue level support */
2009                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue level hw strip");
2010                 return;
2011         }
2012
2013         /* On other 10G NICs, the VLAN strip can be set up per queue in RXDCTL */
2014         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2015         ctrl |= IXGBE_RXDCTL_VME;
2016         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2017
2018         /* record this setting for HW strip per queue */
2019         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2020 }
2021
2022 void
2023 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
2024 {
2025         struct ixgbe_hw *hw =
2026                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2027         uint32_t ctrl;
2028         uint16_t i;
2029         struct ixgbe_rx_queue *rxq;
2030
2031         PMD_INIT_FUNC_TRACE();
2032
2033         if (hw->mac.type == ixgbe_mac_82598EB) {
2034                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2035                 ctrl &= ~IXGBE_VLNCTRL_VME;
2036                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2037         } else {
2038                 /* On other 10G NICs, the VLAN strip can be set up per queue in RXDCTL */
2039                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2040                         rxq = dev->data->rx_queues[i];
2041                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2042                         ctrl &= ~IXGBE_RXDCTL_VME;
2043                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2044
2045                         /* record this setting for HW strip per queue */
2046                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
2047                 }
2048         }
2049 }
2050
2051 void
2052 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
2053 {
2054         struct ixgbe_hw *hw =
2055                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2056         uint32_t ctrl;
2057         uint16_t i;
2058         struct ixgbe_rx_queue *rxq;
2059
2060         PMD_INIT_FUNC_TRACE();
2061
2062         if (hw->mac.type == ixgbe_mac_82598EB) {
2063                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2064                 ctrl |= IXGBE_VLNCTRL_VME;
2065                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2066         } else {
2067                 /* On other 10G NICs, the VLAN strip can be set up per queue in RXDCTL */
2068                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2069                         rxq = dev->data->rx_queues[i];
2070                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2071                         ctrl |= IXGBE_RXDCTL_VME;
2072                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2073
2074                         /* record this setting for HW strip per queue */
2075                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
2076                 }
2077         }
2078 }
2079
2080 static void
2081 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2082 {
2083         struct ixgbe_hw *hw =
2084                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2085         uint32_t ctrl;
2086
2087         PMD_INIT_FUNC_TRACE();
2088
2089         /* DMATXCTL: Generic Double VLAN Disable */
2090         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2091         ctrl &= ~IXGBE_DMATXCTL_GDV;
2092         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2093
2094         /* CTRL_EXT: Global Double VLAN Disable */
2095         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2096         ctrl &= ~IXGBE_EXTENDED_VLAN;
2097         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2098
2099 }
2100
2101 static void
2102 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2103 {
2104         struct ixgbe_hw *hw =
2105                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2106         uint32_t ctrl;
2107
2108         PMD_INIT_FUNC_TRACE();
2109
2110         /* DMATXCTL: Generic Double VLAN Enable */
2111         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2112         ctrl |= IXGBE_DMATXCTL_GDV;
2113         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2114
2115         /* CTRL_EXT: Global Double VLAN Enable */
2116         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2117         ctrl |= IXGBE_EXTENDED_VLAN;
2118         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2119
2120         /* Clear pooling mode of PFVTCTL. It's required by X550. */
2121         if (hw->mac.type == ixgbe_mac_X550 ||
2122             hw->mac.type == ixgbe_mac_X550EM_x ||
2123             hw->mac.type == ixgbe_mac_X550EM_a) {
2124                 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2125                 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2126                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2127         }
2128
2129         /*
2130          * The VET_EXT field in the EXVET register defaults to 0x8100, so no
2131          * change is needed. The same applies to the VT field of DMATXCTL.
2132          */
2133 }
2134
2135 static void
2136 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2137 {
2138         if (mask & ETH_VLAN_STRIP_MASK) {
2139                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2140                         ixgbe_vlan_hw_strip_enable_all(dev);
2141                 else
2142                         ixgbe_vlan_hw_strip_disable_all(dev);
2143         }
2144
2145         if (mask & ETH_VLAN_FILTER_MASK) {
2146                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2147                         ixgbe_vlan_hw_filter_enable(dev);
2148                 else
2149                         ixgbe_vlan_hw_filter_disable(dev);
2150         }
2151
2152         if (mask & ETH_VLAN_EXTEND_MASK) {
2153                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2154                         ixgbe_vlan_hw_extend_enable(dev);
2155                 else
2156                         ixgbe_vlan_hw_extend_disable(dev);
2157         }
2158 }
2159
2160 static void
2161 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2162 {
2163         struct ixgbe_hw *hw =
2164                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2165         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2166         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2167
2168         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2169         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2170 }
2171
2172 static int
2173 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2174 {
2175         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2176
2177         switch (nb_rx_q) {
2178         case 1:
2179         case 2:
2180                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2181                 break;
2182         case 4:
2183                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2184                 break;
2185         default:
2186                 return -EINVAL;
2187         }
2188
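        /*
         * Worked example (illustrative values only): with nb_rx_q = 4 the
         * switch above selects 32 pools; if max_vfs were 16, the assignments
         * below would give nb_q_per_pool = 4 and def_pool_q_idx = 16 * 4 = 64,
         * i.e. the default (PF) pool starts right after the VF queues.
         */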
2189         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
2190         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
2191
2192         return 0;
2193 }
2194
2195 static int
2196 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2197 {
2198         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2199         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2200         uint16_t nb_rx_q = dev->data->nb_rx_queues;
2201         uint16_t nb_tx_q = dev->data->nb_tx_queues;
2202
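        /*
         * Two cases are validated below: when SR-IOV is active, only the
         * VMDq-based Rx/Tx modes are accepted and queue counts are bounded by
         * nb_q_per_pool; without SR-IOV, the VMDQ+DCB and plain DCB
         * configurations are checked against their fixed queue and TC counts.
         */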
2203         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2204                 /* check multi-queue mode */
2205                 switch (dev_conf->rxmode.mq_mode) {
2206                 case ETH_MQ_RX_VMDQ_DCB:
2207                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2208                         break;
2209                 case ETH_MQ_RX_VMDQ_DCB_RSS:
2210                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
2211                         PMD_INIT_LOG(ERR, "SRIOV active,"
2212                                         " unsupported mq_mode rx %d.",
2213                                         dev_conf->rxmode.mq_mode);
2214                         return -EINVAL;
2215                 case ETH_MQ_RX_RSS:
2216                 case ETH_MQ_RX_VMDQ_RSS:
2217                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2218                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2219                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2220                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2221                                                 " invalid queue number"
2222                                                 " for VMDQ RSS, allowed"
2223                                                 " values are 1, 2 or 4.");
2224                                         return -EINVAL;
2225                                 }
2226                         break;
2227                 case ETH_MQ_RX_VMDQ_ONLY:
2228                 case ETH_MQ_RX_NONE:
2229                         /* if no mq mode is configured, use the default scheme */
2230                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2231                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
2232                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
2233                         break;
2234                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
2235                         /* SRIOV only works in VMDq-enabled mode */
2236                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2237                                         " wrong mq_mode rx %d.",
2238                                         dev_conf->rxmode.mq_mode);
2239                         return -EINVAL;
2240                 }
2241
2242                 switch (dev_conf->txmode.mq_mode) {
2243                 case ETH_MQ_TX_VMDQ_DCB:
2244                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2245                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2246                         break;
2247                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2248                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2249                         break;
2250                 }
2251
2252                 /* check valid queue number */
2253                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2254                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2255                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2256                                         " nb_rx_q=%d nb_tx_q=%d queue number"
2257                                         " must be less than or equal to %d.",
2258                                         nb_rx_q, nb_tx_q,
2259                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2260                         return -EINVAL;
2261                 }
2262         } else {
2263                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2264                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2265                                           " not supported.");
2266                         return -EINVAL;
2267                 }
2268                 /* check configuration for vmdq+dcb mode */
2269                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2270                         const struct rte_eth_vmdq_dcb_conf *conf;
2271
2272                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2273                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2274                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
2275                                 return -EINVAL;
2276                         }
2277                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2278                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2279                                conf->nb_queue_pools == ETH_32_POOLS)) {
2280                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2281                                                 " nb_queue_pools must be %d or %d.",
2282                                                 ETH_16_POOLS, ETH_32_POOLS);
2283                                 return -EINVAL;
2284                         }
2285                 }
2286                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2287                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
2288
2289                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2290                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2291                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
2292                                 return -EINVAL;
2293                         }
2294                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2295                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2296                                conf->nb_queue_pools == ETH_32_POOLS)) {
2297                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2298                                                 " nb_queue_pools != %d and"
2299                                                 " nb_queue_pools != %d.",
2300                                                 ETH_16_POOLS, ETH_32_POOLS);
2301                                 return -EINVAL;
2302                         }
2303                 }
2304
2305                 /* For DCB mode check our configuration before we go further */
2306                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2307                         const struct rte_eth_dcb_rx_conf *conf;
2308
2309                         if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
2310                                 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
2311                                                  IXGBE_DCB_NB_QUEUES);
2312                                 return -EINVAL;
2313                         }
2314                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2315                         if (!(conf->nb_tcs == ETH_4_TCS ||
2316                                conf->nb_tcs == ETH_8_TCS)) {
2317                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2318                                                 " and nb_tcs != %d.",
2319                                                 ETH_4_TCS, ETH_8_TCS);
2320                                 return -EINVAL;
2321                         }
2322                 }
2323
2324                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2325                         const struct rte_eth_dcb_tx_conf *conf;
2326
2327                         if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
2328                                 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
2329                                                  IXGBE_DCB_NB_QUEUES);
2330                                 return -EINVAL;
2331                         }
2332                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2333                         if (!(conf->nb_tcs == ETH_4_TCS ||
2334                                conf->nb_tcs == ETH_8_TCS)) {
2335                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2336                                                 " and nb_tcs != %d.",
2337                                                 ETH_4_TCS, ETH_8_TCS);
2338                                 return -EINVAL;
2339                         }
2340                 }
2341
2342                 /*
2343                  * When DCB/VT is off, the maximum number of queues changes,
2344                  * except for 82598EB, where it remains constant.
2345                  */
2346                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2347                                 hw->mac.type != ixgbe_mac_82598EB) {
2348                         if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2349                                 PMD_INIT_LOG(ERR,
2350                                              "Neither VT nor DCB are enabled, "
2351                                              "nb_tx_q > %d.",
2352                                              IXGBE_NONE_MODE_TX_NB_QUEUES);
2353                                 return -EINVAL;
2354                         }
2355                 }
2356         }
2357         return 0;
2358 }
2359
2360 static int
2361 ixgbe_dev_configure(struct rte_eth_dev *dev)
2362 {
2363         struct ixgbe_interrupt *intr =
2364                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2365         struct ixgbe_adapter *adapter =
2366                 (struct ixgbe_adapter *)dev->data->dev_private;
2367         int ret;
2368
2369         PMD_INIT_FUNC_TRACE();
2370         /* multiple queue mode checking */
2371         ret  = ixgbe_check_mq_mode(dev);
2372         if (ret != 0) {
2373                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2374                             ret);
2375                 return ret;
2376         }
2377
2378         /* set flag to update link status after init */
2379         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2380
2381         /*
2382          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
2383          * allocation or vector Rx preconditions, we will reset it.
2384          */
2385         adapter->rx_bulk_alloc_allowed = true;
2386         adapter->rx_vec_allowed = true;
2387
2388         return 0;
2389 }
2390
2391 static void
2392 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2393 {
2394         struct ixgbe_hw *hw =
2395                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2396         struct ixgbe_interrupt *intr =
2397                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2398         uint32_t gpie;
2399
2400         /* only set it up on X550EM_X */
2401         if (hw->mac.type == ixgbe_mac_X550EM_x) {
2402                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2403                 gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2404                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2405                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2406                         intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2407         }
2408 }
2409
2410 int
2411 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
2412                         uint16_t tx_rate, uint64_t q_msk)
2413 {
2414         struct ixgbe_hw *hw;
2415         struct ixgbe_vf_info *vfinfo;
2416         struct rte_eth_link link;
2417         uint8_t  nb_q_per_pool;
2418         uint32_t queue_stride;
2419         uint32_t queue_idx, idx = 0, vf_idx;
2420         uint32_t queue_end;
2421         uint16_t total_rate = 0;
2422         struct rte_pci_device *pci_dev;
2423
2424         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2425         rte_eth_link_get_nowait(dev->data->port_id, &link);
2426
2427         if (vf >= pci_dev->max_vfs)
2428                 return -EINVAL;
2429
2430         if (tx_rate > link.link_speed)
2431                 return -EINVAL;
2432
2433         if (q_msk == 0)
2434                 return 0;
2435
2436         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2437         vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
2438         nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2439         queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2440         queue_idx = vf * queue_stride;
2441         queue_end = queue_idx + nb_q_per_pool - 1;
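        /*
         * Illustration, assuming IXGBE_MAX_RX_QUEUE_NUM is 128: with 64 pools
         * active the stride is 128 / 64 = 2 queues per VF, so VF 3 owns queues
         * 6..7 and the per-queue rate limit below is applied to that range.
         */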
2442         if (queue_end >= hw->mac.max_tx_queues)
2443                 return -EINVAL;
2444
2445         if (vfinfo) {
2446                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
2447                         if (vf_idx == vf)
2448                                 continue;
2449                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
2450                                 idx++)
2451                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
2452                 }
2453         } else {
2454                 return -EINVAL;
2455         }
2456
2457         /* Store tx_rate for this vf. */
2458         for (idx = 0; idx < nb_q_per_pool; idx++) {
2459                 if (((uint64_t)0x1 << idx) & q_msk) {
2460                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
2461                                 vfinfo[vf].tx_rate[idx] = tx_rate;
2462                         total_rate += tx_rate;
2463                 }
2464         }
2465
2466         if (total_rate > dev->data->dev_link.link_speed) {
2467                 /* Reset the stored TX rate of the VF if it would exceed
2468                  * the link speed.
2469                  */
2470                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
2471                 return -EINVAL;
2472         }
2473
2474         /* Set RTTBCNRC of each queue/pool for this VF */
2475         for (; queue_idx <= queue_end; queue_idx++) {
2476                 if (0x1 & q_msk)
2477                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
2478                 q_msk = q_msk >> 1;
2479         }
2480
2481         return 0;
2482 }
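/*
 * Illustrative usage sketch (not part of the driver): applications normally
 * reach this function through the PMD-specific wrapper, assumed here to be
 * rte_pmd_ixgbe_set_vf_rate_limit() from rte_pmd_ixgbe.h; the wrapper name
 * and the values below are for illustration only.
 *
 *     uint16_t port_id = 0;      // PF port bound to this driver
 *     uint16_t vf = 1;           // VF index, must be < max_vfs
 *     uint16_t tx_rate = 1000;   // Mbit/s, must not exceed the link speed
 *     uint64_t q_msk = 0x3;      // limit the VF's first two Tx queues
 *
 *     if (rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, tx_rate, q_msk) != 0)
 *             printf("rate limit rejected (bad VF/queue range or rate)\n");
 */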
2483
2484 /*
2485  * Configure device link speed and setup link.
2486  * It returns 0 on success.
2487  */
2488 static int
2489 ixgbe_dev_start(struct rte_eth_dev *dev)
2490 {
2491         struct ixgbe_hw *hw =
2492                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2493         struct ixgbe_vf_info *vfinfo =
2494                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2495         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2496         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2497         uint32_t intr_vector = 0;
2498         int err, link_up = 0, negotiate = 0;
2499         uint32_t speed = 0;
2500         int mask = 0;
2501         int status;
2502         uint16_t vf, idx;
2503         uint32_t *link_speeds;
2504         struct ixgbe_tm_conf *tm_conf =
2505                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2506
2507         PMD_INIT_FUNC_TRACE();
2508
2509         /* IXGBE devices don't support:
2510          *    - half duplex (checked afterwards for valid speeds)
2511          *    - fixed speed: TODO implement
2512          */
2513         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2514                 PMD_INIT_LOG(ERR,
2515                 "Invalid link_speeds for port %u, fix speed not supported",
2516                                 dev->data->port_id);
2517                 return -EINVAL;
2518         }
2519
2520         /* disable uio/vfio intr/eventfd mapping */
2521         rte_intr_disable(intr_handle);
2522
2523         /* stop adapter */
2524         hw->adapter_stopped = 0;
2525         ixgbe_stop_adapter(hw);
2526
2527         /* reinitialize adapter
2528          * this calls reset and start
2529          */
2530         status = ixgbe_pf_reset_hw(hw);
2531         if (status != 0)
2532                 return -1;
2533         hw->mac.ops.start_hw(hw);
2534         hw->mac.get_link_status = true;
2535
2536         /* configure PF module if SRIOV enabled */
2537         ixgbe_pf_host_configure(dev);
2538
2539         ixgbe_dev_phy_intr_setup(dev);
2540
2541         /* check and configure queue intr-vector mapping */
2542         if ((rte_intr_cap_multiple(intr_handle) ||
2543              !RTE_ETH_DEV_SRIOV(dev).active) &&
2544             dev->data->dev_conf.intr_conf.rxq != 0) {
2545                 intr_vector = dev->data->nb_rx_queues;
2546                 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2547                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2548                                         IXGBE_MAX_INTR_QUEUE_NUM);
2549                         return -ENOTSUP;
2550                 }
2551                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2552                         return -1;
2553         }
2554
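        /*
         * Allocate one vector slot per Rx queue; ixgbe_configure_msix() later
         * uses this table to bind each queue to its MSI-X vector.
         */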
2555         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2556                 intr_handle->intr_vec =
2557                         rte_zmalloc("intr_vec",
2558                                     dev->data->nb_rx_queues * sizeof(int), 0);
2559                 if (intr_handle->intr_vec == NULL) {
2560                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2561                                      " intr_vec", dev->data->nb_rx_queues);
2562                         return -ENOMEM;
2563                 }
2564         }
2565
2566         /* configure MSI-X for sleep until Rx interrupt */
2567         ixgbe_configure_msix(dev);
2568
2569         /* initialize transmission unit */
2570         ixgbe_dev_tx_init(dev);
2571
2572         /* This can fail when allocating mbufs for descriptor rings */
2573         err = ixgbe_dev_rx_init(dev);
2574         if (err) {
2575                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2576                 goto error;
2577         }
2578
2579         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2580                 ETH_VLAN_EXTEND_MASK;
2581         ixgbe_vlan_offload_set(dev, mask);
2582
2583         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2584                 /* Enable vlan filtering for VMDq */
2585                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2586         }
2587
2588         /* Configure DCB hw */
2589         ixgbe_configure_dcb(dev);
2590
2591         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2592                 err = ixgbe_fdir_configure(dev);
2593                 if (err)
2594                         goto error;
2595         }
2596
2597         /* Restore vf rate limit */
2598         if (vfinfo != NULL) {
2599                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
2600                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2601                                 if (vfinfo[vf].tx_rate[idx] != 0)
2602                                         ixgbe_set_vf_rate_limit(
2603                                                 dev, vf,
2604                                                 vfinfo[vf].tx_rate[idx],
2605                                                 1 << idx);
2606         }
2607
2608         ixgbe_restore_statistics_mapping(dev);
2609
2610         err = ixgbe_dev_rxtx_start(dev);
2611         if (err < 0) {
2612                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2613                 goto error;
2614         }
2615
2616         /* Skip link setup if loopback mode is enabled for 82599. */
2617         if (hw->mac.type == ixgbe_mac_82599EB &&
2618                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2619                 goto skip_link_setup;
2620
2621         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2622                 err = hw->mac.ops.setup_sfp(hw);
2623                 if (err)
2624                         goto error;
2625         }
2626
2627         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2628                 /* Turn on the copper */
2629                 ixgbe_set_phy_power(hw, true);
2630         } else {
2631                 /* Turn on the laser */
2632                 ixgbe_enable_tx_laser(hw);
2633         }
2634
2635         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2636         if (err)
2637                 goto error;
2638         dev->data->dev_link.link_status = link_up;
2639
2640         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2641         if (err)
2642                 goto error;
2643
2644         link_speeds = &dev->data->dev_conf.link_speeds;
2645         if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2646                         ETH_LINK_SPEED_10G)) {
2647                 PMD_INIT_LOG(ERR, "Invalid link setting");
2648                 goto error;
2649         }
2650
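        /*
         * Translate the requested ETH_LINK_SPEED_* flags into the
         * IXGBE_LINK_SPEED_* mask expected by ixgbe_setup_link(); a plain
         * autoneg request selects the full autonegotiation mask for the MAC.
         */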
2651         speed = 0x0;
2652         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2653                 switch (hw->mac.type) {
2654                 case ixgbe_mac_82598EB:
2655                         speed = IXGBE_LINK_SPEED_82598_AUTONEG;
2656                         break;
2657                 case ixgbe_mac_82599EB:
2658                 case ixgbe_mac_X540:
2659                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2660                         break;
2661                 case ixgbe_mac_X550:
2662                 case ixgbe_mac_X550EM_x:
2663                 case ixgbe_mac_X550EM_a:
2664                         speed = IXGBE_LINK_SPEED_X550_AUTONEG;
2665                         break;
2666                 default:
2667                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2668                 }
2669         } else {
2670                 if (*link_speeds & ETH_LINK_SPEED_10G)
2671                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2672                 if (*link_speeds & ETH_LINK_SPEED_1G)
2673                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2674                 if (*link_speeds & ETH_LINK_SPEED_100M)
2675                         speed |= IXGBE_LINK_SPEED_100_FULL;
2676         }
2677
2678         err = ixgbe_setup_link(hw, speed, link_up);
2679         if (err)
2680                 goto error;
2681
2682 skip_link_setup:
2683
2684         if (rte_intr_allow_others(intr_handle)) {
2685                 /* check if lsc interrupt is enabled */
2686                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2687                         ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
2688                 else
2689                         ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
2690                 ixgbe_dev_macsec_interrupt_setup(dev);
2691         } else {
2692                 rte_intr_callback_unregister(intr_handle,
2693                                              ixgbe_dev_interrupt_handler, dev);
2694                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2695                         PMD_INIT_LOG(INFO, "lsc won't enable because of"
2696                                      " no intr multiplex");
2697         }
2698
2699         /* check if rxq interrupt is enabled */
2700         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2701             rte_intr_dp_is_en(intr_handle))
2702                 ixgbe_dev_rxq_interrupt_setup(dev);
2703
2704         /* enable uio/vfio intr/eventfd mapping */
2705         rte_intr_enable(intr_handle);
2706
2707         /* resume interrupts that were enabled before the hw reset */
2708         ixgbe_enable_intr(dev);
2709         ixgbe_l2_tunnel_conf(dev);
2710         ixgbe_filter_restore(dev);
2711
2712         if (tm_conf->root && !tm_conf->committed)
2713                 PMD_DRV_LOG(WARNING,
2714                             "please call hierarchy_commit() "
2715                             "before starting the port");
2716
2717         return 0;
2718
2719 error:
2720         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2721         ixgbe_dev_clear_queues(dev);
2722         return -EIO;
2723 }
2724
2725 /*
2726  * Stop device: disable rx and tx functions to allow for reconfiguring.
2727  */
2728 static void
2729 ixgbe_dev_stop(struct rte_eth_dev *dev)
2730 {
2731         struct rte_eth_link link;
2732         struct ixgbe_hw *hw =
2733                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2734         struct ixgbe_vf_info *vfinfo =
2735                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2736         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2737         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2738         int vf;
2739         struct ixgbe_tm_conf *tm_conf =
2740                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2741
2742         PMD_INIT_FUNC_TRACE();
2743
2744         /* disable interrupts */
2745         ixgbe_disable_intr(hw);
2746
2747         /* reset the NIC */
2748         ixgbe_pf_reset_hw(hw);
2749         hw->adapter_stopped = 0;
2750
2751         /* stop adapter */
2752         ixgbe_stop_adapter(hw);
2753
2754         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2755                 vfinfo[vf].clear_to_send = false;
2756
2757         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2758                 /* Turn off the copper */
2759                 ixgbe_set_phy_power(hw, false);
2760         } else {
2761                 /* Turn off the laser */
2762                 ixgbe_disable_tx_laser(hw);
2763         }
2764
2765         ixgbe_dev_clear_queues(dev);
2766
2767         /* Clear stored conf */
2768         dev->data->scattered_rx = 0;
2769         dev->data->lro = 0;
2770
2771         /* Clear recorded link status */
2772         memset(&link, 0, sizeof(link));
2773         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2774
2775         if (!rte_intr_allow_others(intr_handle))
2776                 /* restore the default interrupt handler */
2777                 rte_intr_callback_register(intr_handle,
2778                                            ixgbe_dev_interrupt_handler,
2779                                            (void *)dev);
2780
2781         /* Clean datapath event and queue/vec mapping */
2782         rte_intr_efd_disable(intr_handle);
2783         if (intr_handle->intr_vec != NULL) {
2784                 rte_free(intr_handle->intr_vec);
2785                 intr_handle->intr_vec = NULL;
2786         }
2787
2788         /* reset hierarchy commit */
2789         tm_conf->committed = false;
2790 }
2791
2792 /*
2793  * Set device link up: enable tx.
2794  */
2795 static int
2796 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2797 {
2798         struct ixgbe_hw *hw =
2799                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2800         if (hw->mac.type == ixgbe_mac_82599EB) {
2801 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2802                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2803                         /* Not supported in bypass mode */
2804                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2805                                      "by device id 0x%x", hw->device_id);
2806                         return -ENOTSUP;
2807                 }
2808 #endif
2809         }
2810
2811         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2812                 /* Turn on the copper */
2813                 ixgbe_set_phy_power(hw, true);
2814         } else {
2815                 /* Turn on the laser */
2816                 ixgbe_enable_tx_laser(hw);
2817         }
2818
2819         return 0;
2820 }
2821
2822 /*
2823  * Set device link down: disable tx.
2824  */
2825 static int
2826 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2827 {
2828         struct ixgbe_hw *hw =
2829                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2830         if (hw->mac.type == ixgbe_mac_82599EB) {
2831 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2832                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2833                         /* Not supported in bypass mode */
2834                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2835                                      "by device id 0x%x", hw->device_id);
2836                         return -ENOTSUP;
2837                 }
2838 #endif
2839         }
2840
2841         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2842                 /* Turn off the copper */
2843                 ixgbe_set_phy_power(hw, false);
2844         } else {
2845                 /* Turn off the laser */
2846                 ixgbe_disable_tx_laser(hw);
2847         }
2848
2849         return 0;
2850 }
2851
2852 /*
2853  * Reset and stop device.
2854  */
2855 static void
2856 ixgbe_dev_close(struct rte_eth_dev *dev)
2857 {
2858         struct ixgbe_hw *hw =
2859                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2860
2861         PMD_INIT_FUNC_TRACE();
2862
2863         ixgbe_pf_reset_hw(hw);
2864
2865         ixgbe_dev_stop(dev);
2866         hw->adapter_stopped = 1;
2867
2868         ixgbe_dev_free_queues(dev);
2869
2870         ixgbe_disable_pcie_master(hw);
2871
2872         /* reprogram the RAR[0] in case user changed it. */
2873         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2874 }
2875
2876 /*
2877  * Reset PF device.
2878  */
2879 static int
2880 ixgbe_dev_reset(struct rte_eth_dev *dev)
2881 {
2882         int ret;
2883
2884         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2885          * all its VFs so they stay aligned with it. The detailed notification
2886          * mechanism is PMD specific; for the ixgbe PF it is rather complex.
2887          * To avoid unexpected behavior in the VFs, resetting a PF with SR-IOV
2888          * active is currently not supported. It might be supported later.
2889          */
2890         if (dev->data->sriov.active)
2891                 return -ENOTSUP;
2892
2893         ret = eth_ixgbe_dev_uninit(dev);
2894         if (ret)
2895                 return ret;
2896
2897         ret = eth_ixgbe_dev_init(dev);
2898
2899         return ret;
2900 }
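/*
 * Illustrative note (not part of the driver): this callback is normally
 * invoked through the generic ethdev API, e.g. rte_eth_dev_reset(port_id),
 * typically after the application has stopped the port. On a PF with SR-IOV
 * active it is expected to return -ENOTSUP, as explained above.
 */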
2901
2902 static void
2903 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2904                            struct ixgbe_hw_stats *hw_stats,
2905                            struct ixgbe_macsec_stats *macsec_stats,
2906                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
2907                            uint64_t *total_qprc, uint64_t *total_qprdc)
2908 {
2909         uint32_t bprc, lxon, lxoff, total;
2910         uint32_t delta_gprc = 0;
2911         unsigned i;
2912         /* Workaround for RX byte count not including CRC bytes when CRC
2913          * strip is enabled. CRC bytes are removed from counters when crc_strip
2914          * is disabled.
2915          */
2916         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
2917                         IXGBE_HLREG0_RXCRCSTRP);
2918
2919         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2920         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2921         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2922         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2923
2924         for (i = 0; i < 8; i++) {
2925                 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2926
2927                 /* global total per queue */
2928                 hw_stats->mpc[i] += mp;
2929                 /* Running comprehensive total for stats display */
2930                 *total_missed_rx += hw_stats->mpc[i];
2931                 if (hw->mac.type == ixgbe_mac_82598EB) {
2932                         hw_stats->rnbc[i] +=
2933                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2934                         hw_stats->pxonrxc[i] +=
2935                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2936                         hw_stats->pxoffrxc[i] +=
2937                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2938                 } else {
2939                         hw_stats->pxonrxc[i] +=
2940                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2941                         hw_stats->pxoffrxc[i] +=
2942                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2943                         hw_stats->pxon2offc[i] +=
2944                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2945                 }
2946                 hw_stats->pxontxc[i] +=
2947                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2948                 hw_stats->pxofftxc[i] +=
2949                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2950         }
2951         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2952                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2953                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2954                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2955
2956                 delta_gprc += delta_qprc;
2957
2958                 hw_stats->qprc[i] += delta_qprc;
2959                 hw_stats->qptc[i] += delta_qptc;
2960
2961                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2962                 hw_stats->qbrc[i] +=
2963                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2964                 if (crc_strip == 0)
2965                         hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
2966
2967                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2968                 hw_stats->qbtc[i] +=
2969                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2970
2971                 hw_stats->qprdc[i] += delta_qprdc;
2972                 *total_qprdc += hw_stats->qprdc[i];
2973
2974                 *total_qprc += hw_stats->qprc[i];
2975                 *total_qbrc += hw_stats->qbrc[i];
2976         }
2977         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2978         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2979         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2980
2981         /*
2982          * An erratum states that gprc actually counts good + missed packets.
2983          * Workaround: set gprc to the sum of the per-queue packet receive counters.
2984          */
2985         hw_stats->gprc = *total_qprc;
2986
2987         if (hw->mac.type != ixgbe_mac_82598EB) {
2988                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
2989                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
2990                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
2991                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
2992                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
2993                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
2994                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
2995                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
2996         } else {
2997                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2998                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2999                 /* 82598 only has a counter in the high register */
3000                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3001                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3002                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3003         }
3004         uint64_t old_tpr = hw_stats->tpr;
3005
3006         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3007         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3008
3009         if (crc_strip == 0)
3010                 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
3011
3012         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
3013         hw_stats->gptc += delta_gptc;
3014         hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
3015         hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
3016
3017         /*
3018          * Workaround: mprc hardware is incorrectly counting
3019          * broadcasts, so for now we subtract those.
3020          */
3021         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3022         hw_stats->bprc += bprc;
3023         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3024         if (hw->mac.type == ixgbe_mac_82598EB)
3025                 hw_stats->mprc -= bprc;
3026
3027         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3028         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3029         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3030         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3031         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3032         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3033
3034         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3035         hw_stats->lxontxc += lxon;
3036         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3037         hw_stats->lxofftxc += lxoff;
3038         total = lxon + lxoff;
3039
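        /*
         * Transmitted XON/XOFF pause frames are also counted as good packets,
         * so subtract them from gptc/mptc/ptc64 and remove their (minimum
         * size) bytes from gotc below.
         */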
3040         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3041         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3042         hw_stats->gptc -= total;
3043         hw_stats->mptc -= total;
3044         hw_stats->ptc64 -= total;
3045         hw_stats->gotc -= total * ETHER_MIN_LEN;
3046
3047         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3048         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3049         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3050         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3051         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3052         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3053         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3054         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3055         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3056         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3057         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3058         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3059         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3060         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3061         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3062         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3063         /* FCoE counters are not available on 82598 */
3064         if (hw->mac.type != ixgbe_mac_82598EB) {
3065                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3066                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3067                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3068                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3069                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3070         }
3071
3072         /* Flow Director Stats registers */
3073         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3074         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3075
3076         /* MACsec Stats registers */
3077         macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
3078         macsec_stats->out_pkts_encrypted +=
3079                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
3080         macsec_stats->out_pkts_protected +=
3081                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
3082         macsec_stats->out_octets_encrypted +=
3083                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
3084         macsec_stats->out_octets_protected +=
3085                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
3086         macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
3087         macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
3088         macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
3089         macsec_stats->in_pkts_unknownsci +=
3090                 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
3091         macsec_stats->in_octets_decrypted +=
3092                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
3093         macsec_stats->in_octets_validated +=
3094                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
3095         macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
3096         macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
3097         macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
3098         for (i = 0; i < 2; i++) {
3099                 macsec_stats->in_pkts_ok +=
3100                         IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
3101                 macsec_stats->in_pkts_invalid +=
3102                         IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
3103                 macsec_stats->in_pkts_notvalid +=
3104                         IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
3105         }
3106         macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
3107         macsec_stats->in_pkts_notusingsa +=
3108                 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3109 }
3110
3111 /*
3112  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3113  */
3114 static int
3115 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3116 {
3117         struct ixgbe_hw *hw =
3118                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3119         struct ixgbe_hw_stats *hw_stats =
3120                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3121         struct ixgbe_macsec_stats *macsec_stats =
3122                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3123                                 dev->data->dev_private);
3124         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3125         unsigned i;
3126
3127         total_missed_rx = 0;
3128         total_qbrc = 0;
3129         total_qprc = 0;
3130         total_qprdc = 0;
3131
3132         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3133                         &total_qbrc, &total_qprc, &total_qprdc);
3134
3135         if (stats == NULL)
3136                 return -EINVAL;
3137
3138         /* Fill out the rte_eth_stats statistics structure */
3139         stats->ipackets = total_qprc;
3140         stats->ibytes = total_qbrc;
3141         stats->opackets = hw_stats->gptc;
3142         stats->obytes = hw_stats->gotc;
3143
3144         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3145                 stats->q_ipackets[i] = hw_stats->qprc[i];
3146                 stats->q_opackets[i] = hw_stats->qptc[i];
3147                 stats->q_ibytes[i] = hw_stats->qbrc[i];
3148                 stats->q_obytes[i] = hw_stats->qbtc[i];
3149                 stats->q_errors[i] = hw_stats->qprdc[i];
3150         }
3151
3152         /* Rx Errors */
3153         stats->imissed  = total_missed_rx;
3154         stats->ierrors  = hw_stats->crcerrs +
3155                           hw_stats->mspdc +
3156                           hw_stats->rlec +
3157                           hw_stats->ruc +
3158                           hw_stats->roc +
3159                           hw_stats->illerrc +
3160                           hw_stats->errbc +
3161                           hw_stats->rfc +
3162                           hw_stats->fccrc +
3163                           hw_stats->fclast;
3164
3165         /* Tx Errors */
3166         stats->oerrors  = 0;
3167         return 0;
3168 }
3169
3170 static void
3171 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3172 {
3173         struct ixgbe_hw_stats *stats =
3174                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3175
3176         /* HW registers are cleared on read */
3177         ixgbe_dev_stats_get(dev, NULL);
3178
3179         /* Reset software totals */
3180         memset(stats, 0, sizeof(*stats));
3181 }
3182
3183 /* This function calculates the number of xstats based on the current config */
3184 static unsigned
3185 ixgbe_xstats_calc_num(void) {
3186         return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3187                 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3188                 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3189 }
3190
3191 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3192         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3193 {
3194         const unsigned cnt_stats = ixgbe_xstats_calc_num();
3195         unsigned stat, i, count;
3196
3197         if (xstats_names != NULL) {
3198                 count = 0;
3199
3200                 /* Note: limit >= cnt_stats checked upstream
3201                  * in rte_eth_xstats_get_names()
3202                  */
3203
3204                 /* Extended stats from ixgbe_hw_stats */
3205                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3206                         snprintf(xstats_names[count].name,
3207                                 sizeof(xstats_names[count].name),
3208                                 "%s",
3209                                 rte_ixgbe_stats_strings[i].name);
3210                         count++;
3211                 }
3212
3213                 /* MACsec Stats */
3214                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3215                         snprintf(xstats_names[count].name,
3216                                 sizeof(xstats_names[count].name),
3217                                 "%s",
3218                                 rte_ixgbe_macsec_strings[i].name);
3219                         count++;
3220                 }
3221
3222                 /* RX Priority Stats */
3223                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3224                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3225                                 snprintf(xstats_names[count].name,
3226                                         sizeof(xstats_names[count].name),
3227                                         "rx_priority%u_%s", i,
3228                                         rte_ixgbe_rxq_strings[stat].name);
3229                                 count++;
3230                         }
3231                 }
3232
3233                 /* TX Priority Stats */
3234                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3235                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3236                                 snprintf(xstats_names[count].name,
3237                                         sizeof(xstats_names[count].name),
3238                                         "tx_priority%u_%s", i,
3239                                         rte_ixgbe_txq_strings[stat].name);
3240                                 count++;
3241                         }
3242                 }
3243         }
3244         return cnt_stats;
3245 }
3246
3247 static int ixgbe_dev_xstats_get_names_by_id(
3248         struct rte_eth_dev *dev,
3249         struct rte_eth_xstat_name *xstats_names,
3250         const uint64_t *ids,
3251         unsigned int limit)
3252 {
3253         if (!ids) {
3254                 const unsigned int cnt_stats = ixgbe_xstats_calc_num();
3255                 unsigned int stat, i, count;
3256
3257                 if (xstats_names != NULL) {
3258                         count = 0;
3259
3260                         /* Note: limit >= cnt_stats checked upstream
3261                          * in rte_eth_xstats_get_names()
3262                          */
3263
3264                         /* Extended stats from ixgbe_hw_stats */
3265                         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3266                                 snprintf(xstats_names[count].name,
3267                                         sizeof(xstats_names[count].name),
3268                                         "%s",
3269                                         rte_ixgbe_stats_strings[i].name);
3270                                 count++;
3271                         }
3272
3273                         /* MACsec Stats */
3274                         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3275                                 snprintf(xstats_names[count].name,
3276                                         sizeof(xstats_names[count].name),
3277                                         "%s",
3278                                         rte_ixgbe_macsec_strings[i].name);
3279                                 count++;
3280                         }
3281
3282                         /* RX Priority Stats */
3283                         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3284                                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3285                                         snprintf(xstats_names[count].name,
3286                                             sizeof(xstats_names[count].name),
3287                                             "rx_priority%u_%s", i,
3288                                             rte_ixgbe_rxq_strings[stat].name);
3289                                         count++;
3290                                 }
3291                         }
3292
3293                         /* TX Priority Stats */
3294                         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3295                                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3296                                         snprintf(xstats_names[count].name,
3297                                             sizeof(xstats_names[count].name),
3298                                             "tx_priority%u_%s", i,
3299                                             rte_ixgbe_txq_strings[stat].name);
3300                                         count++;
3301                                 }
3302                         }
3303                 }
3304                 return cnt_stats;
3305         }
3306
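        /*
         * ids != NULL: build the complete name table by calling ourselves
         * with ids == NULL, then copy out only the requested entries.
         */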
3307         uint16_t i;
3308         uint16_t size = ixgbe_xstats_calc_num();
3309         struct rte_eth_xstat_name xstats_names_copy[size];
3310
3311         ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
3312                         size);
3313
3314         for (i = 0; i < limit; i++) {
3315                 if (ids[i] >= size) {
3316                         PMD_INIT_LOG(ERR, "id value isn't valid");
3317                         return -1;
3318                 }
3319                 strcpy(xstats_names[i].name,
3320                                 xstats_names_copy[ids[i]].name);
3321         }
3322         return limit;
3323 }
3324
3325 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3326         struct rte_eth_xstat_name *xstats_names, unsigned limit)
3327 {
3328         unsigned i;
3329
3330         if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3331                 return -ENOMEM;
3332
3333         if (xstats_names != NULL)
3334                 for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
3335                         snprintf(xstats_names[i].name,
3336                                 sizeof(xstats_names[i].name),
3337                                 "%s", rte_ixgbevf_stats_strings[i].name);
3338         return IXGBEVF_NB_XSTATS;
3339 }
3340
3341 static int
3342 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3343                                          unsigned n)
3344 {
3345         struct ixgbe_hw *hw =
3346                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3347         struct ixgbe_hw_stats *hw_stats =
3348                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3349         struct ixgbe_macsec_stats *macsec_stats =
3350                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3351                                 dev->data->dev_private);
3352         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3353         unsigned i, stat, count = 0;
3354
3355         count = ixgbe_xstats_calc_num();
3356
3357         if (n < count)
3358                 return count;
3359
3360         total_missed_rx = 0;
3361         total_qbrc = 0;
3362         total_qprc = 0;
3363         total_qprdc = 0;
3364
3365         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3366                         &total_qbrc, &total_qprc, &total_qprdc);
3367
3368         /* If this is a reset, xstats is NULL and we have already cleared
3369          * the registers by reading them.
3370          */
3371         if (!xstats)
3372                 return 0;
3373
3374         /* Extended stats from ixgbe_hw_stats */
3375         count = 0;
3376         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3377                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3378                                 rte_ixgbe_stats_strings[i].offset);
3379                 xstats[count].id = count;
3380                 count++;
3381         }
3382
3383         /* MACsec Stats */
3384         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3385                 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3386                                 rte_ixgbe_macsec_strings[i].offset);
3387                 xstats[count].id = count;
3388                 count++;
3389         }
3390
3391         /* RX Priority Stats */
3392         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3393                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3394                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3395                                         rte_ixgbe_rxq_strings[stat].offset +
3396                                         (sizeof(uint64_t) * i));
3397                         xstats[count].id = count;
3398                         count++;
3399                 }
3400         }
3401
3402         /* TX Priority Stats */
3403         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3404                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3405                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3406                                         rte_ixgbe_txq_strings[stat].offset +
3407                                         (sizeof(uint64_t) * i));
3408                         xstats[count].id = count;
3409                         count++;
3410                 }
3411         }
3412         return count;
3413 }
3414
3415 static int
3416 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3417                 uint64_t *values, unsigned int n)
3418 {
3419         if (!ids) {
3420                 struct ixgbe_hw *hw =
3421                                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3422                 struct ixgbe_hw_stats *hw_stats =
3423                                 IXGBE_DEV_PRIVATE_TO_STATS(
3424                                                 dev->data->dev_private);
3425                 struct ixgbe_macsec_stats *macsec_stats =
3426                                 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3427                                         dev->data->dev_private);
3428                 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3429                 unsigned int i, stat, count = 0;
3430
3431                 count = ixgbe_xstats_calc_num();
3432
3433                 if (!ids && n < count)
3434                         return count;
3435
3436                 total_missed_rx = 0;
3437                 total_qbrc = 0;
3438                 total_qprc = 0;
3439                 total_qprdc = 0;
3440
3441                 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
3442                                 &total_missed_rx, &total_qbrc, &total_qprc,
3443                                 &total_qprdc);
3444
3445                 /* If this is a reset, xstats is NULL and we have already
3446                  * cleared the registers by reading them.
3447                  */
3448                 if (!ids && !values)
3449                         return 0;
3450
3451                 /* Extended stats from ixgbe_hw_stats */
3452                 count = 0;
3453                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3454                         values[count] = *(uint64_t *)(((char *)hw_stats) +
3455                                         rte_ixgbe_stats_strings[i].offset);
3456                         count++;
3457                 }
3458
3459                 /* MACsec Stats */
3460                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3461                         values[count] = *(uint64_t *)(((char *)macsec_stats) +
3462                                         rte_ixgbe_macsec_strings[i].offset);
3463                         count++;
3464                 }
3465
3466                 /* RX Priority Stats */
3467                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3468                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3469                                 values[count] =
3470                                         *(uint64_t *)(((char *)hw_stats) +
3471                                         rte_ixgbe_rxq_strings[stat].offset +
3472                                         (sizeof(uint64_t) * i));
3473                                 count++;
3474                         }
3475                 }
3476
3477                 /* TX Priority Stats */
3478                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3479                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3480                                 values[count] =
3481                                         *(uint64_t *)(((char *)hw_stats) +
3482                                         rte_ixgbe_txq_strings[stat].offset +
3483                                         (sizeof(uint64_t) * i));
3484                                 count++;
3485                         }
3486                 }
3487                 return count;
3488         }
3489
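        /*
         * ids != NULL: fetch every counter value by calling ourselves with
         * ids == NULL, then return only the requested ones.
         */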
3490         uint16_t i;
3491         uint16_t size = ixgbe_xstats_calc_num();
3492         uint64_t values_copy[size];
3493
3494         ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
3495
3496         for (i = 0; i < n; i++) {
3497                 if (ids[i] >= size) {
3498                         PMD_INIT_LOG(ERR, "id value isn't valid");
3499                         return -1;
3500                 }
3501                 values[i] = values_copy[ids[i]];
3502         }
3503         return n;
3504 }
3505
3506 static void
3507 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3508 {
3509         struct ixgbe_hw_stats *stats =
3510                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3511         struct ixgbe_macsec_stats *macsec_stats =
3512                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3513                                 dev->data->dev_private);
3514
3515         unsigned count = ixgbe_xstats_calc_num();
3516
3517         /* HW registers are cleared on read */
3518         ixgbe_dev_xstats_get(dev, NULL, count);
3519
3520         /* Reset software totals */
3521         memset(stats, 0, sizeof(*stats));
3522         memset(macsec_stats, 0, sizeof(*macsec_stats));
3523 }
3524
3525 static void
3526 ixgbevf_update_stats(struct rte_eth_dev *dev)
3527 {
3528         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3529         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3530                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3531
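        /*
         * The UPDATE_VF_STAT/UPDATE_VF_STAT_36BIT macros add the delta since
         * the previous read of the free-running VF counters, so the software
         * totals keep growing across hardware counter wrap-around.
         */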
3532         /* Good Rx packets, including VF loopback */
3533         UPDATE_VF_STAT(IXGBE_VFGPRC,
3534             hw_stats->last_vfgprc, hw_stats->vfgprc);
3535
3536         /* Good Rx octets, including VF loopback */
3537         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3538             hw_stats->last_vfgorc, hw_stats->vfgorc);
3539
3540         /* Good Tx packets, including VF loopback */
3541         UPDATE_VF_STAT(IXGBE_VFGPTC,
3542             hw_stats->last_vfgptc, hw_stats->vfgptc);
3543
3544         /* Good Tx octets, including VF loopback */
3545         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3546             hw_stats->last_vfgotc, hw_stats->vfgotc);
3547
3548         /* Rx Multicast Packets */
3549         UPDATE_VF_STAT(IXGBE_VFMPRC,
3550             hw_stats->last_vfmprc, hw_stats->vfmprc);
3551 }
3552
3553 static int
3554 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3555                        unsigned n)
3556 {
3557         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3558                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3559         unsigned i;
3560
3561         if (n < IXGBEVF_NB_XSTATS)
3562                 return IXGBEVF_NB_XSTATS;
3563
3564         ixgbevf_update_stats(dev);
3565
3566         if (!xstats)
3567                 return 0;
3568
3569         /* Extended stats */
3570         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3571                 xstats[i].id = i;
3572                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3573                         rte_ixgbevf_stats_strings[i].offset);
3574         }
3575
3576         return IXGBEVF_NB_XSTATS;
3577 }
3578
3579 static int
3580 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3581 {
3582         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3583                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3584
3585         ixgbevf_update_stats(dev);
3586
3587         if (stats == NULL)
3588                 return -EINVAL;
3589
3590         stats->ipackets = hw_stats->vfgprc;
3591         stats->ibytes = hw_stats->vfgorc;
3592         stats->opackets = hw_stats->vfgptc;
3593         stats->obytes = hw_stats->vfgotc;
3594         return 0;
3595 }
3596
3597 static void
3598 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3599 {
3600         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3601                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3602
3603         /* Sync HW register to the last stats */
3604         ixgbevf_dev_stats_get(dev, NULL);
3605
3606         /* reset HW current stats */
3607         hw_stats->vfgprc = 0;
3608         hw_stats->vfgorc = 0;
3609         hw_stats->vfgptc = 0;
3610         hw_stats->vfgotc = 0;
3611 }
3612
3613 static int
3614 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3615 {
3616         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3617         u16 eeprom_verh, eeprom_verl;
3618         u32 etrack_id;
3619         int ret;
3620
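        /* The firmware eTrack ID is held in two EEPROM words (high half at
         * offset 0x2e, low half at 0x2d); report it as a 32-bit hex string.
         */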
3621         ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3622         ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3623
3624         etrack_id = (eeprom_verh << 16) | eeprom_verl;
3625         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3626
3627         ret += 1; /* add the size of '\0' */
3628         if (fw_size < (u32)ret)
3629                 return ret;
3630         else
3631                 return 0;
3632 }
3633
3634 static void
3635 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3636 {
3637         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3638         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3639         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3640
3641         dev_info->pci_dev = pci_dev;
3642         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3643         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3644         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3645                 /*
3646                  * When DCB/VT is off, maximum number of queues changes,
3647                  * except for 82598EB, which remains constant.
3648                  */
3649                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3650                                 hw->mac.type != ixgbe_mac_82598EB)
3651                         dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3652         }
3653         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3654         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3655         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3656         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3657         dev_info->max_vfs = pci_dev->max_vfs;
3658         if (hw->mac.type == ixgbe_mac_82598EB)
3659                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3660         else
3661                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3662         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3663         dev_info->rx_offload_capa =
3664                 DEV_RX_OFFLOAD_VLAN_STRIP |
3665                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3666                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3667                 DEV_RX_OFFLOAD_TCP_CKSUM;
3668
3669         /*
3670          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
3671          * mode.
3672          */
3673         if ((hw->mac.type == ixgbe_mac_82599EB ||
3674              hw->mac.type == ixgbe_mac_X540) &&
3675             !RTE_ETH_DEV_SRIOV(dev).active)
3676                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
3677
3678         if (hw->mac.type == ixgbe_mac_82599EB ||
3679             hw->mac.type == ixgbe_mac_X540)
3680                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
3681
3682         if (hw->mac.type == ixgbe_mac_X550 ||
3683             hw->mac.type == ixgbe_mac_X550EM_x ||
3684             hw->mac.type == ixgbe_mac_X550EM_a)
3685                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
3686
3687         dev_info->tx_offload_capa =
3688                 DEV_TX_OFFLOAD_VLAN_INSERT |
3689                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3690                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3691                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3692                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3693                 DEV_TX_OFFLOAD_TCP_TSO;
3694
3695         if (hw->mac.type == ixgbe_mac_82599EB ||
3696             hw->mac.type == ixgbe_mac_X540)
3697                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
3698
3699         if (hw->mac.type == ixgbe_mac_X550 ||
3700             hw->mac.type == ixgbe_mac_X550EM_x ||
3701             hw->mac.type == ixgbe_mac_X550EM_a)
3702                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
3703
3704         dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
3705         dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
3706
3707         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3708                 .rx_thresh = {
3709                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3710                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3711                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3712                 },
3713                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3714                 .rx_drop_en = 0,
3715         };
3716
3717         dev_info->default_txconf = (struct rte_eth_txconf) {
3718                 .tx_thresh = {
3719                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3720                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3721                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3722                 },
3723                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3724                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3725                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3726                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3727         };
3728
3729         dev_info->rx_desc_lim = rx_desc_lim;
3730         dev_info->tx_desc_lim = tx_desc_lim;
3731
3732         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3733         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3734         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3735
3736         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3737         if (hw->mac.type == ixgbe_mac_X540 ||
3738             hw->mac.type == ixgbe_mac_X540_vf ||
3739             hw->mac.type == ixgbe_mac_X550 ||
3740             hw->mac.type == ixgbe_mac_X550_vf) {
3741                 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3742         }
3743         if (hw->mac.type == ixgbe_mac_X550) {
3744                 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
3745                 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
3746         }
3747 }
3748
3749 static const uint32_t *
3750 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3751 {
3752         static const uint32_t ptypes[] = {
3753                 /* For non-vec functions,
3754                  * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3755                  * for vec functions,
3756                  * refers to _recv_raw_pkts_vec().
3757                  */
3758                 RTE_PTYPE_L2_ETHER,
3759                 RTE_PTYPE_L3_IPV4,
3760                 RTE_PTYPE_L3_IPV4_EXT,
3761                 RTE_PTYPE_L3_IPV6,
3762                 RTE_PTYPE_L3_IPV6_EXT,
3763                 RTE_PTYPE_L4_SCTP,
3764                 RTE_PTYPE_L4_TCP,
3765                 RTE_PTYPE_L4_UDP,
3766                 RTE_PTYPE_TUNNEL_IP,
3767                 RTE_PTYPE_INNER_L3_IPV6,
3768                 RTE_PTYPE_INNER_L3_IPV6_EXT,
3769                 RTE_PTYPE_INNER_L4_TCP,
3770                 RTE_PTYPE_INNER_L4_UDP,
3771                 RTE_PTYPE_UNKNOWN
3772         };
3773
3774         if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3775             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3776             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3777             dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3778                 return ptypes;
3779
3780 #if defined(RTE_ARCH_X86)
3781         if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3782             dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3783                 return ptypes;
3784 #endif
3785         return NULL;
3786 }
3787
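/* Report VF device capabilities: queue and MAC address limits, VMDq pool
 * count and the checksum/TSO offloads supported by the VF datapath.
 */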
3788 static void
3789 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3790                      struct rte_eth_dev_info *dev_info)
3791 {
3792         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3793         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3794
3795         dev_info->pci_dev = pci_dev;
3796         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3797         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3798         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3799         dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
3800         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3801         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3802         dev_info->max_vfs = pci_dev->max_vfs;
3803         if (hw->mac.type == ixgbe_mac_82598EB)
3804                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3805         else
3806                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3807         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
3808                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3809                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3810                                 DEV_RX_OFFLOAD_TCP_CKSUM;
3811         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
3812                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3813                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3814                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3815                                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3816                                 DEV_TX_OFFLOAD_TCP_TSO;
3817
3818         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3819                 .rx_thresh = {
3820                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3821                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3822                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3823                 },
3824                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3825                 .rx_drop_en = 0,
3826         };
3827
3828         dev_info->default_txconf = (struct rte_eth_txconf) {
3829                 .tx_thresh = {
3830                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3831                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3832                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3833                 },
3834                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3835                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3836                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3837                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3838         };
3839
3840         dev_info->rx_desc_lim = rx_desc_lim;
3841         dev_info->tx_desc_lim = tx_desc_lim;
3842 }
3843
3844 static int
3845 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3846                    int *link_up, int wait_to_complete)
3847 {
3848         /**
3849          * For a quick link status check, when wait_to_complete == 0,
3850          * skip the PF link status check.
3851          */
3852         bool no_pflink_check = wait_to_complete == 0;
3853         struct ixgbe_mbx_info *mbx = &hw->mbx;
3854         struct ixgbe_mac_info *mac = &hw->mac;
3855         uint32_t links_reg, in_msg;
3856         int ret_val = 0;
3857
3858         /* If we were hit with a reset drop the link */
3859         if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
3860                 mac->get_link_status = true;
3861
3862         if (!mac->get_link_status)
3863                 goto out;
3864
3865         /* if link status is down no point in checking to see if pf is up */
3866         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3867         if (!(links_reg & IXGBE_LINKS_UP))
3868                 goto out;
3869
3870         /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
3871          * before the link status is correct
3872          */
3873         if (mac->type == ixgbe_mac_82599_vf) {
3874                 int i;
3875
3876                 for (i = 0; i < 5; i++) {
3877                         rte_delay_us(100);
3878                         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3879
3880                         if (!(links_reg & IXGBE_LINKS_UP))
3881                                 goto out;
3882                 }
3883         }
3884
3885         switch (links_reg & IXGBE_LINKS_SPEED_82599) {
3886         case IXGBE_LINKS_SPEED_10G_82599:
3887                 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3888                 if (hw->mac.type >= ixgbe_mac_X550) {
3889                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3890                                 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
3891                 }
3892                 break;
3893         case IXGBE_LINKS_SPEED_1G_82599:
3894                 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3895                 break;
3896         case IXGBE_LINKS_SPEED_100_82599:
3897                 *speed = IXGBE_LINK_SPEED_100_FULL;
3898                 if (hw->mac.type == ixgbe_mac_X550) {
3899                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3900                                 *speed = IXGBE_LINK_SPEED_5GB_FULL;
3901                 }
3902                 break;
3903         case IXGBE_LINKS_SPEED_10_X550EM_A:
3904                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3905                 /* This value is reserved on MACs older than X550 */
3906                 if (hw->mac.type >= ixgbe_mac_X550)
3907                         *speed = IXGBE_LINK_SPEED_10_FULL;
3908                 break;
3909         default:
3910                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3911         }
3912
3913         if (no_pflink_check) {
3914                 if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
3915                         mac->get_link_status = true;
3916                 else
3917                         mac->get_link_status = false;
3918
3919                 goto out;
3920         }
3921         /* if the read failed it could just be a mailbox collision, best wait
3922          * until we are called again and don't report an error
3923          */
3924         if (mbx->ops.read(hw, &in_msg, 1, 0))
3925                 goto out;
3926
3927         if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
3928                 /* msg is not CTS; if it is a NACK we must have lost CTS status */
3929                 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
3930                         ret_val = -1;
3931                 goto out;
3932         }
3933
3934         /* the pf is talking, if we timed out in the past we reinit */
3935         if (!mbx->timeout) {
3936                 ret_val = -1;
3937                 goto out;
3938         }
3939
3940         /* if we passed all the tests above then the link is up and we no
3941          * longer need to check for link
3942          */
3943         mac->get_link_status = false;
3944
3945 out:
3946         *link_up = !mac->get_link_status;
3947         return ret_val;
3948 }
3949
3950 /* Return 0 if the link status changed, -1 if it did not change */
3951 static int
3952 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
3953                             int wait_to_complete, int vf)
3954 {
3955         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3956         struct rte_eth_link link, old;
3957         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3958         struct ixgbe_interrupt *intr =
3959                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3960         int link_up;
3961         int diag;
3962         u32 speed = 0;
3963         int wait = 1;
3964         bool autoneg = false;
3965
3966         link.link_status = ETH_LINK_DOWN;
3967         link.link_speed = 0;
3968         link.link_duplex = ETH_LINK_HALF_DUPLEX;
3969         link.link_autoneg = ETH_LINK_AUTONEG;
3970         memset(&old, 0, sizeof(old));
3971         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
3972
3973         hw->mac.get_link_status = true;
3974
3975         if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
3976                 ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
3977                 speed = hw->phy.autoneg_advertised;
3978                 if (!speed)
3979                         ixgbe_get_link_capabilities(hw, &speed, &autoneg);
3980                 ixgbe_setup_link(hw, speed, true);
3981         }
3982
3983         /* Do not wait for completion if it was not requested or if the LSC interrupt is enabled */
3984         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
3985                 wait = 0;
3986
3987         if (vf)
3988                 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
3989         else
3990                 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
3991
3992         if (diag != 0) {
3993                 link.link_speed = ETH_SPEED_NUM_100M;
3994                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3995                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3996                 if (link.link_status == old.link_status)
3997                         return -1;
3998                 return 0;
3999         }
4000
4001         if (link_up == 0) {
4002                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
4003                 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
4004                 if (link.link_status == old.link_status)
4005                         return -1;
4006                 return 0;
4007         }
4008         intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4009         link.link_status = ETH_LINK_UP;
4010         link.link_duplex = ETH_LINK_FULL_DUPLEX;
4011
4012         switch (link_speed) {
4013         default:
4014         case IXGBE_LINK_SPEED_UNKNOWN:
4015                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
4016                 link.link_speed = ETH_SPEED_NUM_100M;
4017                 break;
4018
4019         case IXGBE_LINK_SPEED_100_FULL:
4020                 link.link_speed = ETH_SPEED_NUM_100M;
4021                 break;
4022
4023         case IXGBE_LINK_SPEED_1GB_FULL:
4024                 link.link_speed = ETH_SPEED_NUM_1G;
4025                 break;
4026
4027         case IXGBE_LINK_SPEED_2_5GB_FULL:
4028                 link.link_speed = ETH_SPEED_NUM_2_5G;
4029                 break;
4030
4031         case IXGBE_LINK_SPEED_5GB_FULL:
4032                 link.link_speed = ETH_SPEED_NUM_5G;
4033                 break;
4034
4035         case IXGBE_LINK_SPEED_10GB_FULL:
4036                 link.link_speed = ETH_SPEED_NUM_10G;
4037                 break;
4038         }
4039         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
4040
4041         if (link.link_status == old.link_status)
4042                 return -1;
4043
4044         return 0;
4045 }
4046
4047 static int
4048 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4049 {
4050         return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
4051 }
4052
4053 static int
4054 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4055 {
4056         return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
4057 }
4058
4059 static void
4060 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
4061 {
4062         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4063         uint32_t fctrl;
4064
4065         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4066         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4067         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4068 }
4069
4070 static void
4071 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
4072 {
4073         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4074         uint32_t fctrl;
4075
4076         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4077         fctrl &= (~IXGBE_FCTRL_UPE);
4078         if (dev->data->all_multicast == 1)
4079                 fctrl |= IXGBE_FCTRL_MPE;
4080         else
4081                 fctrl &= (~IXGBE_FCTRL_MPE);
4082         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4083 }
4084
4085 static void
4086 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4087 {
4088         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4089         uint32_t fctrl;
4090
4091         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4092         fctrl |= IXGBE_FCTRL_MPE;
4093         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4094 }
4095
4096 static void
4097 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4098 {
4099         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4100         uint32_t fctrl;
4101
4102         if (dev->data->promiscuous == 1)
4103                 return; /* must remain in all_multicast mode */
4104
4105         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4106         fctrl &= (~IXGBE_FCTRL_MPE);
4107         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4108 }
4109
4110 /**
4111  * It clears the interrupt causes and enables the interrupt.
4112  * It will be called only once during NIC initialization.
4113  *
4114  * @param dev
4115  *  Pointer to struct rte_eth_dev.
4116  * @param on
4117  *  Enable or Disable.
4118  *
4119  * @return
4120  *  - On success, zero.
4121  *  - On failure, a negative value.
4122  */
4123 static int
4124 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
4125 {
4126         struct ixgbe_interrupt *intr =
4127                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4128
4129         ixgbe_dev_link_status_print(dev);
4130         if (on)
4131                 intr->mask |= IXGBE_EICR_LSC;
4132         else
4133                 intr->mask &= ~IXGBE_EICR_LSC;
4134
4135         return 0;
4136 }
4137
4138 /**
4139  * It clears the interrupt causes and enables the interrupt.
4140  * It will be called only once during NIC initialization.
4141  *
4142  * @param dev
4143  *  Pointer to struct rte_eth_dev.
4144  *
4145  * @return
4146  *  - On success, zero.
4147  *  - On failure, a negative value.
4148  */
4149 static int
4150 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4151 {
4152         struct ixgbe_interrupt *intr =
4153                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4154
4155         intr->mask |= IXGBE_EICR_RTX_QUEUE;
4156
4157         return 0;
4158 }
4159
4160 /**
4161  * It clears the interrupt causes and enables the interrupt.
4162  * It will be called only once during NIC initialization.
4163  *
4164  * @param dev
4165  *  Pointer to struct rte_eth_dev.
4166  *
4167  * @return
4168  *  - On success, zero.
4169  *  - On failure, a negative value.
4170  */
4171 static int
4172 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4173 {
4174         struct ixgbe_interrupt *intr =
4175                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4176
4177         intr->mask |= IXGBE_EICR_LINKSEC;
4178
4179         return 0;
4180 }
4181
4182 /*
4183  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
4184  *
4185  * @param dev
4186  *  Pointer to struct rte_eth_dev.
4187  *
4188  * @return
4189  *  - On success, zero.
4190  *  - On failure, a negative value.
4191  */
4192 static int
4193 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4194 {
4195         uint32_t eicr;
4196         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4197         struct ixgbe_interrupt *intr =
4198                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4199
4200         /* clear all cause mask */
4201         ixgbe_disable_intr(hw);
4202
4203         /* read-on-clear nic registers here */
4204         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4205         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4206
4207         intr->flags = 0;
4208
4209         /* set flag for async link update */
4210         if (eicr & IXGBE_EICR_LSC)
4211                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4212
4213         if (eicr & IXGBE_EICR_MAILBOX)
4214                 intr->flags |= IXGBE_FLAG_MAILBOX;
4215
4216         if (eicr & IXGBE_EICR_LINKSEC)
4217                 intr->flags |= IXGBE_FLAG_MACSEC;
4218
4219         if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
4220             hw->phy.type == ixgbe_phy_x550em_ext_t &&
4221             (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4222                 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4223
4224         return 0;
4225 }
4226
4227 /**
4228  * It gets and then prints the link status.
4229  *
4230  * @param dev
4231  *  Pointer to struct rte_eth_dev.
4232  *
4233  * @return
4234  *  void
4236  */
4237 static void
4238 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4239 {
4240         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4241         struct rte_eth_link link;
4242
4243         memset(&link, 0, sizeof(link));
4244         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4245         if (link.link_status) {
4246                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4247                                         (int)(dev->data->port_id),
4248                                         (unsigned)link.link_speed,
4249                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4250                                         "full-duplex" : "half-duplex");
4251         } else {
4252                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
4253                                 (int)(dev->data->port_id));
4254         }
4255         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4256                                 pci_dev->addr.domain,
4257                                 pci_dev->addr.bus,
4258                                 pci_dev->addr.devid,
4259                                 pci_dev->addr.function);
4260 }
4261
4262 /*
4263  * It executes link_update after knowing an interrupt occurred.
4264  *
4265  * @param dev
4266  *  Pointer to struct rte_eth_dev.
4267  *
4268  * @return
4269  *  - On success, zero.
4270  *  - On failure, a negative value.
4271  */
4272 static int
4273 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
4274                            struct rte_intr_handle *intr_handle)
4275 {
4276         struct ixgbe_interrupt *intr =
4277                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4278         int64_t timeout;
4279         struct rte_eth_link link;
4280         struct ixgbe_hw *hw =
4281                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4282
4283         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4284
4285         if (intr->flags & IXGBE_FLAG_MAILBOX) {
4286                 ixgbe_pf_mbx_process(dev);
4287                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
4288         }
4289
4290         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4291                 ixgbe_handle_lasi(hw);
4292                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4293         }
4294
4295         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4296                 /* get the link status before link update, for predicting later */
4297                 memset(&link, 0, sizeof(link));
4298                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4299
4300                 ixgbe_dev_link_update(dev, 0);
4301
4302                 /* link is likely to come up */
4303                 if (!link.link_status)
4304                         /* handle it 1 sec later, wait for it to stabilize */
4305                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4306                 /* link is likely to go down */
4307                 else
4308                         /* handle it 4 sec later, wait for it to stabilize */
4309                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4310
4311                 ixgbe_dev_link_status_print(dev);
4312                 if (rte_eal_alarm_set(timeout * 1000,
4313                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4314                         PMD_DRV_LOG(ERR, "Error setting alarm");
4315                 else {
4316                         /* remember original mask */
4317                         intr->mask_original = intr->mask;
4318                         /* only disable lsc interrupt */
4319                         intr->mask &= ~IXGBE_EIMS_LSC;
4320                 }
4321         }
4322
4323         PMD_DRV_LOG(DEBUG, "enable intr immediately");
4324         ixgbe_enable_intr(dev);
4325         rte_intr_enable(intr_handle);
4326
4327         return 0;
4328 }
4329
4330 /**
4331  * Interrupt handler registered as an alarm callback to handle a specific
4332  * interrupt after a delay, once the NIC state has stabilized. Because the
4333  * ixgbe interrupt state is not stable right after the link goes down, the
4334  * handler waits 4 seconds before reading the settled status.
4335  *
4336  * @param handle
4337  *  Pointer to interrupt handle.
4338  * @param param
4339  *  The address of parameter (struct rte_eth_dev *) registered before.
4340  *
4341  * @return
4342  *  void
4343  */
4344 static void
4345 ixgbe_dev_interrupt_delayed_handler(void *param)
4346 {
4347         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4348         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4349         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4350         struct ixgbe_interrupt *intr =
4351                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4352         struct ixgbe_hw *hw =
4353                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4354         uint32_t eicr;
4355
4356         ixgbe_disable_intr(hw);
4357
4358         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4359         if (eicr & IXGBE_EICR_MAILBOX)
4360                 ixgbe_pf_mbx_process(dev);
4361
4362         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4363                 ixgbe_handle_lasi(hw);
4364                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4365         }
4366
4367         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4368                 ixgbe_dev_link_update(dev, 0);
4369                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4370                 ixgbe_dev_link_status_print(dev);
4371                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
4372                                               NULL, NULL);
4373         }
4374
4375         if (intr->flags & IXGBE_FLAG_MACSEC) {
4376                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
4377                                               NULL, NULL);
4378                 intr->flags &= ~IXGBE_FLAG_MACSEC;
4379         }
4380
4381         /* restore original mask */
4382         intr->mask = intr->mask_original;
4383         intr->mask_original = 0;
4384
4385         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4386         ixgbe_enable_intr(dev);
4387         rte_intr_enable(intr_handle);
4388 }
4389
4390 /**
4391  * Interrupt handler triggered by the NIC for handling a
4392  * specific interrupt.
4393  *
4394  * @param handle
4395  *  Pointer to interrupt handle.
4396  * @param param
4397  *  The address of parameter (struct rte_eth_dev *) registered before.
4398  *
4399  * @return
4400  *  void
4401  */
4402 static void
4403 ixgbe_dev_interrupt_handler(void *param)
4404 {
4405         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4406
4407         ixgbe_dev_interrupt_get_status(dev);
4408         ixgbe_dev_interrupt_action(dev, dev->intr_handle);
4409 }
4410
4411 static int
4412 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4413 {
4414         struct ixgbe_hw *hw;
4415
4416         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4417         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4418 }
4419
4420 static int
4421 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4422 {
4423         struct ixgbe_hw *hw;
4424
4425         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4426         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4427 }
4428
4429 static int
4430 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4431 {
4432         struct ixgbe_hw *hw;
4433         uint32_t mflcn_reg;
4434         uint32_t fccfg_reg;
4435         int rx_pause;
4436         int tx_pause;
4437
4438         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4439
4440         fc_conf->pause_time = hw->fc.pause_time;
4441         fc_conf->high_water = hw->fc.high_water[0];
4442         fc_conf->low_water = hw->fc.low_water[0];
4443         fc_conf->send_xon = hw->fc.send_xon;
4444         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4445
4446         /*
4447          * Return rx_pause status according to actual setting of
4448          * MFLCN register.
4449          */
4450         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4451         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4452                 rx_pause = 1;
4453         else
4454                 rx_pause = 0;
4455
4456         /*
4457          * Return tx_pause status according to actual setting of
4458          * FCCFG register.
4459          */
4460         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4461         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4462                 tx_pause = 1;
4463         else
4464                 tx_pause = 0;
4465
4466         if (rx_pause && tx_pause)
4467                 fc_conf->mode = RTE_FC_FULL;
4468         else if (rx_pause)
4469                 fc_conf->mode = RTE_FC_RX_PAUSE;
4470         else if (tx_pause)
4471                 fc_conf->mode = RTE_FC_TX_PAUSE;
4472         else
4473                 fc_conf->mode = RTE_FC_NONE;
4474
4475         return 0;
4476 }
4477
4478 static int
4479 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4480 {
4481         struct ixgbe_hw *hw;
4482         int err;
4483         uint32_t rx_buf_size;
4484         uint32_t max_high_water;
4485         uint32_t mflcn;
4486         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4487                 ixgbe_fc_none,
4488                 ixgbe_fc_rx_pause,
4489                 ixgbe_fc_tx_pause,
4490                 ixgbe_fc_full
4491         };
4492
4493         PMD_INIT_FUNC_TRACE();
4494
4495         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4496         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4497         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4498
4499         /*
4500          * Reserve at least one Ethernet frame for the watermark;
4501          * high_water/low_water are in kilobytes for ixgbe
4502          */
4503         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4504         if ((fc_conf->high_water > max_high_water) ||
4505                 (fc_conf->high_water < fc_conf->low_water)) {
4506                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4507                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4508                 return -EINVAL;
4509         }
4510
4511         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4512         hw->fc.pause_time     = fc_conf->pause_time;
4513         hw->fc.high_water[0]  = fc_conf->high_water;
4514         hw->fc.low_water[0]   = fc_conf->low_water;
4515         hw->fc.send_xon       = fc_conf->send_xon;
4516         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
4517
4518         err = ixgbe_fc_enable(hw);
4519
4520         /* Not negotiated is not an error case */
4521         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
4522
4523                 /* check if we want to forward MAC frames - driver doesn't have native
4524                  * capability to do that, so we'll write the registers ourselves */
4525
4526                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4527
4528                 /* set or clear MFLCN.PMCF bit depending on configuration */
4529                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4530                         mflcn |= IXGBE_MFLCN_PMCF;
4531                 else
4532                         mflcn &= ~IXGBE_MFLCN_PMCF;
4533
4534                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
4535                 IXGBE_WRITE_FLUSH(hw);
4536
4537                 return 0;
4538         }
4539
4540         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
4541         return -EIO;
4542 }
4543
4544 /**
4545  *  ixgbe_dcb_pfc_enable_generic - Enable priority flow control
4546  *  @hw: pointer to hardware structure
4547  *  @tc_num: traffic class number
4548  *  Enable flow control according to the current settings.
4549  */
4550 static int
4551 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4552 {
4553         int ret_val = 0;
4554         uint32_t mflcn_reg, fccfg_reg;
4555         uint32_t reg;
4556         uint32_t fcrtl, fcrth;
4557         uint8_t i;
4558         uint8_t nb_rx_en;
4559
4560         /* Validate the water mark configuration */
4561         if (!hw->fc.pause_time) {
4562                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4563                 goto out;
4564         }
4565
4566         /* Low water mark of zero causes XOFF floods */
4567         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4568                  /* High/Low water can not be 0 */
4569                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4570                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4571                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4572                         goto out;
4573                 }
4574
4575                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4576                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4577                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4578                         goto out;
4579                 }
4580         }
4581         /* Negotiate the fc mode to use */
4582         ixgbe_fc_autoneg(hw);
4583
4584         /* Disable any previous flow control settings */
4585         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4586         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4587
4588         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4589         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4590
4591         switch (hw->fc.current_mode) {
4592         case ixgbe_fc_none:
4593                 /*
4594                  * If more than one RX priority flow control class is
4595                  * enabled, TX pause cannot be disabled.
4596                  */
4597                 nb_rx_en = 0;
4598                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4599                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4600                         if (reg & IXGBE_FCRTH_FCEN)
4601                                 nb_rx_en++;
4602                 }
4603                 if (nb_rx_en > 1)
4604                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4605                 break;
4606         case ixgbe_fc_rx_pause:
4607                 /*
4608                  * Rx Flow control is enabled and Tx Flow control is
4609                  * disabled by software override. Since there really
4610                  * isn't a way to advertise that we are capable of RX
4611                  * Pause ONLY, we will advertise that we support both
4612                  * symmetric and asymmetric Rx PAUSE.  Later, we will
4613                  * disable the adapter's ability to send PAUSE frames.
4614                  */
4615                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4616                 /*
4617                  * If more than one RX priority flow control class is
4618                  * enabled, TX pause cannot be disabled.
4619                  */
4620                 nb_rx_en = 0;
4621                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4622                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4623                         if (reg & IXGBE_FCRTH_FCEN)
4624                                 nb_rx_en++;
4625                 }
4626                 if (nb_rx_en > 1)
4627                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4628                 break;
4629         case ixgbe_fc_tx_pause:
4630                 /*
4631                  * Tx Flow control is enabled, and Rx Flow control is
4632                  * disabled by software override.
4633                  */
4634                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4635                 break;
4636         case ixgbe_fc_full:
4637                 /* Flow control (both Rx and Tx) is enabled by SW override. */
4638                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4639                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4640                 break;
4641         default:
4642                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4643                 ret_val = IXGBE_ERR_CONFIG;
4644                 goto out;
4645         }
4646
4647         /* Set 802.3x based flow control settings. */
4648         mflcn_reg |= IXGBE_MFLCN_DPF;
4649         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4650         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4651
4652         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
4653         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4654                 hw->fc.high_water[tc_num]) {
4655                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4656                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4657                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4658         } else {
4659                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4660                 /*
4661                  * In order to prevent Tx hangs when the internal Tx
4662                  * switch is enabled we must set the high water mark
4663                  * to the maximum FCRTH value.  This allows the Tx
4664                  * switch to function even under heavy Rx workloads.
4665                  */
4666                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4667         }
4668         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4669
4670         /* Configure pause time (2 TCs per register) */
4671         reg = hw->fc.pause_time * 0x00010001;
4672         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4673                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4674
4675         /* Configure flow control refresh threshold value */
4676         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4677
4678 out:
4679         return ret_val;
4680 }
4681
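/* 82598 has no PFC support here and returns IXGBE_NOT_IMPLEMENTED; all other
 * MAC types are handled by the generic helper above.
 */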
4682 static int
4683 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4684 {
4685         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4686         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4687
4688         if (hw->mac.type != ixgbe_mac_82598EB) {
4689                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4690         }
4691         return ret_val;
4692 }
4693
4694 static int
4695 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4696 {
4697         int err;
4698         uint32_t rx_buf_size;
4699         uint32_t max_high_water;
4700         uint8_t tc_num;
4701         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4702         struct ixgbe_hw *hw =
4703                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4704         struct ixgbe_dcb_config *dcb_config =
4705                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4706
4707         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4708                 ixgbe_fc_none,
4709                 ixgbe_fc_rx_pause,
4710                 ixgbe_fc_tx_pause,
4711                 ixgbe_fc_full
4712         };
4713
4714         PMD_INIT_FUNC_TRACE();
4715
4716         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4717         tc_num = map[pfc_conf->priority];
4718         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4719         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4720         /*
4721          * Reserve at least one Ethernet frame for the watermark;
4722          * high_water/low_water are in kilobytes for ixgbe
4723          */
4724         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4725         if ((pfc_conf->fc.high_water > max_high_water) ||
4726             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4727                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4728                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4729                 return -EINVAL;
4730         }
4731
4732         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4733         hw->fc.pause_time = pfc_conf->fc.pause_time;
4734         hw->fc.send_xon = pfc_conf->fc.send_xon;
4735         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
4736         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4737
4738         err = ixgbe_dcb_pfc_enable(dev, tc_num);
4739
4740         /* Not negotiated is not an error case */
4741         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4742                 return 0;
4743
4744         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4745         return -EIO;
4746 }
4747
4748 static int
4749 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
4750                           struct rte_eth_rss_reta_entry64 *reta_conf,
4751                           uint16_t reta_size)
4752 {
4753         uint16_t i, sp_reta_size;
4754         uint8_t j, mask;
4755         uint32_t reta, r;
4756         uint16_t idx, shift;
4757         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4758         uint32_t reta_reg;
4759
4760         PMD_INIT_FUNC_TRACE();
4761
4762         if (!ixgbe_rss_update_sp(hw->mac.type)) {
4763                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
4764                         "NIC.");
4765                 return -ENOTSUP;
4766         }
4767
4768         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4769         if (reta_size != sp_reta_size) {
4770                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4771                         "(%d) doesn't match the number the hardware can support "
4772                         "(%d)", reta_size, sp_reta_size);
4773                 return -EINVAL;
4774         }
4775
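        /* Each RETA register holds four 8-bit queue indices; walk the table
         * four entries at a time and merge the requested values with the
         * current register contents so that unmasked entries are preserved.
         */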
4776         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4777                 idx = i / RTE_RETA_GROUP_SIZE;
4778                 shift = i % RTE_RETA_GROUP_SIZE;
4779                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4780                                                 IXGBE_4_BIT_MASK);
4781                 if (!mask)
4782                         continue;
4783                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4784                 if (mask == IXGBE_4_BIT_MASK)
4785                         r = 0;
4786                 else
4787                         r = IXGBE_READ_REG(hw, reta_reg);
4788                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4789                         if (mask & (0x1 << j))
4790                                 reta |= reta_conf[idx].reta[shift + j] <<
4791                                                         (CHAR_BIT * j);
4792                         else
4793                                 reta |= r & (IXGBE_8_BIT_MASK <<
4794                                                 (CHAR_BIT * j));
4795                 }
4796                 IXGBE_WRITE_REG(hw, reta_reg, reta);
4797         }
4798
4799         return 0;
4800 }
4801
4802 static int
4803 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4804                          struct rte_eth_rss_reta_entry64 *reta_conf,
4805                          uint16_t reta_size)
4806 {
4807         uint16_t i, sp_reta_size;
4808         uint8_t j, mask;
4809         uint32_t reta;
4810         uint16_t idx, shift;
4811         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4812         uint32_t reta_reg;
4813
4814         PMD_INIT_FUNC_TRACE();
4815         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4816         if (reta_size != sp_reta_size) {
4817                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4818                         "(%d) doesn't match the number the hardware can support "
4819                         "(%d)", reta_size, sp_reta_size);
4820                 return -EINVAL;
4821         }
4822
4823         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4824                 idx = i / RTE_RETA_GROUP_SIZE;
4825                 shift = i % RTE_RETA_GROUP_SIZE;
4826                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4827                                                 IXGBE_4_BIT_MASK);
4828                 if (!mask)
4829                         continue;
4830
4831                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4832                 reta = IXGBE_READ_REG(hw, reta_reg);
4833                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4834                         if (mask & (0x1 << j))
4835                                 reta_conf[idx].reta[shift + j] =
4836                                         ((reta >> (CHAR_BIT * j)) &
4837                                                 IXGBE_8_BIT_MASK);
4838                 }
4839         }
4840
4841         return 0;
4842 }
4843
4844 static int
4845 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4846                                 uint32_t index, uint32_t pool)
4847 {
4848         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4849         uint32_t enable_addr = 1;
4850
4851         return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
4852                              pool, enable_addr);
4853 }
4854
4855 static void
4856 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
4857 {
4858         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4859
4860         ixgbe_clear_rar(hw, index);
4861 }
4862
4863 static void
4864 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4865 {
4866         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4867
4868         ixgbe_remove_rar(dev, 0);
4869
4870         ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
4871 }
4872
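/* Return true when the ethdev is bound to the given PCI driver, matched by
 * driver name.
 */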
4873 static bool
4874 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
4875 {
4876         if (strcmp(dev->device->driver->name, drv->driver.name))
4877                 return false;
4878
4879         return true;
4880 }
4881
4882 bool
4883 is_ixgbe_supported(struct rte_eth_dev *dev)
4884 {
4885         return is_device_supported(dev, &rte_ixgbe_pmd);
4886 }
4887
4888 static int
4889 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4890 {
4891         uint32_t hlreg0;
4892         uint32_t maxfrs;
4893         struct ixgbe_hw *hw;
4894         struct rte_eth_dev_info dev_info;
4895         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4896         struct rte_eth_dev_data *dev_data = dev->data;
4897
4898         ixgbe_dev_info_get(dev, &dev_info);
4899
4900         /* check that mtu is within the allowed range */
4901         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
4902                 return -EINVAL;
4903
4904         /* If device is started, refuse mtu that requires the support of
4905          * scattered packets when this feature has not been enabled before.
4906          */
4907         if (dev_data->dev_started && !dev_data->scattered_rx &&
4908             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
4909              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
4910                 PMD_INIT_LOG(ERR, "Stop port first.");
4911                 return -EINVAL;
4912         }
4913
4914         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4915         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4916
4917         /* switch to jumbo mode if needed */
4918         if (frame_size > ETHER_MAX_LEN) {
4919                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
4920                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4921         } else {
4922                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
4923                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4924         }
4925         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4926
4927         /* update max frame size */
4928         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4929
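        /* MAXFRS keeps the maximum frame size in its upper 16 bits;
         * preserve the lower 16 bits when updating it.
         */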
4930         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4931         maxfrs &= 0x0000FFFF;
4932         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
4933         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4934
4935         return 0;
4936 }
4937
4938 /*
4939  * Virtual Function operations
4940  */
4941 static void
4942 ixgbevf_intr_disable(struct ixgbe_hw *hw)
4943 {
4944         PMD_INIT_FUNC_TRACE();
4945
4946         /* Clear interrupt mask to stop interrupts from being generated */
4947         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
4948
4949         IXGBE_WRITE_FLUSH(hw);
4950 }
4951
4952 static void
4953 ixgbevf_intr_enable(struct ixgbe_hw *hw)
4954 {
4955         PMD_INIT_FUNC_TRACE();
4956
4957         /* VF enable interrupt autoclean */
4958         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
4959         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
4960         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
4961
4962         IXGBE_WRITE_FLUSH(hw);
4963 }
4964
4965 static int
4966 ixgbevf_dev_configure(struct rte_eth_dev *dev)
4967 {
4968         struct rte_eth_conf *conf = &dev->data->dev_conf;
4969         struct ixgbe_adapter *adapter =
4970                         (struct ixgbe_adapter *)dev->data->dev_private;
4971
4972         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
4973                      dev->data->port_id);
4974
4975         /*
4976          * The VF has no ability to enable/disable HW CRC stripping.
4977          * Keep the behavior consistent with the host PF.
4978          */
4979 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
4980         if (!conf->rxmode.hw_strip_crc) {
4981                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
4982                 conf->rxmode.hw_strip_crc = 1;
4983         }
4984 #else
4985         if (conf->rxmode.hw_strip_crc) {
4986                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
4987                 conf->rxmode.hw_strip_crc = 0;
4988         }
4989 #endif
4990
4991         /*
4992          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
4993          * allocation or vector Rx preconditions, we will reset it.
4994          */
4995         adapter->rx_bulk_alloc_allowed = true;
4996         adapter->rx_vec_allowed = true;
4997
4998         return 0;
4999 }
5000
5001 static int
5002 ixgbevf_dev_start(struct rte_eth_dev *dev)
5003 {
5004         struct ixgbe_hw *hw =
5005                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5006         uint32_t intr_vector = 0;
5007         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5008         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5009
5010         int err, mask = 0;
5011
5012         PMD_INIT_FUNC_TRACE();
5013
5014         hw->mac.ops.reset_hw(hw);
5015         hw->mac.get_link_status = true;
5016
5017         /* negotiate mailbox API version to use with the PF. */
5018         ixgbevf_negotiate_api(hw);
5019
5020         ixgbevf_dev_tx_init(dev);
5021
5022         /* This can fail when allocating mbufs for descriptor rings */
5023         err = ixgbevf_dev_rx_init(dev);
5024         if (err) {
5025                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
5026                 ixgbe_dev_clear_queues(dev);
5027                 return err;
5028         }
5029
5030         /* Set vfta */
5031         ixgbevf_set_vfta_all(dev, 1);
5032
5033         /* Set HW strip */
5034         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
5035                 ETH_VLAN_EXTEND_MASK;
5036         ixgbevf_vlan_offload_set(dev, mask);
5037
5038         ixgbevf_dev_rxtx_start(dev);
5039
5040         /* check and configure queue intr-vector mapping */
5041         if (dev->data->dev_conf.intr_conf.rxq != 0) {
5042                 /* According to the datasheet, only vectors 0/1/2 can be used;
5043                  * currently only one vector is used for the Rx queues
5044                  */
5045                 intr_vector = 1;
5046                 if (rte_intr_efd_enable(intr_handle, intr_vector))
5047                         return -1;
5048         }
5049
5050         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
5051                 intr_handle->intr_vec =
5052                         rte_zmalloc("intr_vec",
5053                                     dev->data->nb_rx_queues * sizeof(int), 0);
5054                 if (intr_handle->intr_vec == NULL) {
5055                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
5056                                      " intr_vec", dev->data->nb_rx_queues);
5057                         return -ENOMEM;
5058                 }
5059         }
5060         ixgbevf_configure_msix(dev);
5061
5062         /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
5063          * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
5064          * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( )
5065          * is not cleared, it will fail when following rte_intr_enable( ) tries
5066          * to map Rx queue interrupt to other VFIO vectors.
5067          * So clear the UIO/VFIO intr/eventfd first to avoid failure.
5068          */
5069         rte_intr_disable(intr_handle);
5070
5071         rte_intr_enable(intr_handle);
5072
5073         /* Re-enable interrupt for VF */
5074         ixgbevf_intr_enable(hw);
5075
5076         return 0;
5077 }
5078
5079 static void
5080 ixgbevf_dev_stop(struct rte_eth_dev *dev)
5081 {
5082         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5083         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5084         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5085
5086         PMD_INIT_FUNC_TRACE();
5087
5088         ixgbevf_intr_disable(hw);
5089
5090         hw->adapter_stopped = 1;
5091         ixgbe_stop_adapter(hw);
5092
5093         /*
5094          * Clear what we set, but keep shadow_vfta so it can be
5095          * restored after the device starts
5096          */
5097         ixgbevf_set_vfta_all(dev, 0);
5098
5099         /* Clear stored conf */
5100         dev->data->scattered_rx = 0;
5101
5102         ixgbe_dev_clear_queues(dev);
5103
5104         /* Clean datapath event and queue/vec mapping */
5105         rte_intr_efd_disable(intr_handle);
5106         if (intr_handle->intr_vec != NULL) {
5107                 rte_free(intr_handle->intr_vec);
5108                 intr_handle->intr_vec = NULL;
5109         }
5110 }
5111
5112 static void
5113 ixgbevf_dev_close(struct rte_eth_dev *dev)
5114 {
5115         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5116
5117         PMD_INIT_FUNC_TRACE();
5118
5119         ixgbe_reset_hw(hw);
5120
5121         ixgbevf_dev_stop(dev);
5122
5123         ixgbe_dev_free_queues(dev);
5124
5125         /**
5126          * Remove the VF MAC address to ensure
5127          * that the VF traffic goes to the PF
5128          * after stop, close and detach of the VF
5129          **/
5130         ixgbevf_remove_mac_addr(dev, 0);
5131 }
5132
5133 /*
5134  * Reset VF device
5135  */
5136 static int
5137 ixgbevf_dev_reset(struct rte_eth_dev *dev)
5138 {
5139         int ret;
5140
5141         ret = eth_ixgbevf_dev_uninit(dev);
5142         if (ret)
5143                 return ret;
5144
5145         ret = eth_ixgbevf_dev_init(dev);
5146
5147         return ret;
5148 }
5149
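/* Walk the shadow VFTA and apply (on) or clear (off) every VLAN ID whose bit
 * is set; each 32-bit shadow word covers 32 consecutive VLAN IDs.
 */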
5150 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5151 {
5152         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5153         struct ixgbe_vfta *shadow_vfta =
5154                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5155         int i = 0, j = 0, vfta = 0, mask = 1;
5156
5157         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5158                 vfta = shadow_vfta->vfta[i];
5159                 if (vfta) {
5160                         mask = 1;
5161                         for (j = 0; j < 32; j++) {
5162                                 if (vfta & mask)
5163                                         ixgbe_set_vfta(hw, (i<<5)+j, 0,
5164                                                        on, false);
5165                                 mask <<= 1;
5166                         }
5167                 }
5168         }
5169
5170 }
5171
5172 static int
5173 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5174 {
5175         struct ixgbe_hw *hw =
5176                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5177         struct ixgbe_vfta *shadow_vfta =
5178                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5179         uint32_t vid_idx = 0;
5180         uint32_t vid_bit = 0;
5181         int ret = 0;
5182
5183         PMD_INIT_FUNC_TRACE();
5184
5185         /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
5186         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5187         if (ret) {
5188                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5189                 return ret;
5190         }
5191         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5192         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5193
5194         /* Save what we set and restore it after device reset */
5195         if (on)
5196                 shadow_vfta->vfta[vid_idx] |= vid_bit;
5197         else
5198                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
5199
5200         return 0;
5201 }
5202
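/* Enable or disable VLAN stripping for one VF Rx queue by toggling the VME
 * bit in its RXDCTL register, and record the setting in the strip bitmap.
 */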
5203 static void
5204 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5205 {
5206         struct ixgbe_hw *hw =
5207                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5208         uint32_t ctrl;
5209
5210         PMD_INIT_FUNC_TRACE();
5211
5212         if (queue >= hw->mac.max_rx_queues)
5213                 return;
5214
5215         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5216         if (on)
5217                 ctrl |= IXGBE_RXDCTL_VME;
5218         else
5219                 ctrl &= ~IXGBE_RXDCTL_VME;
5220         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5221
5222         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5223 }
5224
5225 static void
5226 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5227 {
5228         struct ixgbe_hw *hw =
5229                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5230         uint16_t i;
5231         int on = 0;
5232
5233         /* The VF only supports the HW VLAN strip feature; other offloads are not supported */
5234         if (mask & ETH_VLAN_STRIP_MASK) {
5235                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
5236
5237                 for (i = 0; i < hw->mac.max_rx_queues; i++)
5238                         ixgbevf_vlan_strip_queue_set(dev, i, on);
5239         }
5240 }
5241
5242 int
5243 ixgbe_vt_check(struct ixgbe_hw *hw)
5244 {
5245         uint32_t reg_val;
5246
5247         /* if Virtualization Technology is enabled */
5248         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
5249         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
5250                 PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
5251                 return -1;
5252         }
5253
5254         return 0;
5255 }
5256
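/* Compute the 12-bit hash used to index the Unicast Table Array (UTA) from
 * the upper address bytes; the exact bit range depends on the configured
 * mc_filter_type.
 */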
5257 static uint32_t
5258 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
5259 {
5260         uint32_t vector = 0;
5261
5262         switch (hw->mac.mc_filter_type) {
5263         case 0:   /* use bits [47:36] of the address */
5264                 vector = ((uc_addr->addr_bytes[4] >> 4) |
5265                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
5266                 break;
5267         case 1:   /* use bits [46:35] of the address */
5268                 vector = ((uc_addr->addr_bytes[4] >> 3) |
5269                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
5270                 break;
5271         case 2:   /* use bits [45:34] of the address */
5272                 vector = ((uc_addr->addr_bytes[4] >> 2) |
5273                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
5274                 break;
5275         case 3:   /* use bits [43:32] of the address */
5276                 vector = ((uc_addr->addr_bytes[4]) |
5277                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
5278                 break;
5279         default:  /* Invalid mc_filter_type */
5280                 break;
5281         }
5282
5283         /* vector can only be 12 bits wide or the table boundary will be exceeded */
5284         vector &= 0xFFF;
5285         return vector;
5286 }
5287
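/*
 * Worked example of the vector computation above (illustrative only): with
 * mc_filter_type 0, a unicast address whose last two bytes are
 * addr_bytes[4] = 0x12 and addr_bytes[5] = 0x34 yields
 * vector = (0x12 >> 4) | (0x34 << 4) = 0x001 | 0x340 = 0x341, which already
 * fits in the 12-bit range enforced by the final mask.
 */
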
5288 static int
5289 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5290                         uint8_t on)
5291 {
5292         uint32_t vector;
5293         uint32_t uta_idx;
5294         uint32_t reg_val;
5295         uint32_t uta_shift;
5296         uint32_t rc;
5297         const uint32_t ixgbe_uta_idx_mask = 0x7F;
5298         const uint32_t ixgbe_uta_bit_shift = 5;
5299         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
5300         const uint32_t bit1 = 0x1;
5301
5302         struct ixgbe_hw *hw =
5303                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5304         struct ixgbe_uta_info *uta_info =
5305                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5306
5307         /* The UTA table only exists on 82599 hardware and newer */
5308         if (hw->mac.type < ixgbe_mac_82599EB)
5309                 return -ENOTSUP;
5310
5311         vector = ixgbe_uta_vector(hw, mac_addr);
5312         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
5313         uta_shift = vector & ixgbe_uta_bit_mask;
5314
5315         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
5316         if (rc == on)
5317                 return 0;
5318
5319         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
5320         if (on) {
5321                 uta_info->uta_in_use++;
5322                 reg_val |= (bit1 << uta_shift);
5323                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
5324         } else {
5325                 uta_info->uta_in_use--;
5326                 reg_val &= ~(bit1 << uta_shift);
5327                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
5328         }
5329
5330         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
5331
5332         if (uta_info->uta_in_use > 0)
5333                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
5334                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
5335         else
5336                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
5337
5338         return 0;
5339 }
5340
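/*
 * The 12-bit vector computed by ixgbe_uta_vector() is split in
 * ixgbe_uc_hash_table_set() above into a table index and a bit position.
 * As an illustration, vector 0x341 gives uta_idx = (0x341 >> 5) & 0x7F = 26
 * and uta_shift = 0x341 & 0x1F = 1, i.e. bit 1 of UTA register 26 (and of
 * uta_shadow[26]) tracks that address.
 */
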
5341 static int
5342 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
5343 {
5344         int i;
5345         struct ixgbe_hw *hw =
5346                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5347         struct ixgbe_uta_info *uta_info =
5348                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5349
5350         /* The UTA table only exists on 82599 hardware and newer */
5351         if (hw->mac.type < ixgbe_mac_82599EB)
5352                 return -ENOTSUP;
5353
5354         if (on) {
5355                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5356                         uta_info->uta_shadow[i] = ~0;
5357                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
5358                 }
5359         } else {
5360                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5361                         uta_info->uta_shadow[i] = 0;
5362                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
5363                 }
5364         }
5365         return 0;
5366
5367 }
5368
5369 uint32_t
5370 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
5371 {
5372         uint32_t new_val = orig_val;
5373
5374         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
5375                 new_val |= IXGBE_VMOLR_AUPE;
5376         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
5377                 new_val |= IXGBE_VMOLR_ROMPE;
5378         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
5379                 new_val |= IXGBE_VMOLR_ROPE;
5380         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
5381                 new_val |= IXGBE_VMOLR_BAM;
5382         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
5383                 new_val |= IXGBE_VMOLR_MPE;
5384
5385         return new_val;
5386 }
5387
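/*
 * Example use of the helper above (illustrative): a VMDq Rx mask of
 * ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST would OR
 * IXGBE_VMOLR_AUPE | IXGBE_VMOLR_BAM into the original VMOLR value, leaving
 * any other bits of orig_val untouched.
 */
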
5388 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
5389 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
5390 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
5391 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
5392 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5393         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5394         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5395
5396 static int
5397 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5398                       struct rte_eth_mirror_conf *mirror_conf,
5399                       uint8_t rule_id, uint8_t on)
5400 {
5401         uint32_t mr_ctl, vlvf;
5402         uint32_t mp_lsb = 0;
5403         uint32_t mv_msb = 0;
5404         uint32_t mv_lsb = 0;
5405         uint32_t mp_msb = 0;
5406         uint8_t i = 0;
5407         int reg_index = 0;
5408         uint64_t vlan_mask = 0;
5409
5410         const uint8_t pool_mask_offset = 32;
5411         const uint8_t vlan_mask_offset = 32;
5412         const uint8_t dst_pool_offset = 8;
5413         const uint8_t rule_mr_offset  = 4;
5414         const uint8_t mirror_rule_mask = 0x0F;
5415
5416         struct ixgbe_mirror_info *mr_info =
5417                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5418         struct ixgbe_hw *hw =
5419                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5420         uint8_t mirror_type = 0;
5421
5422         if (ixgbe_vt_check(hw) < 0)
5423                 return -ENOTSUP;
5424
5425         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5426                 return -EINVAL;
5427
5428         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5429                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5430                             mirror_conf->rule_type);
5431                 return -EINVAL;
5432         }
5433
5434         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5435                 mirror_type |= IXGBE_MRCTL_VLME;
5436                 /* Check if the VLAN ID is valid and find the corresponding VLAN ID
5437                  * index in VLVF
5438                  */
5439                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5440                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5441                                 /* search for the pool VLAN filter
5442                                  * index related to this VLAN ID
5443                                  */
5444                                 reg_index = ixgbe_find_vlvf_slot(
5445                                                 hw,
5446                                                 mirror_conf->vlan.vlan_id[i],
5447                                                 false);
5448                                 if (reg_index < 0)
5449                                         return -EINVAL;
5450                                 vlvf = IXGBE_READ_REG(hw,
5451                                                       IXGBE_VLVF(reg_index));
5452                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
5453                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5454                                       mirror_conf->vlan.vlan_id[i]))
5455                                         vlan_mask |= (1ULL << reg_index);
5456                                 else
5457                                         return -EINVAL;
5458                         }
5459                 }
5460
5461                 if (on) {
5462                         mv_lsb = vlan_mask & 0xFFFFFFFF;
5463                         mv_msb = vlan_mask >> vlan_mask_offset;
5464
5465                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
5466                                                 mirror_conf->vlan.vlan_mask;
5467                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5468                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5469                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5470                                                 mirror_conf->vlan.vlan_id[i];
5471                         }
5472                 } else {
5473                         mv_lsb = 0;
5474                         mv_msb = 0;
5475                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5476                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5477                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5478                 }
5479         }
5480
5481         /**
5482          * If pool mirroring is enabled, write the related pool mask register;
5483          * if it is disabled, clear the PFMRVM register.
5484          */
5485         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5486                 mirror_type |= IXGBE_MRCTL_VPME;
5487                 if (on) {
5488                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5489                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5490                         mr_info->mr_conf[rule_id].pool_mask =
5491                                         mirror_conf->pool_mask;
5492
5493                 } else {
5494                         mp_lsb = 0;
5495                         mp_msb = 0;
5496                         mr_info->mr_conf[rule_id].pool_mask = 0;
5497                 }
5498         }
5499         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5500                 mirror_type |= IXGBE_MRCTL_UPME;
5501         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5502                 mirror_type |= IXGBE_MRCTL_DPME;
5503
5504         /* read mirror control register and recalculate it */
5505         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5506
5507         if (on) {
5508                 mr_ctl |= mirror_type;
5509                 mr_ctl &= mirror_rule_mask;
5510                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
5511         } else {
5512                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
5513         }
5514
5515         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5516         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5517
5518         /* write mirror control register */
5519         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5520
5521         /* write pool mirror control register */
5522         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5523                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5524                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5525                                 mp_msb);
5526         }
5527         /* write VLAN mirror control register */
5528         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5529                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5530                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5531                                 mv_msb);
5532         }
5533
5534         return 0;
5535 }
5536
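/*
 * Minimal sketch (illustrative only) of how an application might reach the
 * rule-set path above through the generic ethdev mirror API; the port id,
 * VLAN value and destination pool are hypothetical, and
 * rte_eth_mirror_rule_set() is assumed to be the ethdev wrapper that ends
 * up calling this function with rule_id 0 and on = 1:
 *
 *     struct rte_eth_mirror_conf conf = { 0 };
 *
 *     conf.rule_type = ETH_MIRROR_VLAN;
 *     conf.vlan.vlan_mask = 1ULL << 0;
 *     conf.vlan.vlan_id[0] = 100;
 *     conf.dst_pool = 1;
 *     rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
 */
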
5537 static int
5538 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5539 {
5540         int mr_ctl = 0;
5541         uint32_t lsb_val = 0;
5542         uint32_t msb_val = 0;
5543         const uint8_t rule_mr_offset = 4;
5544
5545         struct ixgbe_hw *hw =
5546                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5547         struct ixgbe_mirror_info *mr_info =
5548                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5549
5550         if (ixgbe_vt_check(hw) < 0)
5551                 return -ENOTSUP;
5552
5553         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5554                 return -EINVAL;
5555
5556         memset(&mr_info->mr_conf[rule_id], 0,
5557                sizeof(struct rte_eth_mirror_conf));
5558
5559         /* clear PFVMCTL register */
5560         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5561
5562         /* clear pool mask register */
5563         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5564         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5565
5566         /* clear vlan mask register */
5567         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5568         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
5569
5570         return 0;
5571 }
5572
5573 static int
5574 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5575 {
5576         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5577         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5578         uint32_t mask;
5579         struct ixgbe_hw *hw =
5580                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5581         uint32_t vec = IXGBE_MISC_VEC_ID;
5582
5583         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5584         if (rte_intr_allow_others(intr_handle))
5585                 vec = IXGBE_RX_VEC_START;
5586         mask |= (1 << vec);
5587         RTE_SET_USED(queue_id);
5588         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5589
5590         rte_intr_enable(intr_handle);
5591
5592         return 0;
5593 }
5594
5595 static int
5596 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5597 {
5598         uint32_t mask;
5599         struct ixgbe_hw *hw =
5600                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5601         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5602         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5603         uint32_t vec = IXGBE_MISC_VEC_ID;
5604
5605         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5606         if (rte_intr_allow_others(intr_handle))
5607                 vec = IXGBE_RX_VEC_START;
5608         mask &= ~(1 << vec);
5609         RTE_SET_USED(queue_id);
5610         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5611
5612         return 0;
5613 }
5614
5615 static int
5616 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5617 {
5618         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5619         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5620         uint32_t mask;
5621         struct ixgbe_hw *hw =
5622                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5623         struct ixgbe_interrupt *intr =
5624                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5625
5626         if (queue_id < 16) {
5627                 ixgbe_disable_intr(hw);
5628                 intr->mask |= (1 << queue_id);
5629                 ixgbe_enable_intr(dev);
5630         } else if (queue_id < 32) {
5631                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5632                 mask &= (1 << queue_id);
5633                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5634         } else if (queue_id < 64) {
5635                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5636                 mask &= (1 << (queue_id - 32));
5637                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5638         }
5639         rte_intr_enable(intr_handle);
5640
5641         return 0;
5642 }
5643
5644 static int
5645 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5646 {
5647         uint32_t mask;
5648         struct ixgbe_hw *hw =
5649                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5650         struct ixgbe_interrupt *intr =
5651                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5652
5653         if (queue_id < 16) {
5654                 ixgbe_disable_intr(hw);
5655                 intr->mask &= ~(1 << queue_id);
5656                 ixgbe_enable_intr(dev);
5657         } else if (queue_id < 32) {
5658                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5659                 mask &= ~(1 << queue_id);
5660                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5661         } else if (queue_id < 64) {
5662                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5663                 mask &= ~(1 << (queue_id - 32));
5664                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5665         }
5666
5667         return 0;
5668 }
5669
5670 static void
5671 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5672                      uint8_t queue, uint8_t msix_vector)
5673 {
5674         uint32_t tmp, idx;
5675
5676         if (direction == -1) {
5677                 /* other causes */
5678                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5679                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
5680                 tmp &= ~0xFF;
5681                 tmp |= msix_vector;
5682                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
5683         } else {
5684                 /* rx or tx cause */
5685                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5686                 idx = ((16 * (queue & 1)) + (8 * direction));
5687                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
5688                 tmp &= ~(0xFF << idx);
5689                 tmp |= (msix_vector << idx);
5690                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
5691         }
5692 }
5693
5694 /**
5695  * set the IVAR registers, mapping interrupt causes to vectors
5696  * @param hw
5697  *  pointer to ixgbe_hw struct
5698  * @param direction
5699  *  0 for Rx, 1 for Tx, -1 for other causes
5700  * @param queue
5701  *  queue to map the corresponding interrupt to
5702  * @param msix_vector
5703  *  the vector to map to the corresponding queue
5704  */
5705 static void
5706 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5707                    uint8_t queue, uint8_t msix_vector)
5708 {
5709         uint32_t tmp, idx;
5710
5711         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5712         if (hw->mac.type == ixgbe_mac_82598EB) {
5713                 if (direction == -1)
5714                         direction = 0;
5715                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
5716                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
5717                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
5718                 tmp |= (msix_vector << (8 * (queue & 0x3)));
5719                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
5720         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
5721                         (hw->mac.type == ixgbe_mac_X540) ||
5722                         (hw->mac.type == ixgbe_mac_X550)) {
5723                 if (direction == -1) {
5724                         /* other causes */
5725                         idx = ((queue & 1) * 8);
5726                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5727                         tmp &= ~(0xFF << idx);
5728                         tmp |= (msix_vector << idx);
5729                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
5730                 } else {
5731                         /* rx or tx causes */
5732                         idx = ((16 * (queue & 1)) + (8 * direction));
5733                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
5734                         tmp &= ~(0xFF << idx);
5735                         tmp |= (msix_vector << idx);
5736                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
5737                 }
5738         }
5739 }
5740
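/*
 * Illustration of the IVAR indexing above for the 82599/X540/X550 case:
 * each 32-bit IVAR register covers two queues, and within each queue's
 * 16-bit half the Rx cause occupies the low byte and the Tx cause the high
 * byte.  For example, Rx queue 3 (direction 0) gives
 * idx = 16 * (3 & 1) + 8 * 0 = 16, i.e. bits 23:16 of IVAR(1), while Tx
 * queue 2 (direction 1) gives idx = 16 * 0 + 8 = 8, i.e. bits 15:8 of
 * IVAR(1).
 */
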
5741 static void
5742 ixgbevf_configure_msix(struct rte_eth_dev *dev)
5743 {
5744         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5745         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5746         struct ixgbe_hw *hw =
5747                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5748         uint32_t q_idx;
5749         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
5750         uint32_t base = IXGBE_MISC_VEC_ID;
5751
5752         /* Configure VF other cause ivar */
5753         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
5754
5755         /* Do not configure the MSI-X register if no mapping has been done
5756          * between the interrupt vector and the event fd.
5757          */
5758         if (!rte_intr_dp_is_en(intr_handle))
5759                 return;
5760
5761         if (rte_intr_allow_others(intr_handle)) {
5762                 base = IXGBE_RX_VEC_START;
5763                 vector_idx = IXGBE_RX_VEC_START;
5764         }
5765
5766         /* Configure all RX queues of VF */
5767         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
5768                 /* Force all queues to use vector 0,
5769                  * as IXGBE_VF_MAXMSIVECTOR = 1
5770                  */
5771                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
5772                 intr_handle->intr_vec[q_idx] = vector_idx;
5773                 if (vector_idx < base + intr_handle->nb_efd - 1)
5774                         vector_idx++;
5775         }
5776 }
5777
5778 /**
5779  * Sets up the hardware to properly generate MSI-X interrupts
5780  * @param dev
5781  *  pointer to the rte_eth_dev structure
5782  */
5783 static void
5784 ixgbe_configure_msix(struct rte_eth_dev *dev)
5785 {
5786         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5787         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5788         struct ixgbe_hw *hw =
5789                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5790         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
5791         uint32_t vec = IXGBE_MISC_VEC_ID;
5792         uint32_t mask;
5793         uint32_t gpie;
5794
5795         /* Do not configure the MSI-X register if no mapping has been done
5796          * between the interrupt vector and the event fd
5797          */
5798         if (!rte_intr_dp_is_en(intr_handle))
5799                 return;
5800
5801         if (rte_intr_allow_others(intr_handle))
5802                 vec = base = IXGBE_RX_VEC_START;
5803
5804         /* set up GPIE for MSI-X mode */
5805         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5806         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5807                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
5808         /* auto-clear and auto-set the corresponding bits in EIMS
5809          * when an MSI-X interrupt is triggered
5810          */
5811         if (hw->mac.type == ixgbe_mac_82598EB) {
5812                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5813         } else {
5814                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5815                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5816         }
5817         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5818
5819         /* Populate the IVAR table and set the ITR values to the
5820          * corresponding register.
5821          */
5822         for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
5823              queue_id++) {
5824                 /* by default, 1:1 mapping */
5825                 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
5826                 intr_handle->intr_vec[queue_id] = vec;
5827                 if (vec < base + intr_handle->nb_efd - 1)
5828                         vec++;
5829         }
5830
5831         switch (hw->mac.type) {
5832         case ixgbe_mac_82598EB:
5833                 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
5834                                    IXGBE_MISC_VEC_ID);
5835                 break;
5836         case ixgbe_mac_82599EB:
5837         case ixgbe_mac_X540:
5838         case ixgbe_mac_X550:
5839                 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
5840                 break;
5841         default:
5842                 break;
5843         }
5844         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
5845                         IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
5846
5847         /* set up to auto-clear the timer and the vectors */
5848         mask = IXGBE_EIMS_ENABLE_MASK;
5849         mask &= ~(IXGBE_EIMS_OTHER |
5850                   IXGBE_EIMS_MAILBOX |
5851                   IXGBE_EIMS_LSC);
5852
5853         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5854 }
5855
5856 int
5857 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
5858                            uint16_t queue_idx, uint16_t tx_rate)
5859 {
5860         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5861         uint32_t rf_dec, rf_int;
5862         uint32_t bcnrc_val;
5863         uint16_t link_speed = dev->data->dev_link.link_speed;
5864
5865         if (queue_idx >= hw->mac.max_tx_queues)
5866                 return -EINVAL;
5867
5868         if (tx_rate != 0) {
5869                 /* Calculate the rate factor values to set */
5870                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
5871                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
5872                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
5873
5874                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
5875                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
5876                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
5877                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
5878         } else {
5879                 bcnrc_val = 0;
5880         }
5881
5882         /*
5883          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
5884          * register. MMW_SIZE=0x014 if 9728-byte jumbo frames are supported,
5885          * otherwise set it to 0x4.
5886          */
5887         if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
5888                 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
5889                                 IXGBE_MAX_JUMBO_FRAME_SIZE))
5890                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5891                         IXGBE_MMW_SIZE_JUMBO_FRAME);
5892         else
5893                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5894                         IXGBE_MMW_SIZE_DEFAULT);
5895
5896         /* Set RTTBCNRC of queue X */
5897         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
5898         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
5899         IXGBE_WRITE_FLUSH(hw);
5900
5901         return 0;
5902 }
5903
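/*
 * Worked example of the rate-factor arithmetic above (illustrative,
 * assuming the link speed is reported in Mb/s): limiting a queue to
 * 3000 Mb/s on a 10G link (link_speed = 10000) gives
 * rf_int = 10000 / 3000 = 3 and a remainder of 1000, so rf_dec encodes the
 * fraction 1000/3000 scaled by 2^IXGBE_RTTBCNRC_RF_INT_SHIFT; both parts
 * are then packed into RTTBCNRC together with IXGBE_RTTBCNRC_RS_ENA.
 */
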
5904 static int
5905 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5906                      __attribute__((unused)) uint32_t index,
5907                      __attribute__((unused)) uint32_t pool)
5908 {
5909         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5910         int diag;
5911
5912         /*
5913          * On an 82599 VF, adding the same MAC address again is not an idempotent
5914          * operation. Trap this case to avoid exhausting the [very limited]
5915          * set of PF resources used to store VF MAC addresses.
5916          */
5917         if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5918                 return -1;
5919         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5920         if (diag != 0)
5921                 PMD_DRV_LOG(ERR, "Unable to add MAC address "
5922                             "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
5923                             mac_addr->addr_bytes[0],
5924                             mac_addr->addr_bytes[1],
5925                             mac_addr->addr_bytes[2],
5926                             mac_addr->addr_bytes[3],
5927                             mac_addr->addr_bytes[4],
5928                             mac_addr->addr_bytes[5],
5929                             diag);
5930         return diag;
5931 }
5932
5933 static void
5934 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
5935 {
5936         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5937         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
5938         struct ether_addr *mac_addr;
5939         uint32_t i;
5940         int diag;
5941
5942         /*
5943          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
5944          * not support the deletion of a given MAC address.
5945          * Instead, it requires deleting all MAC addresses and then re-adding
5946          * all of them, with the exception of the one to be deleted.
5947          */
5948         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
5949
5950         /*
5951          * Re-add all MAC addresses, with the exception of the deleted one
5952          * and of the permanent MAC address.
5953          */
5954         for (i = 0, mac_addr = dev->data->mac_addrs;
5955              i < hw->mac.num_rar_entries; i++, mac_addr++) {
5956                 /* Skip the deleted MAC address */
5957                 if (i == index)
5958                         continue;
5959                 /* Skip NULL MAC addresses */
5960                 if (is_zero_ether_addr(mac_addr))
5961                         continue;
5962                 /* Skip the permanent MAC address */
5963                 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5964                         continue;
5965                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5966                 if (diag != 0)
5967                         PMD_DRV_LOG(ERR,
5968                                     "Adding again MAC address "
5969                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
5970                                     "diag=%d",
5971                                     mac_addr->addr_bytes[0],
5972                                     mac_addr->addr_bytes[1],
5973                                     mac_addr->addr_bytes[2],
5974                                     mac_addr->addr_bytes[3],
5975                                     mac_addr->addr_bytes[4],
5976                                     mac_addr->addr_bytes[5],
5977                                     diag);
5978         }
5979 }
5980
5981 static void
5982 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
5983 {
5984         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5985
5986         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
5987 }
5988
5989 int
5990 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
5991                         struct rte_eth_syn_filter *filter,
5992                         bool add)
5993 {
5994         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5995         struct ixgbe_filter_info *filter_info =
5996                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5997         uint32_t syn_info;
5998         uint32_t synqf;
5999
6000         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6001                 return -EINVAL;
6002
6003         syn_info = filter_info->syn_info;
6004
6005         if (add) {
6006                 if (syn_info & IXGBE_SYN_FILTER_ENABLE)
6007                         return -EINVAL;
6008                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
6009                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
6010
6011                 if (filter->hig_pri)
6012                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
6013                 else
6014                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
6015         } else {
6016                 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6017                 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
6018                         return -ENOENT;
6019                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
6020         }
6021
6022         filter_info->syn_info = synqf;
6023         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
6024         IXGBE_WRITE_FLUSH(hw);
6025         return 0;
6026 }
6027
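/*
 * Illustration of the SYNQF encoding built above: for queue 3 the value is
 * (3 << IXGBE_SYN_FILTER_QUEUE_SHIFT) | IXGBE_SYN_FILTER_ENABLE, and
 * setting hig_pri additionally sets IXGBE_SYN_FILTER_SYNQFP, the
 * high-priority bit.  A queue shift of 1 is consistent with the ">> 1"
 * used when reading the filter back in ixgbe_syn_filter_get() below.
 */
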
6028 static int
6029 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
6030                         struct rte_eth_syn_filter *filter)
6031 {
6032         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6033         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6034
6035         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
6036                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
6037                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
6038                 return 0;
6039         }
6040         return -ENOENT;
6041 }
6042
6043 static int
6044 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
6045                         enum rte_filter_op filter_op,
6046                         void *arg)
6047 {
6048         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6049         int ret;
6050
6051         MAC_TYPE_FILTER_SUP(hw->mac.type);
6052
6053         if (filter_op == RTE_ETH_FILTER_NOP)
6054                 return 0;
6055
6056         if (arg == NULL) {
6057                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
6058                             filter_op);
6059                 return -EINVAL;
6060         }
6061
6062         switch (filter_op) {
6063         case RTE_ETH_FILTER_ADD:
6064                 ret = ixgbe_syn_filter_set(dev,
6065                                 (struct rte_eth_syn_filter *)arg,
6066                                 TRUE);
6067                 break;
6068         case RTE_ETH_FILTER_DELETE:
6069                 ret = ixgbe_syn_filter_set(dev,
6070                                 (struct rte_eth_syn_filter *)arg,
6071                                 FALSE);
6072                 break;
6073         case RTE_ETH_FILTER_GET:
6074                 ret = ixgbe_syn_filter_get(dev,
6075                                 (struct rte_eth_syn_filter *)arg);
6076                 break;
6077         default:
6078                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
6079                 ret = -EINVAL;
6080                 break;
6081         }
6082
6083         return ret;
6084 }
6085
6086
6087 static inline enum ixgbe_5tuple_protocol
6088 convert_protocol_type(uint8_t protocol_value)
6089 {
6090         if (protocol_value == IPPROTO_TCP)
6091                 return IXGBE_FILTER_PROTOCOL_TCP;
6092         else if (protocol_value == IPPROTO_UDP)
6093                 return IXGBE_FILTER_PROTOCOL_UDP;
6094         else if (protocol_value == IPPROTO_SCTP)
6095                 return IXGBE_FILTER_PROTOCOL_SCTP;
6096         else
6097                 return IXGBE_FILTER_PROTOCOL_NONE;
6098 }
6099
6100 /* inject a 5-tuple filter into the HW */
6101 static inline void
6102 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
6103                            struct ixgbe_5tuple_filter *filter)
6104 {
6105         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6106         int i;
6107         uint32_t ftqf, sdpqf;
6108         uint32_t l34timir = 0;
6109         uint8_t mask = 0xff;
6110
6111         i = filter->index;
6112
6113         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
6114                                 IXGBE_SDPQF_DSTPORT_SHIFT);
6115         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
6116
6117         ftqf = (uint32_t)(filter->filter_info.proto &
6118                 IXGBE_FTQF_PROTOCOL_MASK);
6119         ftqf |= (uint32_t)((filter->filter_info.priority &
6120                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
6121         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
6122                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
6123         if (filter->filter_info.dst_ip_mask == 0)
6124                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
6125         if (filter->filter_info.src_port_mask == 0)
6126                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
6127         if (filter->filter_info.dst_port_mask == 0)
6128                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
6129         if (filter->filter_info.proto_mask == 0)
6130                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
6131         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
6132         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
6133         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
6134
6135         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
6136         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
6137         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
6138         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
6139
6140         l34timir |= IXGBE_L34T_IMIR_RESERVE;
6141         l34timir |= (uint32_t)(filter->queue <<
6142                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
6143         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
6144 }
6145
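/*
 * Note on the 5-tuple mask handling above (informational): in
 * ixgbe_5tuple_filter_info a mask field of 0 means "compare this field",
 * and each such field clears the corresponding "ignore" bit in the FTQF
 * mask, which starts as all-ones (ignore everything).  A filter that only
 * matches destination IP and destination port would therefore keep only
 * those two compare bits active, with source address, source port and
 * protocol left masked off.
 */
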
6146 /*
6147  * add a 5tuple filter
6148  *
6149  * @param
6150  * dev: Pointer to struct rte_eth_dev.
6151  * index: the index that the filter allocates.
6152  * filter: pointer to the filter that will be added.
6153  * rx_queue: the queue id the filter is assigned to.
6154  *
6155  * @return
6156  *    - On success, zero.
6157  *    - On failure, a negative value.
6158  */
6159 static int
6160 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
6161                         struct ixgbe_5tuple_filter *filter)
6162 {
6163         struct ixgbe_filter_info *filter_info =
6164                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6165         int i, idx, shift;
6166
6167         /*
6168          * look for an unused 5tuple filter index,
6169          * and insert the filter into the list.
6170          */
6171         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
6172                 idx = i / (sizeof(uint32_t) * NBBY);
6173                 shift = i % (sizeof(uint32_t) * NBBY);
6174                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
6175                         filter_info->fivetuple_mask[idx] |= 1 << shift;
6176                         filter->index = i;
6177                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
6178                                           filter,
6179                                           entries);
6180                         break;
6181                 }
6182         }
6183         if (i >= IXGBE_MAX_FTQF_FILTERS) {
6184                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
6185                 return -ENOSYS;
6186         }
6187
6188         ixgbe_inject_5tuple_filter(dev, filter);
6189
6190         return 0;
6191 }
6192
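/*
 * Illustration of the free-slot search above: the fivetuple_mask bitmap
 * packs one bit per filter into 32-bit words (sizeof(uint32_t) * NBBY = 32
 * bits each, NBBY being the number of bits per byte), so filter index 37
 * lives at idx = 37 / 32 = 1 and shift = 37 % 32 = 5, i.e. bit 5 of
 * fivetuple_mask[1].
 */
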
6193 /*
6194  * remove a 5tuple filter
6195  *
6196  * @param
6197  * dev: Pointer to struct rte_eth_dev.
6198  * filter: pointer to the filter to be removed.
6199  */
6200 static void
6201 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
6202                         struct ixgbe_5tuple_filter *filter)
6203 {
6204         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6205         struct ixgbe_filter_info *filter_info =
6206                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6207         uint16_t index = filter->index;
6208
6209         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
6210                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
6211         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
6212         rte_free(filter);
6213
6214         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
6215         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
6216         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
6217         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
6218         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
6219 }
6220
6221 static int
6222 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
6223 {
6224         struct ixgbe_hw *hw;
6225         uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
6226         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
6227
6228         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6229
6230         if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
6231                 return -EINVAL;
6232
6233         /* Refuse an MTU that requires scattered packet support when that
6234          * feature has not been enabled beforehand.
6235          */
6236         if (!rx_conf->enable_scatter &&
6237             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
6238              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
6239                 return -EINVAL;
6240
6241         /*
6242          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
6243          * request of version 2.0 of the mailbox API.
6244          * For now, use the IXGBE_VF_SET_LPE request of version 1.0
6245          * of the mailbox API.
6246          * This IXGBE_VF_SET_LPE request won't work with ixgbe PF drivers
6247          * prior to 3.11.33 which contains the following change:
6248          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
6249          */
6250         ixgbevf_rlpml_set_vf(hw, max_frame);
6251
6252         /* update max frame size */
6253         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
6254         return 0;
6255 }
6256
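/*
 * Worked example for the frame-size check above (illustrative): with the
 * usual ETHER_HDR_LEN of 14 and ETHER_CRC_LEN of 4, an MTU of 1500 gives
 * max_frame = 1518.  The scatter check then requires
 * 1518 + 2 * IXGBE_VLAN_TAG_SIZE to fit into
 * min_rx_buf_size - RTE_PKTMBUF_HEADROOM, otherwise the new MTU is refused
 * unless scattered Rx has been enabled.
 */
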
6257 static inline struct ixgbe_5tuple_filter *
6258 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
6259                         struct ixgbe_5tuple_filter_info *key)
6260 {
6261         struct ixgbe_5tuple_filter *it;
6262
6263         TAILQ_FOREACH(it, filter_list, entries) {
6264                 if (memcmp(key, &it->filter_info,
6265                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
6266                         return it;
6267                 }
6268         }
6269         return NULL;
6270 }
6271
6272 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
6273 static inline int
6274 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
6275                         struct ixgbe_5tuple_filter_info *filter_info)
6276 {
6277         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
6278                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
6279                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
6280                 return -EINVAL;
6281
6282         switch (filter->dst_ip_mask) {
6283         case UINT32_MAX:
6284                 filter_info->dst_ip_mask = 0;
6285                 filter_info->dst_ip = filter->dst_ip;
6286                 break;
6287         case 0:
6288                 filter_info->dst_ip_mask = 1;
6289                 break;
6290         default:
6291                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
6292                 return -EINVAL;
6293         }
6294
6295         switch (filter->src_ip_mask) {
6296         case UINT32_MAX:
6297                 filter_info->src_ip_mask = 0;
6298                 filter_info->src_ip = filter->src_ip;
6299                 break;
6300         case 0:
6301                 filter_info->src_ip_mask = 1;
6302                 break;
6303         default:
6304                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
6305                 return -EINVAL;
6306         }
6307
6308         switch (filter->dst_port_mask) {
6309         case UINT16_MAX:
6310                 filter_info->dst_port_mask = 0;
6311                 filter_info->dst_port = filter->dst_port;
6312                 break;
6313         case 0:
6314                 filter_info->dst_port_mask = 1;
6315                 break;
6316         default:
6317                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
6318                 return -EINVAL;
6319         }
6320
6321         switch (filter->src_port_mask) {
6322         case UINT16_MAX:
6323                 filter_info->src_port_mask = 0;
6324                 filter_info->src_port = filter->src_port;
6325                 break;
6326         case 0:
6327                 filter_info->src_port_mask = 1;
6328                 break;
6329         default:
6330                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
6331                 return -EINVAL;
6332         }
6333
6334         switch (filter->proto_mask) {
6335         case UINT8_MAX:
6336                 filter_info->proto_mask = 0;
6337                 filter_info->proto =
6338                         convert_protocol_type(filter->proto);
6339                 break;
6340         case 0:
6341                 filter_info->proto_mask = 1;
6342                 break;
6343         default:
6344                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
6345                 return -EINVAL;
6346         }
6347
6348         filter_info->priority = (uint8_t)filter->priority;
6349         return 0;
6350 }
6351
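/*
 * Note on the mask convention used above: rte_eth_ntuple_filter expresses
 * "match this field" as an all-ones mask (e.g. UINT32_MAX for an IP
 * address), while ixgbe_5tuple_filter_info uses the opposite encoding
 * (0 = compare, 1 = don't care).  ntuple_filter_to_5tuple() therefore maps
 * UINT32_MAX/UINT16_MAX/UINT8_MAX to an internal mask of 0 and a mask of 0
 * to an internal mask of 1; any partial mask is rejected with -EINVAL.
 */
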
6352 /*
6353  * add or delete an ntuple filter
6354  *
6355  * @param
6356  * dev: Pointer to struct rte_eth_dev.
6357  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6358  * add: if true, add the filter; if false, remove it
6359  *
6360  * @return
6361  *    - On success, zero.
6362  *    - On failure, a negative value.
6363  */
6364 int
6365 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
6366                         struct rte_eth_ntuple_filter *ntuple_filter,
6367                         bool add)
6368 {
6369         struct ixgbe_filter_info *filter_info =
6370                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6371         struct ixgbe_5tuple_filter_info filter_5tuple;
6372         struct ixgbe_5tuple_filter *filter;
6373         int ret;
6374
6375         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6376                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6377                 return -EINVAL;
6378         }
6379
6380         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6381         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6382         if (ret < 0)
6383                 return ret;
6384
6385         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6386                                          &filter_5tuple);
6387         if (filter != NULL && add) {
6388                 PMD_DRV_LOG(ERR, "filter exists.");
6389                 return -EEXIST;
6390         }
6391         if (filter == NULL && !add) {
6392                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6393                 return -ENOENT;
6394         }
6395
6396         if (add) {
6397                 filter = rte_zmalloc("ixgbe_5tuple_filter",
6398                                 sizeof(struct ixgbe_5tuple_filter), 0);
6399                 if (filter == NULL)
6400                         return -ENOMEM;
6401                 rte_memcpy(&filter->filter_info,
6402                                  &filter_5tuple,
6403                                  sizeof(struct ixgbe_5tuple_filter_info));
6404                 filter->queue = ntuple_filter->queue;
6405                 ret = ixgbe_add_5tuple_filter(dev, filter);
6406                 if (ret < 0) {
6407                         rte_free(filter);
6408                         return ret;
6409                 }
6410         } else
6411                 ixgbe_remove_5tuple_filter(dev, filter);
6412
6413         return 0;
6414 }
6415
6416 /*
6417  * get an ntuple filter
6418  *
6419  * @param
6420  * dev: Pointer to struct rte_eth_dev.
6421  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6422  *
6423  * @return
6424  *    - On success, zero.
6425  *    - On failure, a negative value.
6426  */
6427 static int
6428 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
6429                         struct rte_eth_ntuple_filter *ntuple_filter)
6430 {
6431         struct ixgbe_filter_info *filter_info =
6432                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6433         struct ixgbe_5tuple_filter_info filter_5tuple;
6434         struct ixgbe_5tuple_filter *filter;
6435         int ret;
6436
6437         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6438                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6439                 return -EINVAL;
6440         }
6441
6442         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6443         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6444         if (ret < 0)
6445                 return ret;
6446
6447         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6448                                          &filter_5tuple);
6449         if (filter == NULL) {
6450                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6451                 return -ENOENT;
6452         }
6453         ntuple_filter->queue = filter->queue;
6454         return 0;
6455 }
6456
6457 /*
6458  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
6459  * @dev: pointer to rte_eth_dev structure
6460  * @filter_op: the operation to be taken.
6461  * @arg: a pointer to specific structure corresponding to the filter_op
6462  *
6463  * @return
6464  *    - On success, zero.
6465  *    - On failure, a negative value.
6466  */
6467 static int
6468 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
6469                                 enum rte_filter_op filter_op,
6470                                 void *arg)
6471 {
6472         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6473         int ret;
6474
6475         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
6476
6477         if (filter_op == RTE_ETH_FILTER_NOP)
6478                 return 0;
6479
6480         if (arg == NULL) {
6481                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6482                             filter_op);
6483                 return -EINVAL;
6484         }
6485
6486         switch (filter_op) {
6487         case RTE_ETH_FILTER_ADD:
6488                 ret = ixgbe_add_del_ntuple_filter(dev,
6489                         (struct rte_eth_ntuple_filter *)arg,
6490                         TRUE);
6491                 break;
6492         case RTE_ETH_FILTER_DELETE:
6493                 ret = ixgbe_add_del_ntuple_filter(dev,
6494                         (struct rte_eth_ntuple_filter *)arg,
6495                         FALSE);
6496                 break;
6497         case RTE_ETH_FILTER_GET:
6498                 ret = ixgbe_get_ntuple_filter(dev,
6499                         (struct rte_eth_ntuple_filter *)arg);
6500                 break;
6501         default:
6502                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6503                 ret = -EINVAL;
6504                 break;
6505         }
6506         return ret;
6507 }
6508
6509 int
6510 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6511                         struct rte_eth_ethertype_filter *filter,
6512                         bool add)
6513 {
6514         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6515         struct ixgbe_filter_info *filter_info =
6516                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6517         uint32_t etqf = 0;
6518         uint32_t etqs = 0;
6519         int ret;
6520         struct ixgbe_ethertype_filter ethertype_filter;
6521
6522         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6523                 return -EINVAL;
6524
6525         if (filter->ether_type == ETHER_TYPE_IPv4 ||
6526                 filter->ether_type == ETHER_TYPE_IPv6) {
6527                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
6528                         " ethertype filter.", filter->ether_type);
6529                 return -EINVAL;
6530         }
6531
6532         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6533                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6534                 return -EINVAL;
6535         }
6536         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6537                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
6538                 return -EINVAL;
6539         }
6540
6541         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6542         if (ret >= 0 && add) {
6543                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6544                             filter->ether_type);
6545                 return -EEXIST;
6546         }
6547         if (ret < 0 && !add) {
6548                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6549                             filter->ether_type);
6550                 return -ENOENT;
6551         }
6552
6553         if (add) {
6554                 etqf = IXGBE_ETQF_FILTER_EN;
6555                 etqf |= (uint32_t)filter->ether_type;
6556                 etqs |= (uint32_t)((filter->queue <<
6557                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
6558                                     IXGBE_ETQS_RX_QUEUE);
6559                 etqs |= IXGBE_ETQS_QUEUE_EN;
6560
6561                 ethertype_filter.ethertype = filter->ether_type;
6562                 ethertype_filter.etqf = etqf;
6563                 ethertype_filter.etqs = etqs;
6564                 ethertype_filter.conf = FALSE;
6565                 ret = ixgbe_ethertype_filter_insert(filter_info,
6566                                                     &ethertype_filter);
6567                 if (ret < 0) {
6568                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
6569                         return -ENOSPC;
6570                 }
6571         } else {
6572                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6573                 if (ret < 0)
6574                         return -ENOSYS;
6575         }
6576         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6577         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6578         IXGBE_WRITE_FLUSH(hw);
6579
6580         return 0;
6581 }
6582
6583 static int
6584 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
6585                         struct rte_eth_ethertype_filter *filter)
6586 {
6587         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6588         struct ixgbe_filter_info *filter_info =
6589                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6590         uint32_t etqf, etqs;
6591         int ret;
6592
6593         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6594         if (ret < 0) {
6595                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6596                             filter->ether_type);
6597                 return -ENOENT;
6598         }
6599
6600         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
6601         if (etqf & IXGBE_ETQF_FILTER_EN) {
6602                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
6603                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
6604                 filter->flags = 0;
6605                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
6606                                IXGBE_ETQS_RX_QUEUE_SHIFT;
6607                 return 0;
6608         }
6609         return -ENOENT;
6610 }
6611
6612 /*
6613  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
6614  * @dev: pointer to rte_eth_dev structure
6615  * @filter_op: the operation to be taken.
6616  * @arg: a pointer to specific structure corresponding to the filter_op
6617  */
6618 static int
6619 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
6620                                 enum rte_filter_op filter_op,
6621                                 void *arg)
6622 {
6623         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6624         int ret;
6625
6626         MAC_TYPE_FILTER_SUP(hw->mac.type);
6627
6628         if (filter_op == RTE_ETH_FILTER_NOP)
6629                 return 0;
6630
6631         if (arg == NULL) {
6632                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6633                             filter_op);
6634                 return -EINVAL;
6635         }
6636
6637         switch (filter_op) {
6638         case RTE_ETH_FILTER_ADD:
6639                 ret = ixgbe_add_del_ethertype_filter(dev,
6640                         (struct rte_eth_ethertype_filter *)arg,
6641                         TRUE);
6642                 break;
6643         case RTE_ETH_FILTER_DELETE:
6644                 ret = ixgbe_add_del_ethertype_filter(dev,
6645                         (struct rte_eth_ethertype_filter *)arg,
6646                         FALSE);
6647                 break;
6648         case RTE_ETH_FILTER_GET:
6649                 ret = ixgbe_get_ethertype_filter(dev,
6650                         (struct rte_eth_ethertype_filter *)arg);
6651                 break;
6652         default:
6653                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6654                 ret = -EINVAL;
6655                 break;
6656         }
6657         return ret;
6658 }
6659
6660 static int
6661 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
6662                      enum rte_filter_type filter_type,
6663                      enum rte_filter_op filter_op,
6664                      void *arg)
6665 {
6666         int ret = 0;
6667
6668         switch (filter_type) {
6669         case RTE_ETH_FILTER_NTUPLE:
6670                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
6671                 break;
6672         case RTE_ETH_FILTER_ETHERTYPE:
6673                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
6674                 break;
6675         case RTE_ETH_FILTER_SYN:
6676                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
6677                 break;
6678         case RTE_ETH_FILTER_FDIR:
6679                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
6680                 break;
6681         case RTE_ETH_FILTER_L2_TUNNEL:
6682                 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
6683                 break;
6684         case RTE_ETH_FILTER_GENERIC:
6685                 if (filter_op != RTE_ETH_FILTER_GET)
6686                         return -EINVAL;
6687                 *(const void **)arg = &ixgbe_flow_ops;
6688                 break;
6689         default:
6690                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
6691                                                         filter_type);
6692                 ret = -EINVAL;
6693                 break;
6694         }
6695
6696         return ret;
6697 }
6698
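     /* Iterator callback for ixgbe_update_mc_addr_list(): return the current
      * multicast address and advance the list pointer; the VMDq pool index is
      * always reported as 0.
      */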
6699 static u8 *
6700 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
6701                         u8 **mc_addr_ptr, u32 *vmdq)
6702 {
6703         u8 *mc_addr;
6704
6705         *vmdq = 0;
6706         mc_addr = *mc_addr_ptr;
6707         *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
6708         return mc_addr;
6709 }
6710
6711 static int
6712 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
6713                           struct ether_addr *mc_addr_set,
6714                           uint32_t nb_mc_addr)
6715 {
6716         struct ixgbe_hw *hw;
6717         u8 *mc_addr_list;
6718
6719         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6720         mc_addr_list = (u8 *)mc_addr_set;
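             /* The final TRUE asks the base driver to clear the previously
              * programmed multicast table before adding the new list
              * (assumption based on the base ixgbe_update_mc_addr_list() API).
              */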
6721         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
6722                                          ixgbe_dev_addr_list_itr, TRUE);
6723 }
6724
6725 static uint64_t
6726 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
6727 {
6728         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6729         uint64_t systime_cycles;
6730
6731         switch (hw->mac.type) {
6732         case ixgbe_mac_X550:
6733         case ixgbe_mac_X550EM_x:
6734         case ixgbe_mac_X550EM_a:
6735                 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
6736                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6737                 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6738                                 * NSEC_PER_SEC;
6739                 break;
6740         default:
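                     /* On other MACs SYSTIML/SYSTIMH are combined into one
                      * 64-bit cycle-counter value.
                      */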
6741                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6742                 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6743                                 << 32;
6744         }
6745
6746         return systime_cycles;
6747 }
6748
6749 static uint64_t
6750 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6751 {
6752         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6753         uint64_t rx_tstamp_cycles;
6754
6755         switch (hw->mac.type) {
6756         case ixgbe_mac_X550:
6757         case ixgbe_mac_X550EM_x:
6758         case ixgbe_mac_X550EM_a:
6759                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
6760                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6761                 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6762                                 * NSEC_PER_SEC;
6763                 break;
6764         default:
6765                 /* RXSTMPL/RXSTMPH are combined into one 64-bit cycle-counter value. */
6766                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6767                 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6768                                 << 32;
6769         }
6770
6771         return rx_tstamp_cycles;
6772 }
6773
6774 static uint64_t
6775 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6776 {
6777         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6778         uint64_t tx_tstamp_cycles;
6779
6780         switch (hw->mac.type) {
6781         case ixgbe_mac_X550:
6782         case ixgbe_mac_X550EM_x:
6783         case ixgbe_mac_X550EM_a:
6784                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
6785                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6786                 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6787                                 * NSEC_PER_SEC;
6788                 break;
6789         default:
6790                 /* TXSTMPL/TXSTMPH are combined into one 64-bit cycle-counter value. */
6791                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6792                 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6793                                 << 32;
6794         }
6795
6796         return tx_tstamp_cycles;
6797 }
6798
6799 static void
6800 ixgbe_start_timecounters(struct rte_eth_dev *dev)
6801 {
6802         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6803         struct ixgbe_adapter *adapter =
6804                 (struct ixgbe_adapter *)dev->data->dev_private;
6805         struct rte_eth_link link;
6806         uint32_t incval = 0;
6807         uint32_t shift = 0;
6808
6809         /* Get current link speed. */
6810         memset(&link, 0, sizeof(link));
6811         ixgbe_dev_link_update(dev, 1);
6812         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
6813
6814         switch (link.link_speed) {
6815         case ETH_SPEED_NUM_100M:
6816                 incval = IXGBE_INCVAL_100;
6817                 shift = IXGBE_INCVAL_SHIFT_100;
6818                 break;
6819         case ETH_SPEED_NUM_1G:
6820                 incval = IXGBE_INCVAL_1GB;
6821                 shift = IXGBE_INCVAL_SHIFT_1GB;
6822                 break;
6823         case ETH_SPEED_NUM_10G:
6824         default:
6825                 incval = IXGBE_INCVAL_10GB;
6826                 shift = IXGBE_INCVAL_SHIFT_10GB;
6827                 break;
6828         }
6829
6830         switch (hw->mac.type) {
6831         case ixgbe_mac_X550:
6832         case ixgbe_mac_X550EM_x:
6833         case ixgbe_mac_X550EM_a:
6834                 /* Independent of link speed. */
6835                 incval = 1;
6836                 /* Cycles read will be interpreted as ns. */
6837                 shift = 0;
6838                 /* Fall-through */
6839         case ixgbe_mac_X540:
6840                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
6841                 break;
6842         case ixgbe_mac_82599EB:
6843                 incval >>= IXGBE_INCVAL_SHIFT_82599;
6844                 shift -= IXGBE_INCVAL_SHIFT_82599;
6845                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
6846                                 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
6847                 break;
6848         default:
6849                 /* Not supported. */
6850                 return;
6851         }
6852
6853         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
6854         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6855         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6856
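             /* The shift/mask values chosen above are consumed by
              * rte_timecounter_update() to convert raw cycle deltas into
              * nanoseconds.
              */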
6857         adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6858         adapter->systime_tc.cc_shift = shift;
6859         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
6860
6861         adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6862         adapter->rx_tstamp_tc.cc_shift = shift;
6863         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6864
6865         adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6866         adapter->tx_tstamp_tc.cc_shift = shift;
6867         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6868 }
6869
6870 static int
6871 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
6872 {
6873         struct ixgbe_adapter *adapter =
6874                         (struct ixgbe_adapter *)dev->data->dev_private;
6875
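             /* Only the SW timecounters are adjusted; the HW SYSTIM registers
              * are left untouched.
              */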
6876         adapter->systime_tc.nsec += delta;
6877         adapter->rx_tstamp_tc.nsec += delta;
6878         adapter->tx_tstamp_tc.nsec += delta;
6879
6880         return 0;
6881 }
6882
6883 static int
6884 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
6885 {
6886         uint64_t ns;
6887         struct ixgbe_adapter *adapter =
6888                         (struct ixgbe_adapter *)dev->data->dev_private;
6889
6890         ns = rte_timespec_to_ns(ts);
6891         /* Set the timecounters to a new value. */
6892         adapter->systime_tc.nsec = ns;
6893         adapter->rx_tstamp_tc.nsec = ns;
6894         adapter->tx_tstamp_tc.nsec = ns;
6895
6896         return 0;
6897 }
6898
6899 static int
6900 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
6901 {
6902         uint64_t ns, systime_cycles;
6903         struct ixgbe_adapter *adapter =
6904                         (struct ixgbe_adapter *)dev->data->dev_private;
6905
6906         systime_cycles = ixgbe_read_systime_cyclecounter(dev);
6907         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
6908         *ts = rte_ns_to_timespec(ns);
6909
6910         return 0;
6911 }
6912
6913 static int
6914 ixgbe_timesync_enable(struct rte_eth_dev *dev)
6915 {
6916         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6917         uint32_t tsync_ctl;
6918         uint32_t tsauxc;
6919
6920         /* Stop the timesync system time. */
6921         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
6922         /* Reset the timesync system time value. */
6923         IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
6924         IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
6925
6926         /* Enable system time for platforms where it isn't on by default. */
6927         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
6928         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
6929         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
6930
6931         ixgbe_start_timecounters(dev);
6932
6933         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6934         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
6935                         (ETHER_TYPE_1588 |
6936                          IXGBE_ETQF_FILTER_EN |
6937                          IXGBE_ETQF_1588));
6938
6939         /* Enable timestamping of received PTP packets. */
6940         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6941         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
6942         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6943
6944         /* Enable timestamping of transmitted PTP packets. */
6945         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6946         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
6947         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6948
6949         IXGBE_WRITE_FLUSH(hw);
6950
6951         return 0;
6952 }
6953
6954 static int
6955 ixgbe_timesync_disable(struct rte_eth_dev *dev)
6956 {
6957         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6958         uint32_t tsync_ctl;
6959
6960         /* Disable timestamping of transmitted PTP packets. */
6961         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6962         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
6963         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6964
6965         /* Disable timestamping of received PTP packets. */
6966         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6967         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
6968         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6969
6970         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6971         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
6972
6973         /* Stop incrementing the System Time registers. */
6974         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
6975
6976         return 0;
6977 }
6978
6979 static int
6980 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
6981                                  struct timespec *timestamp,
6982                                  uint32_t flags __rte_unused)
6983 {
6984         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6985         struct ixgbe_adapter *adapter =
6986                 (struct ixgbe_adapter *)dev->data->dev_private;
6987         uint32_t tsync_rxctl;
6988         uint64_t rx_tstamp_cycles;
6989         uint64_t ns;
6990
6991         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6992         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
6993                 return -EINVAL;
6994
6995         rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
6996         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
6997         *timestamp = rte_ns_to_timespec(ns);
6998
6999         return  0;
7000 }
7001
7002 static int
7003 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
7004                                  struct timespec *timestamp)
7005 {
7006         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7007         struct ixgbe_adapter *adapter =
7008                 (struct ixgbe_adapter *)dev->data->dev_private;
7009         uint32_t tsync_txctl;
7010         uint64_t tx_tstamp_cycles;
7011         uint64_t ns;
7012
7013         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7014         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
7015                 return -EINVAL;
7016
7017         tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
7018         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
7019         *timestamp = rte_ns_to_timespec(ns);
7020
7021         return 0;
7022 }
7023
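     /* Count the registers in the dump set; 82598 has its own register table,
      * all other MAC types share ixgbe_regs_others.
      */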
7024 static int
7025 ixgbe_get_reg_length(struct rte_eth_dev *dev)
7026 {
7027         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7028         int count = 0;
7029         int g_ind = 0;
7030         const struct reg_info *reg_group;
7031         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7032                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7033
7034         while ((reg_group = reg_set[g_ind++]))
7035                 count += ixgbe_regs_group_count(reg_group);
7036
7037         return count;
7038 }
7039
7040 static int
7041 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
7042 {
7043         int count = 0;
7044         int g_ind = 0;
7045         const struct reg_info *reg_group;
7046
7047         while ((reg_group = ixgbevf_regs[g_ind++]))
7048                 count += ixgbe_regs_group_count(reg_group);
7049
7050         return count;
7051 }
7052
7053 static int
7054 ixgbe_get_regs(struct rte_eth_dev *dev,
7055               struct rte_dev_reg_info *regs)
7056 {
7057         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7058         uint32_t *data = regs->data;
7059         int g_ind = 0;
7060         int count = 0;
7061         const struct reg_info *reg_group;
7062         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7063                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7064
7065         if (data == NULL) {
7066                 regs->length = ixgbe_get_reg_length(dev);
7067                 regs->width = sizeof(uint32_t);
7068                 return 0;
7069         }
7070
7071         /* Support only full register dump */
7072         if ((regs->length == 0) ||
7073             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
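                     /* Encode MAC type, revision ID and device ID into the
                      * reported register-dump version.
                      */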
7074                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7075                         hw->device_id;
7076                 while ((reg_group = reg_set[g_ind++]))
7077                         count += ixgbe_read_regs_group(dev, &data[count],
7078                                 reg_group);
7079                 return 0;
7080         }
7081
7082         return -ENOTSUP;
7083 }
7084
7085 static int
7086 ixgbevf_get_regs(struct rte_eth_dev *dev,
7087                 struct rte_dev_reg_info *regs)
7088 {
7089         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7090         uint32_t *data = regs->data;
7091         int g_ind = 0;
7092         int count = 0;
7093         const struct reg_info *reg_group;
7094
7095         if (data == NULL) {
7096                 regs->length = ixgbevf_get_reg_length(dev);
7097                 regs->width = sizeof(uint32_t);
7098                 return 0;
7099         }
7100
7101         /* Support only full register dump */
7102         if ((regs->length == 0) ||
7103             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
7104                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7105                         hw->device_id;
7106                 while ((reg_group = ixgbevf_regs[g_ind++]))
7107                         count += ixgbe_read_regs_group(dev, &data[count],
7108                                                       reg_group);
7109                 return 0;
7110         }
7111
7112         return -ENOTSUP;
7113 }
7114
7115 static int
7116 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
7117 {
7118         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7119
7120         /* word_size is in 16-bit words; return the EEPROM size in bytes. */
7121         return hw->eeprom.word_size * 2;
7122 }
7123
7124 static int
7125 ixgbe_get_eeprom(struct rte_eth_dev *dev,
7126                 struct rte_dev_eeprom_info *in_eeprom)
7127 {
7128         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7129         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7130         uint16_t *data = in_eeprom->data;
7131         int first, length;
7132
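             /* Offset and length are given in bytes; convert them to the
              * 16-bit EEPROM words the read_buffer() op expects.
              */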
7133         first = in_eeprom->offset >> 1;
7134         length = in_eeprom->length >> 1;
7135         if ((first > hw->eeprom.word_size) ||
7136             ((first + length) > hw->eeprom.word_size))
7137                 return -EINVAL;
7138
7139         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7140
7141         return eeprom->ops.read_buffer(hw, first, length, data);
7142 }
7143
7144 static int
7145 ixgbe_set_eeprom(struct rte_eth_dev *dev,
7146                 struct rte_dev_eeprom_info *in_eeprom)
7147 {
7148         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7149         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7150         uint16_t *data = in_eeprom->data;
7151         int first, length;
7152
7153         first = in_eeprom->offset >> 1;
7154         length = in_eeprom->length >> 1;
7155         if ((first > hw->eeprom.word_size) ||
7156             ((first + length) > hw->eeprom.word_size))
7157                 return -EINVAL;
7158
7159         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7160
7161         return eeprom->ops.write_buffer(hw, first, length, data);
7162 }
7163
7164 uint16_t
7165 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
7166         switch (mac_type) {
7167         case ixgbe_mac_X550:
7168         case ixgbe_mac_X550EM_x:
7169         case ixgbe_mac_X550EM_a:
7170                 return ETH_RSS_RETA_SIZE_512;
7171         case ixgbe_mac_X550_vf:
7172         case ixgbe_mac_X550EM_x_vf:
7173         case ixgbe_mac_X550EM_a_vf:
7174                 return ETH_RSS_RETA_SIZE_64;
7175         default:
7176                 return ETH_RSS_RETA_SIZE_128;
7177         }
7178 }
7179
7180 uint32_t
7181 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
7182         switch (mac_type) {
7183         case ixgbe_mac_X550:
7184         case ixgbe_mac_X550EM_x:
7185         case ixgbe_mac_X550EM_a:
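                     /* On the X550 family the first 128 RETA entries live in
                      * RETA; the remaining entries are addressed via ERETA.
                      */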
7186                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
7187                         return IXGBE_RETA(reta_idx >> 2);
7188                 else
7189                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
7190         case ixgbe_mac_X550_vf:
7191         case ixgbe_mac_X550EM_x_vf:
7192         case ixgbe_mac_X550EM_a_vf:
7193                 return IXGBE_VFRETA(reta_idx >> 2);
7194         default:
7195                 return IXGBE_RETA(reta_idx >> 2);
7196         }
7197 }
7198
7199 uint32_t
7200 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
7201         switch (mac_type) {
7202         case ixgbe_mac_X550_vf:
7203         case ixgbe_mac_X550EM_x_vf:
7204         case ixgbe_mac_X550EM_a_vf:
7205                 return IXGBE_VFMRQC;
7206         default:
7207                 return IXGBE_MRQC;
7208         }
7209 }
7210
7211 uint32_t
7212 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
7213         switch (mac_type) {
7214         case ixgbe_mac_X550_vf:
7215         case ixgbe_mac_X550EM_x_vf:
7216         case ixgbe_mac_X550EM_a_vf:
7217                 return IXGBE_VFRSSRK(i);
7218         default:
7219                 return IXGBE_RSSRK(i);
7220         }
7221 }
7222
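     /* Whether runtime RSS update is supported: 82599 and X540 VFs report
      * false, all other MAC types report true.
      */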
7223 bool
7224 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
7225         switch (mac_type) {
7226         case ixgbe_mac_82599_vf:
7227         case ixgbe_mac_X540_vf:
7228                 return 0;
7229         default:
7230                 return 1;
7231         }
7232 }
7233
7234 static int
7235 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
7236                         struct rte_eth_dcb_info *dcb_info)
7237 {
7238         struct ixgbe_dcb_config *dcb_config =
7239                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
7240         struct ixgbe_dcb_tc_config *tc;
7241         struct rte_eth_dcb_tc_queue_mapping *tc_queue;
7242         uint8_t nb_tcs;
7243         uint8_t i, j;
7244
7245         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
7246                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
7247         else
7248                 dcb_info->nb_tcs = 1;
7249
7250         tc_queue = &dcb_info->tc_queue;
7251         nb_tcs = dcb_info->nb_tcs;
7252
7253         if (dcb_config->vt_mode) { /* vt is enabled*/
7254                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
7255                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
7256                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7257                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
7258                 if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
7259                         for (j = 0; j < nb_tcs; j++) {
7260                                 tc_queue->tc_rxq[0][j].base = j;
7261                                 tc_queue->tc_rxq[0][j].nb_queue = 1;
7262                                 tc_queue->tc_txq[0][j].base = j;
7263                                 tc_queue->tc_txq[0][j].nb_queue = 1;
7264                         }
7265                 } else {
7266                         for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
7267                                 for (j = 0; j < nb_tcs; j++) {
7268                                         tc_queue->tc_rxq[i][j].base =
7269                                                 i * nb_tcs + j;
7270                                         tc_queue->tc_rxq[i][j].nb_queue = 1;
7271                                         tc_queue->tc_txq[i][j].base =
7272                                                 i * nb_tcs + j;
7273                                         tc_queue->tc_txq[i][j].nb_queue = 1;
7274                                 }
7275                         }
7276                 }
7277         } else { /* vt is disabled*/
7278                 struct rte_eth_dcb_rx_conf *rx_conf =
7279                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
7280                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7281                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
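                     /* Fixed Rx/Tx queue-to-TC layouts used for the 4-TC and
                      * 8-TC DCB modes (queue base/count per traffic class).
                      */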
7282                 if (dcb_info->nb_tcs == ETH_4_TCS) {
7283                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7284                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
7285                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7286                         }
7287                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7288                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
7289                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
7290                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
7291                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
7292                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7293                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7294                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7295                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
7296                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7297                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
7298                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7299                         }
7300                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7301                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
7302                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
7303                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
7304                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
7305                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
7306                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
7307                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
7308                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
7309                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7310                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7311                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7312                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
7313                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
7314                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
7315                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
7316                 }
7317         }
7318         for (i = 0; i < dcb_info->nb_tcs; i++) {
7319                 tc = &dcb_config->tc_config[i];
7320                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
7321         }
7322         return 0;
7323 }
7324
7325 /* Update e-tag ether type */
7326 static int
7327 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
7328                             uint16_t ether_type)
7329 {
7330         uint32_t etag_etype;
7331
7332         if (hw->mac.type != ixgbe_mac_X550 &&
7333             hw->mac.type != ixgbe_mac_X550EM_x &&
7334             hw->mac.type != ixgbe_mac_X550EM_a) {
7335                 return -ENOTSUP;
7336         }
7337
7338         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7339         etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
7340         etag_etype |= ether_type;
7341         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7342         IXGBE_WRITE_FLUSH(hw);
7343
7344         return 0;
7345 }
7346
7347 /* Config l2 tunnel ether type */
7348 static int
7349 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
7350                                   struct rte_eth_l2_tunnel_conf *l2_tunnel)
7351 {
7352         int ret = 0;
7353         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7354         struct ixgbe_l2_tn_info *l2_tn_info =
7355                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7356
7357         if (l2_tunnel == NULL)
7358                 return -EINVAL;
7359
7360         switch (l2_tunnel->l2_tunnel_type) {
7361         case RTE_L2_TUNNEL_TYPE_E_TAG:
7362                 l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
7363                 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
7364                 break;
7365         default:
7366                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7367                 ret = -EINVAL;
7368                 break;
7369         }
7370
7371         return ret;
7372 }
7373
7374 /* Enable e-tag tunnel */
7375 static int
7376 ixgbe_e_tag_enable(struct ixgbe_hw *hw)
7377 {
7378         uint32_t etag_etype;
7379
7380         if (hw->mac.type != ixgbe_mac_X550 &&
7381             hw->mac.type != ixgbe_mac_X550EM_x &&
7382             hw->mac.type != ixgbe_mac_X550EM_a) {
7383                 return -ENOTSUP;
7384         }
7385
7386         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7387         etag_etype |= IXGBE_ETAG_ETYPE_VALID;
7388         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7389         IXGBE_WRITE_FLUSH(hw);
7390
7391         return 0;
7392 }
7393
7394 /* Enable l2 tunnel */
7395 static int
7396 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
7397                            enum rte_eth_tunnel_type l2_tunnel_type)
7398 {
7399         int ret = 0;
7400         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7401         struct ixgbe_l2_tn_info *l2_tn_info =
7402                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7403
7404         switch (l2_tunnel_type) {
7405         case RTE_L2_TUNNEL_TYPE_E_TAG:
7406                 l2_tn_info->e_tag_en = TRUE;
7407                 ret = ixgbe_e_tag_enable(hw);
7408                 break;
7409         default:
7410                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7411                 ret = -EINVAL;
7412                 break;
7413         }
7414
7415         return ret;
7416 }
7417
7418 /* Disable e-tag tunnel */
7419 static int
7420 ixgbe_e_tag_disable(struct ixgbe_hw *hw)
7421 {
7422         uint32_t etag_etype;
7423
7424         if (hw->mac.type != ixgbe_mac_X550 &&
7425             hw->mac.type != ixgbe_mac_X550EM_x &&
7426             hw->mac.type != ixgbe_mac_X550EM_a) {
7427                 return -ENOTSUP;
7428         }
7429
7430         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7431         etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
7432         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7433         IXGBE_WRITE_FLUSH(hw);
7434
7435         return 0;
7436 }
7437
7438 /* Disable l2 tunnel */
7439 static int
7440 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
7441                             enum rte_eth_tunnel_type l2_tunnel_type)
7442 {
7443         int ret = 0;
7444         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7445         struct ixgbe_l2_tn_info *l2_tn_info =
7446                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7447
7448         switch (l2_tunnel_type) {
7449         case RTE_L2_TUNNEL_TYPE_E_TAG:
7450                 l2_tn_info->e_tag_en = FALSE;
7451                 ret = ixgbe_e_tag_disable(hw);
7452                 break;
7453         default:
7454                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7455                 ret = -EINVAL;
7456                 break;
7457         }
7458
7459         return ret;
7460 }
7461
7462 static int
7463 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
7464                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7465 {
7466         int ret = 0;
7467         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7468         uint32_t i, rar_entries;
7469         uint32_t rar_low, rar_high;
7470
7471         if (hw->mac.type != ixgbe_mac_X550 &&
7472             hw->mac.type != ixgbe_mac_X550EM_x &&
7473             hw->mac.type != ixgbe_mac_X550EM_a) {
7474                 return -ENOTSUP;
7475         }
7476
7477         rar_entries = ixgbe_get_num_rx_addrs(hw);
7478
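             /* Scan the receive address registers for an enabled E-tag entry
              * (ADTYPE set) whose low bits match this tunnel ID.
              */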
7479         for (i = 1; i < rar_entries; i++) {
7480                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7481                 rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
7482                 if ((rar_high & IXGBE_RAH_AV) &&
7483                     (rar_high & IXGBE_RAH_ADTYPE) &&
7484                     ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
7485                      l2_tunnel->tunnel_id)) {
7486                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
7487                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
7488
7489                         ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
7490
7491                         return ret;
7492                 }
7493         }
7494
7495         return ret;
7496 }
7497
7498 static int
7499 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
7500                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7501 {
7502         int ret = 0;
7503         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7504         uint32_t i, rar_entries;
7505         uint32_t rar_low, rar_high;
7506
7507         if (hw->mac.type != ixgbe_mac_X550 &&
7508             hw->mac.type != ixgbe_mac_X550EM_x &&
7509             hw->mac.type != ixgbe_mac_X550EM_a) {
7510                 return -ENOTSUP;
7511         }
7512
7513         /* One entry per tunnel: remove any existing entry for this tunnel first. */
7514         ixgbe_e_tag_filter_del(dev, l2_tunnel);
7515
7516         rar_entries = ixgbe_get_num_rx_addrs(hw);
7517
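             /* Claim the first unused receive address register; index 0 is
              * skipped since it normally holds the port's default MAC address
              * (assumption).
              */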
7518         for (i = 1; i < rar_entries; i++) {
7519                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7520                 if (rar_high & IXGBE_RAH_AV) {
7521                         continue;
7522                 } else {
7523                         ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
7524                         rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
7525                         rar_low = l2_tunnel->tunnel_id;
7526
7527                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
7528                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
7529
7530                         return ret;
7531                 }
7532         }
7533
7534         PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rules is full."
7535                      " Please remove a rule before adding a new one.");
7536         return -EINVAL;
7537 }
7538
7539 static inline struct ixgbe_l2_tn_filter *
7540 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
7541                           struct ixgbe_l2_tn_key *key)
7542 {
7543         int ret;
7544
7545         ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
7546         if (ret < 0)
7547                 return NULL;
7548
7549         return l2_tn_info->hash_map[ret];
7550 }
7551
7552 static inline int
7553 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7554                           struct ixgbe_l2_tn_filter *l2_tn_filter)
7555 {
7556         int ret;
7557
7558         ret = rte_hash_add_key(l2_tn_info->hash_handle,
7559                                &l2_tn_filter->key);
7560
7561         if (ret < 0) {
7562                 PMD_DRV_LOG(ERR,
7563                             "Failed to insert L2 tunnel filter"
7564                             " to hash table %d!",
7565                             ret);
7566                 return ret;
7567         }
7568
7569         l2_tn_info->hash_map[ret] = l2_tn_filter;
7570
7571         TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7572
7573         return 0;
7574 }
7575
7576 static inline int
7577 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7578                           struct ixgbe_l2_tn_key *key)
7579 {
7580         int ret;
7581         struct ixgbe_l2_tn_filter *l2_tn_filter;
7582
7583         ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
7584
7585         if (ret < 0) {
7586                 PMD_DRV_LOG(ERR,
7587                             "No such L2 tunnel filter to delete %d!",
7588                             ret);
7589                 return ret;
7590         }
7591
7592         l2_tn_filter = l2_tn_info->hash_map[ret];
7593         l2_tn_info->hash_map[ret] = NULL;
7594
7595         TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7596         rte_free(l2_tn_filter);
7597
7598         return 0;
7599 }
7600
7601 /* Add l2 tunnel filter */
7602 int
7603 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
7604                                struct rte_eth_l2_tunnel_conf *l2_tunnel,
7605                                bool restore)
7606 {
7607         int ret;
7608         struct ixgbe_l2_tn_info *l2_tn_info =
7609                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7610         struct ixgbe_l2_tn_key key;
7611         struct ixgbe_l2_tn_filter *node;
7612
7613         if (!restore) {
7614                 key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7615                 key.tn_id = l2_tunnel->tunnel_id;
7616
7617                 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
7618
7619                 if (node) {
7620                         PMD_DRV_LOG(ERR,
7621                                     "The L2 tunnel filter already exists!");
7622                         return -EINVAL;
7623                 }
7624
7625                 node = rte_zmalloc("ixgbe_l2_tn",
7626                                    sizeof(struct ixgbe_l2_tn_filter),
7627                                    0);
7628                 if (!node)
7629                         return -ENOMEM;
7630
7631                 rte_memcpy(&node->key,
7632                                  &key,
7633                                  sizeof(struct ixgbe_l2_tn_key));
7634                 node->pool = l2_tunnel->pool;
7635                 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
7636                 if (ret < 0) {
7637                         rte_free(node);
7638                         return ret;
7639                 }
7640         }
7641
7642         switch (l2_tunnel->l2_tunnel_type) {
7643         case RTE_L2_TUNNEL_TYPE_E_TAG:
7644                 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
7645                 break;
7646         default:
7647                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7648                 ret = -EINVAL;
7649                 break;
7650         }
7651
7652         if ((!restore) && (ret < 0))
7653                 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7654
7655         return ret;
7656 }
7657
7658 /* Delete l2 tunnel filter */
7659 int
7660 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
7661                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
7662 {
7663         int ret;
7664         struct ixgbe_l2_tn_info *l2_tn_info =
7665                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7666         struct ixgbe_l2_tn_key key;
7667
7668         key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7669         key.tn_id = l2_tunnel->tunnel_id;
7670         ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7671         if (ret < 0)
7672                 return ret;
7673
7674         switch (l2_tunnel->l2_tunnel_type) {
7675         case RTE_L2_TUNNEL_TYPE_E_TAG:
7676                 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
7677                 break;
7678         default:
7679                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7680                 ret = -EINVAL;
7681                 break;
7682         }
7683
7684         return ret;
7685 }
7686
7687 /**
7688  * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
7689  * @dev: pointer to rte_eth_dev structure
7690  * @filter_op: operation to be taken.
7691  * @arg: a pointer to specific structure corresponding to the filter_op
7692  */
7693 static int
7694 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
7695                                   enum rte_filter_op filter_op,
7696                                   void *arg)
7697 {
7698         int ret;
7699
7700         if (filter_op == RTE_ETH_FILTER_NOP)
7701                 return 0;
7702
7703         if (arg == NULL) {
7704                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
7705                             filter_op);
7706                 return -EINVAL;
7707         }
7708
7709         switch (filter_op) {
7710         case RTE_ETH_FILTER_ADD:
7711                 ret = ixgbe_dev_l2_tunnel_filter_add
7712                         (dev,
7713                          (struct rte_eth_l2_tunnel_conf *)arg,
7714                          FALSE);
7715                 break;
7716         case RTE_ETH_FILTER_DELETE:
7717                 ret = ixgbe_dev_l2_tunnel_filter_del
7718                         (dev,
7719                          (struct rte_eth_l2_tunnel_conf *)arg);
7720                 break;
7721         default:
7722                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
7723                 ret = -EINVAL;
7724                 break;
7725         }
7726         return ret;
7727 }
7728
7729 static int
7730 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
7731 {
7732         int ret = 0;
7733         uint32_t ctrl;
7734         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7735
7736         if (hw->mac.type != ixgbe_mac_X550 &&
7737             hw->mac.type != ixgbe_mac_X550EM_x &&
7738             hw->mac.type != ixgbe_mac_X550EM_a) {
7739                 return -ENOTSUP;
7740         }
7741
7742         ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
7743         ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
7744         if (en)
7745                 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
7746         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
7747
7748         return ret;
7749 }
7750
7751 /* Enable l2 tunnel forwarding */
7752 static int
7753 ixgbe_dev_l2_tunnel_forwarding_enable
7754         (struct rte_eth_dev *dev,
7755          enum rte_eth_tunnel_type l2_tunnel_type)
7756 {
7757         struct ixgbe_l2_tn_info *l2_tn_info =
7758                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7759         int ret = 0;
7760
7761         switch (l2_tunnel_type) {
7762         case RTE_L2_TUNNEL_TYPE_E_TAG:
7763                 l2_tn_info->e_tag_fwd_en = TRUE;
7764                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
7765                 break;
7766         default:
7767                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7768                 ret = -EINVAL;
7769                 break;
7770         }
7771
7772         return ret;
7773 }
7774
7775 /* Disable l2 tunnel forwarding */
7776 static int
7777 ixgbe_dev_l2_tunnel_forwarding_disable
7778         (struct rte_eth_dev *dev,
7779          enum rte_eth_tunnel_type l2_tunnel_type)
7780 {
7781         struct ixgbe_l2_tn_info *l2_tn_info =
7782                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7783         int ret = 0;
7784
7785         switch (l2_tunnel_type) {
7786         case RTE_L2_TUNNEL_TYPE_E_TAG:
7787                 l2_tn_info->e_tag_fwd_en = FALSE;
7788                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
7789                 break;
7790         default:
7791                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7792                 ret = -EINVAL;
7793                 break;
7794         }
7795
7796         return ret;
7797 }
7798
7799 static int
7800 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
7801                              struct rte_eth_l2_tunnel_conf *l2_tunnel,
7802                              bool en)
7803 {
7804         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
7805         int ret = 0;
7806         uint32_t vmtir, vmvir;
7807         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7808
7809         if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
7810                 PMD_DRV_LOG(ERR,
7811                             "VF id %u should be less than %u",
7812                             l2_tunnel->vf_id,
7813                             pci_dev->max_vfs);
7814                 return -EINVAL;
7815         }
7816
7817         if (hw->mac.type != ixgbe_mac_X550 &&
7818             hw->mac.type != ixgbe_mac_X550EM_x &&
7819             hw->mac.type != ixgbe_mac_X550EM_a) {
7820                 return -ENOTSUP;
7821         }
7822
7823         if (en)
7824                 vmtir = l2_tunnel->tunnel_id;
7825         else
7826                 vmtir = 0;
7827
7828         IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
7829
7830         vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
7831         vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
7832         if (en)
7833                 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
7834         IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
7835
7836         return ret;
7837 }
7838
7839 /* Enable l2 tunnel tag insertion */
7840 static int
7841 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
7842                                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
7843 {
7844         int ret = 0;
7845
7846         switch (l2_tunnel->l2_tunnel_type) {
7847         case RTE_L2_TUNNEL_TYPE_E_TAG:
7848                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
7849                 break;
7850         default:
7851                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7852                 ret = -EINVAL;
7853                 break;
7854         }
7855
7856         return ret;
7857 }
7858
7859 /* Disable l2 tunnel tag insertion */
7860 static int
7861 ixgbe_dev_l2_tunnel_insertion_disable
7862         (struct rte_eth_dev *dev,
7863          struct rte_eth_l2_tunnel_conf *l2_tunnel)
7864 {
7865         int ret = 0;
7866
7867         switch (l2_tunnel->l2_tunnel_type) {
7868         case RTE_L2_TUNNEL_TYPE_E_TAG:
7869                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
7870                 break;
7871         default:
7872                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7873                 ret = -EINVAL;
7874                 break;
7875         }
7876
7877         return ret;
7878 }
7879
7880 static int
7881 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
7882                              bool en)
7883 {
7884         int ret = 0;
7885         uint32_t qde;
7886         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7887
7888         if (hw->mac.type != ixgbe_mac_X550 &&
7889             hw->mac.type != ixgbe_mac_X550EM_x &&
7890             hw->mac.type != ixgbe_mac_X550EM_a) {
7891                 return -ENOTSUP;
7892         }
7893
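             /* Read-modify-write IXGBE_QDE: update the strip-tag bit and set
              * the WRITE flag (with READ cleared) to commit the change.
              */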
7894         qde = IXGBE_READ_REG(hw, IXGBE_QDE);
7895         if (en)
7896                 qde |= IXGBE_QDE_STRIP_TAG;
7897         else
7898                 qde &= ~IXGBE_QDE_STRIP_TAG;
7899         qde &= ~IXGBE_QDE_READ;
7900         qde |= IXGBE_QDE_WRITE;
7901         IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
7902
7903         return ret;
7904 }
7905
7906 /* Enable l2 tunnel tag stripping */
7907 static int
7908 ixgbe_dev_l2_tunnel_stripping_enable
7909         (struct rte_eth_dev *dev,
7910          enum rte_eth_tunnel_type l2_tunnel_type)
7911 {
7912         int ret = 0;
7913
7914         switch (l2_tunnel_type) {
7915         case RTE_L2_TUNNEL_TYPE_E_TAG:
7916                 ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
7917                 break;
7918         default:
7919                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7920                 ret = -EINVAL;
7921                 break;
7922         }
7923
7924         return ret;
7925 }
7926
7927 /* Disable l2 tunnel tag stripping */
7928 static int
7929 ixgbe_dev_l2_tunnel_stripping_disable
7930         (struct rte_eth_dev *dev,
7931          enum rte_eth_tunnel_type l2_tunnel_type)
7932 {
7933         int ret = 0;
7934
7935         switch (l2_tunnel_type) {
7936         case RTE_L2_TUNNEL_TYPE_E_TAG:
7937                 ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
7938                 break;
7939         default:
7940                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7941                 ret = -EINVAL;
7942                 break;
7943         }
7944
7945         return ret;
7946 }
7947
7948 /* Enable/disable l2 tunnel offload functions */
7949 static int
7950 ixgbe_dev_l2_tunnel_offload_set
7951         (struct rte_eth_dev *dev,
7952          struct rte_eth_l2_tunnel_conf *l2_tunnel,
7953          uint32_t mask,
7954          uint8_t en)
7955 {
7956         int ret = 0;
7957
7958         if (l2_tunnel == NULL)
7959                 return -EINVAL;
7960
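             /* -EINVAL is returned unless at least one supported mask bit is
              * handled below.
              */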
7961         ret = -EINVAL;
7962         if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
7963                 if (en)
7964                         ret = ixgbe_dev_l2_tunnel_enable(
7965                                 dev,
7966                                 l2_tunnel->l2_tunnel_type);
7967                 else
7968                         ret = ixgbe_dev_l2_tunnel_disable(
7969                                 dev,
7970                                 l2_tunnel->l2_tunnel_type);
7971         }
7972
7973         if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
7974                 if (en)
7975                         ret = ixgbe_dev_l2_tunnel_insertion_enable(
7976                                 dev,
7977                                 l2_tunnel);
7978                 else
7979                         ret = ixgbe_dev_l2_tunnel_insertion_disable(
7980                                 dev,
7981                                 l2_tunnel);
7982         }
7983
7984         if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
7985                 if (en)
7986                         ret = ixgbe_dev_l2_tunnel_stripping_enable(
7987                                 dev,
7988                                 l2_tunnel->l2_tunnel_type);
7989                 else
7990                         ret = ixgbe_dev_l2_tunnel_stripping_disable(
7991                                 dev,
7992                                 l2_tunnel->l2_tunnel_type);
7993         }
7994
7995         if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
7996                 if (en)
7997                         ret = ixgbe_dev_l2_tunnel_forwarding_enable(
7998                                 dev,
7999                                 l2_tunnel->l2_tunnel_type);
8000                 else
8001                         ret = ixgbe_dev_l2_tunnel_forwarding_disable(
8002                                 dev,
8003                                 l2_tunnel->l2_tunnel_type);
8004         }
8005
8006         return ret;
8007 }
8008
8009 static int
8010 ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
8011                         uint16_t port)
8012 {
8013         IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
8014         IXGBE_WRITE_FLUSH(hw);
8015
8016         return 0;
8017 }
8018
8019 /* There is only one register for the VxLAN UDP port, so several ports
8020  * cannot be added; the existing value is simply updated.
8021  */
8022 static int
8023 ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
8024                      uint16_t port)
8025 {
8026         if (port == 0) {
8027                 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
8028                 return -EINVAL;
8029         }
8030
8031         return ixgbe_update_vxlan_port(hw, port);
8032 }
8033
8034 /* The VxLAN port cannot really be deleted: since there is a single
8035  * register for the VxLAN UDP port, it must always hold a value.
8036  * So reset it to its original value, 0.
8037  */
8038 static int
8039 ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
8040                      uint16_t port)
8041 {
8042         uint16_t cur_port;
8043
8044         cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
8045
8046         if (cur_port != port) {
8047                 PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
8048                 return -EINVAL;
8049         }
8050
8051         return ixgbe_update_vxlan_port(hw, 0);
8052 }
8053
8054 /* Add UDP tunneling port */
8055 static int
8056 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8057                               struct rte_eth_udp_tunnel *udp_tunnel)
8058 {
8059         int ret = 0;
8060         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8061
8062         if (hw->mac.type != ixgbe_mac_X550 &&
8063             hw->mac.type != ixgbe_mac_X550EM_x &&
8064             hw->mac.type != ixgbe_mac_X550EM_a) {
8065                 return -ENOTSUP;
8066         }
8067
8068         if (udp_tunnel == NULL)
8069                 return -EINVAL;
8070
8071         switch (udp_tunnel->prot_type) {
8072         case RTE_TUNNEL_TYPE_VXLAN:
8073                 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
8074                 break;
8075
8076         case RTE_TUNNEL_TYPE_GENEVE:
8077         case RTE_TUNNEL_TYPE_TEREDO:
8078                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8079                 ret = -EINVAL;
8080                 break;
8081
8082         default:
8083                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8084                 ret = -EINVAL;
8085                 break;
8086         }
8087
8088         return ret;
8089 }
8090
8091 /* Remove UDP tunneling port */
8092 static int
8093 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8094                               struct rte_eth_udp_tunnel *udp_tunnel)
8095 {
8096         int ret = 0;
8097         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8098
8099         if (hw->mac.type != ixgbe_mac_X550 &&
8100             hw->mac.type != ixgbe_mac_X550EM_x &&
8101             hw->mac.type != ixgbe_mac_X550EM_a) {
8102                 return -ENOTSUP;
8103         }
8104
8105         if (udp_tunnel == NULL)
8106                 return -EINVAL;
8107
8108         switch (udp_tunnel->prot_type) {
8109         case RTE_TUNNEL_TYPE_VXLAN:
8110                 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
8111                 break;
8112         case RTE_TUNNEL_TYPE_GENEVE:
8113         case RTE_TUNNEL_TYPE_TEREDO:
8114                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8115                 ret = -EINVAL;
8116                 break;
8117         default:
8118                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8119                 ret = -EINVAL;
8120                 break;
8121         }
8122
8123         return ret;
8124 }
8125
8126 static void
8127 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
8128 {
8129         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8130
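             /* Request the new cast mode from the PF; the VF's
              * update_xcast_mode op is assumed to go through the PF mailbox.
              */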
8131         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
8132 }
8133
8134 static void
8135 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
8136 {
8137         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8138
8139         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
8140 }
8141
8142 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
8143 {
8144         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8145         u32 in_msg = 0;
8146
8147         if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
8148                 return;
8149
8150         /* PF reset VF event */
8151         if (in_msg == IXGBE_PF_CONTROL_MSG)
8152                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
8153                                               NULL, NULL);
8154 }
8155
static int
ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
        uint32_t eicr;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        ixgbevf_intr_disable(hw);

        /* read-on-clear nic registers here */
        eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
        intr->flags = 0;

        /* only one misc vector supported - mailbox */
        eicr &= IXGBE_VTEICR_MASK;
        if (eicr == IXGBE_MISC_VEC_ID)
                intr->flags |= IXGBE_FLAG_MAILBOX;

        return 0;
}

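/*
 * Handle the causes recorded by ixgbevf_dev_interrupt_get_status()
 * (currently only the PF mailbox) and re-enable the VF interrupt.
 */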
static int
ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        if (intr->flags & IXGBE_FLAG_MAILBOX) {
                ixgbevf_mbx_process(dev);
                intr->flags &= ~IXGBE_FLAG_MAILBOX;
        }

        ixgbevf_intr_enable(hw);

        return 0;
}

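/* VF interrupt handler invoked from the EAL interrupt thread */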
static void
ixgbevf_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

        ixgbevf_dev_interrupt_get_status(dev);
        ixgbevf_dev_interrupt_action(dev);
}
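
/*
 * Illustrative sketch (not part of the driver): to receive the PF reset
 * notification raised in ixgbevf_mbx_process(), an application registers a
 * callback matching rte_eth_dev_cb_fn through the generic ethdev API. The
 * names port_id, on_reset and my_ctx are placeholders for this example only.
 *
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *                                   on_reset, my_ctx);
 */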

/**
 *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
 *  @hw: pointer to hardware structure
 *
 *  Stops the transmit data path and waits for the HW to internally empty
 *  the Tx security block
 **/
int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECTX_POLL 40

        int i;
        uint32_t sectxreg;

        sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
        sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
        for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
                sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
                if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
                        break;
                /* Use interrupt-safe sleep just in case */
                usec_delay(1000);
        }

        /* For informational purposes only */
        if (i >= IXGBE_MAX_SECTX_POLL)
                PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
                            "path fully disabled. Continuing with init.");

        return IXGBE_SUCCESS;
}

/**
 *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
 *  @hw: pointer to hardware structure
 *
 *  Enables the transmit data path.
 **/
int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
{
        uint32_t sectxreg;

        sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
        sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
        IXGBE_WRITE_FLUSH(hw);

        return IXGBE_SUCCESS;
}

/* restore n-tuple filter */
static inline void
ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
{
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct ixgbe_5tuple_filter *node;

        TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
                ixgbe_inject_5tuple_filter(dev, node);
        }
}

/* restore ethernet type filter */
static inline void
ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        int i;

        for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
                if (filter_info->ethertype_mask & (1 << i)) {
                        IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
                                        filter_info->ethertype_filters[i].etqf);
                        IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
                                        filter_info->ethertype_filters[i].etqs);
                        IXGBE_WRITE_FLUSH(hw);
                }
        }
}

/* restore SYN filter */
static inline void
ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        uint32_t synqf;

        synqf = filter_info->syn_info;

        if (synqf & IXGBE_SYN_FILTER_ENABLE) {
                IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
                IXGBE_WRITE_FLUSH(hw);
        }
}

/* restore L2 tunnel filter */
static inline void
ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
{
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
        struct ixgbe_l2_tn_filter *node;
        struct rte_eth_l2_tunnel_conf l2_tn_conf;

        TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
                l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
                l2_tn_conf.tunnel_id      = node->key.tn_id;
                l2_tn_conf.pool           = node->pool;
                (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
        }
}

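/*
 * Re-program all software-maintained filters (n-tuple, ethertype, SYN,
 * flow director and L2 tunnel) into the hardware, e.g. when the port is
 * (re)started.
 */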
static int
ixgbe_filter_restore(struct rte_eth_dev *dev)
{
        ixgbe_ntuple_filter_restore(dev);
        ixgbe_ethertype_filter_restore(dev);
        ixgbe_syn_filter_restore(dev);
        ixgbe_fdir_filter_restore(dev);
        ixgbe_l2_tn_filter_restore(dev);

        return 0;
}

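/* Apply the saved E-tag (L2 tunnel) settings to the hardware */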
static void
ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
{
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (l2_tn_info->e_tag_en)
                (void)ixgbe_e_tag_enable(hw);

        if (l2_tn_info->e_tag_fwd_en)
                (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);

        (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
}

/* remove all the n-tuple filters */
void
ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct ixgbe_5tuple_filter *p_5tuple;

        while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
                ixgbe_remove_5tuple_filter(dev, p_5tuple);
}

/* remove all the ether type filters */
void
ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        int i;

        for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
                if ((filter_info->ethertype_mask & (1 << i)) &&
                    !filter_info->ethertype_filters[i].conf) {
                        (void)ixgbe_ethertype_filter_remove(filter_info,
                                                            (uint8_t)i);
                        IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
                        IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
                        IXGBE_WRITE_FLUSH(hw);
                }
        }
}

/* remove the SYN filter */
void
ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

        if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
                filter_info->syn_info = 0;

                IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
                IXGBE_WRITE_FLUSH(hw);
        }
}

/* remove all the L2 tunnel filters */
int
ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
{
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
        struct ixgbe_l2_tn_filter *l2_tn_filter;
        struct rte_eth_l2_tunnel_conf l2_tn_conf;
        int ret = 0;

        while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
                l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
                l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
                l2_tn_conf.pool           = l2_tn_filter->pool;
                ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");