ixgbe: support L2 tunnel operations
drivers/net/ixgbe/ixgbe_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63
64 #include "ixgbe_logs.h"
65 #include "base/ixgbe_api.h"
66 #include "base/ixgbe_vf.h"
67 #include "base/ixgbe_common.h"
68 #include "ixgbe_ethdev.h"
69 #include "ixgbe_bypass.h"
70 #include "ixgbe_rxtx.h"
71 #include "base/ixgbe_type.h"
72 #include "base/ixgbe_phy.h"
73 #include "ixgbe_regs.h"
74
75 /*
76  * High threshold controlling when to start sending XOFF frames. Must be at
77  * least 8 bytes less than receive packet buffer size. This value is in units
78  * of 1024 bytes.
79  */
80 #define IXGBE_FC_HI    0x80
81
82 /*
83  * Low threshold controlling when to start sending XON frames. This value is
84  * in units of 1024 bytes.
85  */
86 #define IXGBE_FC_LO    0x40
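/*
 * With the defaults above (both values are in 1 KB units), IXGBE_FC_HI (0x80)
 * corresponds to a 128 KB high-water mark and IXGBE_FC_LO (0x40) to a 64 KB
 * low-water mark of receive packet buffer occupancy.
 */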
87
88 /* Default minimum inter-interrupt interval for EITR configuration */
89 #define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E
90
91 /* Timer value included in XOFF frames. */
92 #define IXGBE_FC_PAUSE 0x680
93
94 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
95 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
96 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
97
98 #define IXGBE_MMW_SIZE_DEFAULT        0x4
99 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
100 #define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
101
102 /*
103  *  Default values for RX/TX configuration
104  */
105 #define IXGBE_DEFAULT_RX_FREE_THRESH  32
106 #define IXGBE_DEFAULT_RX_PTHRESH      8
107 #define IXGBE_DEFAULT_RX_HTHRESH      8
108 #define IXGBE_DEFAULT_RX_WTHRESH      0
109
110 #define IXGBE_DEFAULT_TX_FREE_THRESH  32
111 #define IXGBE_DEFAULT_TX_PTHRESH      32
112 #define IXGBE_DEFAULT_TX_HTHRESH      0
113 #define IXGBE_DEFAULT_TX_WTHRESH      0
114 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
115
116 /* Bit shift and mask */
117 #define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
118 #define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
119 #define IXGBE_8_BIT_WIDTH  CHAR_BIT
120 #define IXGBE_8_BIT_MASK   UINT8_MAX
121
122 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
123
124 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
125
126 #define IXGBE_HKEY_MAX_INDEX 10
127
128 /* Additional timesync values. */
129 #define NSEC_PER_SEC             1000000000L
130 #define IXGBE_INCVAL_10GB        0x66666666
131 #define IXGBE_INCVAL_1GB         0x40000000
132 #define IXGBE_INCVAL_100         0x50000000
133 #define IXGBE_INCVAL_SHIFT_10GB  28
134 #define IXGBE_INCVAL_SHIFT_1GB   24
135 #define IXGBE_INCVAL_SHIFT_100   21
136 #define IXGBE_INCVAL_SHIFT_82599 7
137 #define IXGBE_INCPER_SHIFT_82599 24
138
139 #define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
140
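/*
 * Register bits used by the L2 tunnel (E-Tag, IEEE 802.1BR) operations added
 * here: pool/E-Tag mode selection in VT_CTL, E-Tag ethertype programming,
 * RAH/RAL based E-Tag filtering, per-VF tag insertion (VMVIR/VMTIR) and tag
 * stripping (QDE).
 */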
141 #define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
142 #define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
143 #define DEFAULT_ETAG_ETYPE                     0x893f
144 #define IXGBE_ETAG_ETYPE                       0x00005084
145 #define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
146 #define IXGBE_ETAG_ETYPE_VALID                 0x80000000
147 #define IXGBE_RAH_ADTYPE                       0x40000000
148 #define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
149 #define IXGBE_VMVIR_TAGA_MASK                  0x18000000
150 #define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
151 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
152 #define IXGBE_QDE_STRIP_TAG                    0x00000004
153
154 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
155 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
156 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
157 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
158 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
159 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
160 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
161 static void ixgbe_dev_close(struct rte_eth_dev *dev);
162 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
163 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
164 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
165 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
166 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
167                                 int wait_to_complete);
168 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
169                                 struct rte_eth_stats *stats);
170 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
171                                 struct rte_eth_xstats *xstats, unsigned n);
172 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
173                                   struct rte_eth_xstats *xstats, unsigned n);
174 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
175 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
176 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
177                                              uint16_t queue_id,
178                                              uint8_t stat_idx,
179                                              uint8_t is_rx);
180 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
181                                struct rte_eth_dev_info *dev_info);
182 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
183                                  struct rte_eth_dev_info *dev_info);
184 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
185
186 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
187                 uint16_t vlan_id, int on);
188 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
189                                enum rte_vlan_type vlan_type,
190                                uint16_t tpid_id);
191 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
192                 uint16_t queue, bool on);
193 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
194                 int on);
195 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
196 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
197 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
198 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
199 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
200
201 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
202 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
203 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
204                                struct rte_eth_fc_conf *fc_conf);
205 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
206                                struct rte_eth_fc_conf *fc_conf);
207 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
208                 struct rte_eth_pfc_conf *pfc_conf);
209 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
210                         struct rte_eth_rss_reta_entry64 *reta_conf,
211                         uint16_t reta_size);
212 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
213                         struct rte_eth_rss_reta_entry64 *reta_conf,
214                         uint16_t reta_size);
215 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
216 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
217 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
218 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
219 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
220 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
221                 void *param);
222 static void ixgbe_dev_interrupt_delayed_handler(void *param);
223 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
224                 uint32_t index, uint32_t pool);
225 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
226 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
227                                            struct ether_addr *mac_addr);
228 static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
229
230 /* For Virtual Function support */
231 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
232 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
233 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
234 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
235 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
236 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
237 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
238 static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
239 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
240                 struct rte_eth_stats *stats);
241 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
242 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
243                 uint16_t vlan_id, int on);
244 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
245                 uint16_t queue, int on);
246 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
247 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
248 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
249                                             uint16_t queue_id);
250 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
251                                              uint16_t queue_id);
252 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
253                                  uint8_t queue, uint8_t msix_vector);
254 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
255
256 /* For Eth VMDQ APIs support */
257 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
258                 ether_addr* mac_addr,uint8_t on);
259 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
260 static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,  uint16_t pool,
261                 uint16_t rx_mask, uint8_t on);
262 static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
263 static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
264 static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
265                 uint64_t pool_mask,uint8_t vlan_on);
266 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
267                 struct rte_eth_mirror_conf *mirror_conf,
268                 uint8_t rule_id, uint8_t on);
269 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
270                 uint8_t rule_id);
271 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
272                                           uint16_t queue_id);
273 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
274                                            uint16_t queue_id);
275 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
276                                uint8_t queue, uint8_t msix_vector);
277 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
278
279 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
280                 uint16_t queue_idx, uint16_t tx_rate);
281 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
282                 uint16_t tx_rate, uint64_t q_msk);
283
284 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
285                                  struct ether_addr *mac_addr,
286                                  uint32_t index, uint32_t pool);
287 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
288 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
289                                              struct ether_addr *mac_addr);
290 static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
291                         struct rte_eth_syn_filter *filter,
292                         bool add);
293 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
294                         struct rte_eth_syn_filter *filter);
295 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
296                         enum rte_filter_op filter_op,
297                         void *arg);
298 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
299                         struct ixgbe_5tuple_filter *filter);
300 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
301                         struct ixgbe_5tuple_filter *filter);
302 static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
303                         struct rte_eth_ntuple_filter *filter,
304                         bool add);
305 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
306                                 enum rte_filter_op filter_op,
307                                 void *arg);
308 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
309                         struct rte_eth_ntuple_filter *filter);
310 static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
311                         struct rte_eth_ethertype_filter *filter,
312                         bool add);
313 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
314                                 enum rte_filter_op filter_op,
315                                 void *arg);
316 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
317                         struct rte_eth_ethertype_filter *filter);
318 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
319                      enum rte_filter_type filter_type,
320                      enum rte_filter_op filter_op,
321                      void *arg);
322 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
323
324 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
325                                       struct ether_addr *mc_addr_set,
326                                       uint32_t nb_mc_addr);
327 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
328                                    struct rte_eth_dcb_info *dcb_info);
329
330 static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
331 static int ixgbe_get_regs(struct rte_eth_dev *dev,
332                             struct rte_dev_reg_info *regs);
333 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
334 static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
335                                 struct rte_dev_eeprom_info *eeprom);
336 static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
337                                 struct rte_dev_eeprom_info *eeprom);
338
339 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
340 static int ixgbevf_get_regs(struct rte_eth_dev *dev,
341                                 struct rte_dev_reg_info *regs);
342
343 static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
344 static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
345 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
346                                             struct timespec *timestamp,
347                                             uint32_t flags);
348 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
349                                             struct timespec *timestamp);
350 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
351 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
352                                    struct timespec *timestamp);
353 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
354                                    const struct timespec *timestamp);
355 static int ixgbe_dev_l2_tunnel_eth_type_conf
356         (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
357 static int ixgbe_dev_l2_tunnel_offload_set
358         (struct rte_eth_dev *dev,
359          struct rte_eth_l2_tunnel_conf *l2_tunnel,
360          uint32_t mask,
361          uint8_t en);
362 static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
363                                              enum rte_filter_op filter_op,
364                                              void *arg);
365
366 /*
367  * Define VF stats macros for registers that are not "clear on read"
368  */
369 #define UPDATE_VF_STAT(reg, last, cur)                          \
370 {                                                               \
371         uint32_t latest = IXGBE_READ_REG(hw, reg);              \
372         cur += (latest - last) & UINT_MAX;                      \
373         last = latest;                                          \
374 }
375
376 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
377 {                                                                \
378         u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
379         u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
380         u64 latest = ((new_msb << 32) | new_lsb);                \
381         cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
382         last = latest;                                           \
383 }
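/*
 * UPDATE_VF_STAT accumulates a free-running 32-bit counter by adding the
 * delta since the last read, masked to 32 bits so counter wrap-around is
 * handled. UPDATE_VF_STAT_36BIT does the same for a 36-bit counter split
 * across an LSB/MSB register pair: adding 2^36 (0x1000000000) before masking
 * with 0xFFFFFFFFF keeps the delta positive across a wrap. Illustrative use
 * (register and field names are an example of the pattern only):
 *   UPDATE_VF_STAT(IXGBE_VFGPRC, hw_stats->last_vfgprc, hw_stats->vfgprc);
 */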
384
385 #define IXGBE_SET_HWSTRIP(h, q) do{\
386                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
387                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
388                 (h)->bitmap[idx] |= 1 << bit;\
389         }while(0)
390
391 #define IXGBE_CLEAR_HWSTRIP(h, q) do{\
392                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
393                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
394                 (h)->bitmap[idx] &= ~(1 << bit);\
395         }while(0)
396
397 #define IXGBE_GET_HWSTRIP(h, q, r) do{\
398                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
399                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
400                 (r) = (h)->bitmap[idx] >> bit & 1;\
401         }while(0)
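/*
 * The hw-strip bitmap macros address one bit per queue: idx = q / (bits per
 * bitmap word) selects the word and bit = q % (bits per word) the position
 * inside it. For example, with 32-bit bitmap words queue 35 maps to
 * bitmap[1], bit 3.
 */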
402
403 /*
404  * The set of PCI devices this driver supports
405  */
406 static const struct rte_pci_id pci_id_ixgbe_map[] = {
407
408 #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
409 #include "rte_pci_dev_ids.h"
410
411 { .vendor_id = 0, /* sentinel */ },
412 };
413
414
415 /*
416  * The set of PCI devices this driver supports (for 82599 VF)
417  */
418 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
419
420 #define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
421 #include "rte_pci_dev_ids.h"
422 { .vendor_id = 0, /* sentinel */ },
423
424 };
425
426 static const struct rte_eth_desc_lim rx_desc_lim = {
427         .nb_max = IXGBE_MAX_RING_DESC,
428         .nb_min = IXGBE_MIN_RING_DESC,
429         .nb_align = IXGBE_RXD_ALIGN,
430 };
431
432 static const struct rte_eth_desc_lim tx_desc_lim = {
433         .nb_max = IXGBE_MAX_RING_DESC,
434         .nb_min = IXGBE_MIN_RING_DESC,
435         .nb_align = IXGBE_TXD_ALIGN,
436 };
437
438 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
439         .dev_configure        = ixgbe_dev_configure,
440         .dev_start            = ixgbe_dev_start,
441         .dev_stop             = ixgbe_dev_stop,
442         .dev_set_link_up    = ixgbe_dev_set_link_up,
443         .dev_set_link_down  = ixgbe_dev_set_link_down,
444         .dev_close            = ixgbe_dev_close,
445         .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
446         .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
447         .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
448         .allmulticast_disable = ixgbe_dev_allmulticast_disable,
449         .link_update          = ixgbe_dev_link_update,
450         .stats_get            = ixgbe_dev_stats_get,
451         .xstats_get           = ixgbe_dev_xstats_get,
452         .stats_reset          = ixgbe_dev_stats_reset,
453         .xstats_reset         = ixgbe_dev_xstats_reset,
454         .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
455         .dev_infos_get        = ixgbe_dev_info_get,
456         .mtu_set              = ixgbe_dev_mtu_set,
457         .vlan_filter_set      = ixgbe_vlan_filter_set,
458         .vlan_tpid_set        = ixgbe_vlan_tpid_set,
459         .vlan_offload_set     = ixgbe_vlan_offload_set,
460         .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
461         .rx_queue_start       = ixgbe_dev_rx_queue_start,
462         .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
463         .tx_queue_start       = ixgbe_dev_tx_queue_start,
464         .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
465         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
466         .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
467         .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
468         .rx_queue_release     = ixgbe_dev_rx_queue_release,
469         .rx_queue_count       = ixgbe_dev_rx_queue_count,
470         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
471         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
472         .tx_queue_release     = ixgbe_dev_tx_queue_release,
473         .dev_led_on           = ixgbe_dev_led_on,
474         .dev_led_off          = ixgbe_dev_led_off,
475         .flow_ctrl_get        = ixgbe_flow_ctrl_get,
476         .flow_ctrl_set        = ixgbe_flow_ctrl_set,
477         .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
478         .mac_addr_add         = ixgbe_add_rar,
479         .mac_addr_remove      = ixgbe_remove_rar,
480         .mac_addr_set         = ixgbe_set_default_mac_addr,
481         .uc_hash_table_set    = ixgbe_uc_hash_table_set,
482         .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
483         .mirror_rule_set      = ixgbe_mirror_rule_set,
484         .mirror_rule_reset    = ixgbe_mirror_rule_reset,
485         .set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
486         .set_vf_rx            = ixgbe_set_pool_rx,
487         .set_vf_tx            = ixgbe_set_pool_tx,
488         .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
489         .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
490         .set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
491         .reta_update          = ixgbe_dev_rss_reta_update,
492         .reta_query           = ixgbe_dev_rss_reta_query,
493 #ifdef RTE_NIC_BYPASS
494         .bypass_init          = ixgbe_bypass_init,
495         .bypass_state_set     = ixgbe_bypass_state_store,
496         .bypass_state_show    = ixgbe_bypass_state_show,
497         .bypass_event_set     = ixgbe_bypass_event_store,
498         .bypass_event_show    = ixgbe_bypass_event_show,
499         .bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
500         .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
501         .bypass_ver_show      = ixgbe_bypass_ver_show,
502         .bypass_wd_reset      = ixgbe_bypass_wd_reset,
503 #endif /* RTE_NIC_BYPASS */
504         .rss_hash_update      = ixgbe_dev_rss_hash_update,
505         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
506         .filter_ctrl          = ixgbe_dev_filter_ctrl,
507         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
508         .rxq_info_get         = ixgbe_rxq_info_get,
509         .txq_info_get         = ixgbe_txq_info_get,
510         .timesync_enable      = ixgbe_timesync_enable,
511         .timesync_disable     = ixgbe_timesync_disable,
512         .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
513         .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
514         .get_reg_length       = ixgbe_get_reg_length,
515         .get_reg              = ixgbe_get_regs,
516         .get_eeprom_length    = ixgbe_get_eeprom_length,
517         .get_eeprom           = ixgbe_get_eeprom,
518         .set_eeprom           = ixgbe_set_eeprom,
519         .get_dcb_info         = ixgbe_dev_get_dcb_info,
520         .timesync_adjust_time = ixgbe_timesync_adjust_time,
521         .timesync_read_time   = ixgbe_timesync_read_time,
522         .timesync_write_time  = ixgbe_timesync_write_time,
523         .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
524         .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
525 };
526
527 /*
528  * dev_ops for the virtual function; only the bare necessities for basic
529  * VF operation have been implemented
530  */
531 static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
532         .dev_configure        = ixgbevf_dev_configure,
533         .dev_start            = ixgbevf_dev_start,
534         .dev_stop             = ixgbevf_dev_stop,
535         .link_update          = ixgbe_dev_link_update,
536         .stats_get            = ixgbevf_dev_stats_get,
537         .xstats_get           = ixgbevf_dev_xstats_get,
538         .stats_reset          = ixgbevf_dev_stats_reset,
539         .xstats_reset         = ixgbevf_dev_stats_reset,
540         .dev_close            = ixgbevf_dev_close,
541         .dev_infos_get        = ixgbevf_dev_info_get,
542         .mtu_set              = ixgbevf_dev_set_mtu,
543         .vlan_filter_set      = ixgbevf_vlan_filter_set,
544         .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
545         .vlan_offload_set     = ixgbevf_vlan_offload_set,
546         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
547         .rx_queue_release     = ixgbe_dev_rx_queue_release,
548         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
549         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
550         .tx_queue_release     = ixgbe_dev_tx_queue_release,
551         .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
552         .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
553         .mac_addr_add         = ixgbevf_add_mac_addr,
554         .mac_addr_remove      = ixgbevf_remove_mac_addr,
555         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
556         .rxq_info_get         = ixgbe_rxq_info_get,
557         .txq_info_get         = ixgbe_txq_info_get,
558         .mac_addr_set         = ixgbevf_set_default_mac_addr,
559         .get_reg_length       = ixgbevf_get_reg_length,
560         .get_reg              = ixgbevf_get_regs,
561         .reta_update          = ixgbe_dev_rss_reta_update,
562         .reta_query           = ixgbe_dev_rss_reta_query,
563         .rss_hash_update      = ixgbe_dev_rss_hash_update,
564         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
565 };
566
567 /* Store statistics names and their offsets in the stats structure */
568 struct rte_ixgbe_xstats_name_off {
569         char name[RTE_ETH_XSTATS_NAME_SIZE];
570         unsigned offset;
571 };
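/*
 * Each entry pairs an exported xstats name with the byte offset of the
 * matching counter inside struct ixgbe_hw_stats, letting the xstats_get
 * callbacks read counters generically. A minimal sketch of that access
 * pattern (not necessarily the literal code used below) is:
 *   value = *(uint64_t *)(((char *)hw_stats) +
 *                         rte_ixgbe_stats_strings[i].offset);
 */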
572
573 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
574         {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
575         {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
576         {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
577         {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
578         {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
579         {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
580         {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
581         {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
582         {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
583         {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
584         {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
585         {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
586         {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
587         {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
588         {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
589                 prc1023)},
590         {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
591                 prc1522)},
592         {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
593         {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
594         {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
595         {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
596         {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
597         {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
598         {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
599         {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
600         {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
601         {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
602         {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
603         {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
604         {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
605         {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
606         {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
607         {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
608         {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
609                 ptc1023)},
610         {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
611                 ptc1522)},
612         {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
613         {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
614         {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
615         {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
616
617         {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
618                 fdirustat_add)},
619         {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
620                 fdirustat_remove)},
621         {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
622                 fdirfstat_fadd)},
623         {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
624                 fdirfstat_fremove)},
625         {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
626                 fdirmatch)},
627         {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
628                 fdirmiss)},
629
630         {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
631         {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
632         {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
633                 fclast)},
634         {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
635         {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
636         {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
637         {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
638         {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
639                 fcoe_noddp)},
640         {"rx_fcoe_no_direct_data_placement_ext_buff",
641                 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
642
643         {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
644                 lxontxc)},
645         {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
646                 lxonrxc)},
647         {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
648                 lxofftxc)},
649         {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
650                 lxoffrxc)},
651         {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
652 };
653
654 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
655                            sizeof(rte_ixgbe_stats_strings[0]))
656
657 /* Per-queue statistics */
658 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
659         {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
660         {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
661         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
662         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
663 };
664
665 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
666                            sizeof(rte_ixgbe_rxq_strings[0]))
667
668 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
669         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
670         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
671         {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
672                 pxon2offc)},
673 };
674
675 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
676                            sizeof(rte_ixgbe_txq_strings[0]))
677
678 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
679         {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
680 };
681
682 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
683                 sizeof(rte_ixgbevf_stats_strings[0]))
684
685 /**
686  * Atomically reads the link status information from global
687  * structure rte_eth_dev.
688  *
689  * @param dev
690  *   Pointer to the structure rte_eth_dev to read from.
691  * @param link Pointer to the buffer where the link status is stored.
692  *
693  * @return
694  *   - On success, zero.
695  *   - On failure, negative value.
696  */
697 static inline int
698 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
699                                 struct rte_eth_link *link)
700 {
701         struct rte_eth_link *dst = link;
702         struct rte_eth_link *src = &(dev->data->dev_link);
703
704         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
705                                         *(uint64_t *)src) == 0)
706                 return -1;
707
708         return 0;
709 }
710
711 /**
712  * Atomically writes the link status information into global
713  * structure rte_eth_dev.
714  *
715  * @param dev
716  *   Pointer to the structure rte_eth_dev to write to.
717  * @param link Pointer to the link status to be written.
718  *
719  * @return
720  *   - On success, zero.
721  *   - On failure, negative value.
722  */
723 static inline int
724 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
725                                 struct rte_eth_link *link)
726 {
727         struct rte_eth_link *dst = &(dev->data->dev_link);
728         struct rte_eth_link *src = link;
729
730         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
731                                         *(uint64_t *)src) == 0)
732                 return -1;
733
734         return 0;
735 }
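/*
 * Both helpers above copy struct rte_eth_link with a single 64-bit
 * compare-and-set: the destination is read, then swapped only if it is still
 * unchanged, so a racing update makes the cmpset fail and -1 is returned
 * rather than storing a torn value. This relies on struct rte_eth_link
 * fitting in 64 bits.
 */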
736
737 /*
738  * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
739  */
740 static inline int
741 ixgbe_is_sfp(struct ixgbe_hw *hw)
742 {
743         switch (hw->phy.type) {
744         case ixgbe_phy_sfp_avago:
745         case ixgbe_phy_sfp_ftl:
746         case ixgbe_phy_sfp_intel:
747         case ixgbe_phy_sfp_unknown:
748         case ixgbe_phy_sfp_passive_tyco:
749         case ixgbe_phy_sfp_passive_unknown:
750                 return 1;
751         default:
752                 return 0;
753         }
754 }
755
756 static inline int32_t
757 ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
758 {
759         uint32_t ctrl_ext;
760         int32_t status;
761
762         status = ixgbe_reset_hw(hw);
763
764         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
765         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
766         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
767         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
768         IXGBE_WRITE_FLUSH(hw);
769
770         return status;
771 }
772
773 static inline void
774 ixgbe_enable_intr(struct rte_eth_dev *dev)
775 {
776         struct ixgbe_interrupt *intr =
777                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
778         struct ixgbe_hw *hw =
779                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
780
781         IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
782         IXGBE_WRITE_FLUSH(hw);
783 }
784
785 /*
786  * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
787  */
788 static void
789 ixgbe_disable_intr(struct ixgbe_hw *hw)
790 {
791         PMD_INIT_FUNC_TRACE();
792
793         if (hw->mac.type == ixgbe_mac_82598EB) {
794                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
795         } else {
796                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
797                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
798                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
799         }
800         IXGBE_WRITE_FLUSH(hw);
801 }
802
803 /*
804  * This function resets queue statistics mapping registers.
805  * From Niantic datasheet, Initialization of Statistics section:
806  * "...if software requires the queue counters, the RQSMR and TQSM registers
807  * must be re-programmed following a device reset."
808  */
809 static void
810 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
811 {
812         uint32_t i;
813
814         for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
815                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
816                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
817         }
818 }
819
820
821 static int
822 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
823                                   uint16_t queue_id,
824                                   uint8_t stat_idx,
825                                   uint8_t is_rx)
826 {
827 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
828 #define NB_QMAP_FIELDS_PER_QSM_REG 4
829 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
830
831         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
832         struct ixgbe_stat_mapping_registers *stat_mappings =
833                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
834         uint32_t qsmr_mask = 0;
835         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
836         uint32_t q_map;
837         uint8_t n, offset;
838
839         if ((hw->mac.type != ixgbe_mac_82599EB) &&
840                 (hw->mac.type != ixgbe_mac_X540) &&
841                 (hw->mac.type != ixgbe_mac_X550) &&
842                 (hw->mac.type != ixgbe_mac_X550EM_x))
843                 return -ENOSYS;
844
845         PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
846                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
847                      queue_id, stat_idx);
848
849         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
850         if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
851                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
852                 return -EIO;
853         }
854         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
855
856         /* Now clear any previous stat_idx set */
857         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
858         if (!is_rx)
859                 stat_mappings->tqsm[n] &= ~clearing_mask;
860         else
861                 stat_mappings->rqsmr[n] &= ~clearing_mask;
862
863         q_map = (uint32_t)stat_idx;
864         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
865         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
866         if (!is_rx)
867                 stat_mappings->tqsm[n] |= qsmr_mask;
868         else
869                 stat_mappings->rqsmr[n] |= qsmr_mask;
870
871         PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
872                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
873                      queue_id, stat_idx);
874         PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
875                      is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
876
877         /* Now write the mapping in the appropriate register */
878         if (is_rx) {
879                 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
880                              stat_mappings->rqsmr[n], n);
881                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
882         }
883         else {
884                 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
885                              stat_mappings->tqsm[n], n);
886                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
887         }
888         return 0;
889 }
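/*
 * Packing example for the mapping above: each RQSMR/TQSM register holds four
 * 8-bit queue-to-stat fields, so queue_id 9 uses register n = 9 / 4 = 2 at
 * byte offset 9 % 4 = 1. A stat_idx of 5 first clears that byte and then ORs
 * in 5 << 8 = 0x00000500, leaving the other three fields untouched.
 */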
890
891 static void
892 ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
893 {
894         struct ixgbe_stat_mapping_registers *stat_mappings =
895                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
896         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
897         int i;
898
899         /* write whatever was in stat mapping table to the NIC */
900         for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
901                 /* rx */
902                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
903
904                 /* tx */
905                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
906         }
907 }
908
909 static void
910 ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
911 {
912         uint8_t i;
913         struct ixgbe_dcb_tc_config *tc;
914         uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
915
916         dcb_config->num_tcs.pg_tcs = dcb_max_tc;
917         dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
918         for (i = 0; i < dcb_max_tc; i++) {
919                 tc = &dcb_config->tc_config[i];
920                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
921                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
922                                  (uint8_t)(100/dcb_max_tc + (i & 1));
923                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
924                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
925                                  (uint8_t)(100/dcb_max_tc + (i & 1));
926                 tc->pfc = ixgbe_dcb_pfc_disabled;
927         }
928
929         /* Initialize default user to priority mapping, UPx->TC0 */
930         tc = &dcb_config->tc_config[0];
931         tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
932         tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
933         for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
934                 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
935                 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
936         }
937         dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
938         dcb_config->pfc_mode_enable = false;
939         dcb_config->vt_mode = true;
940         dcb_config->round_robin_enable = false;
941         /* support all DCB capabilities in 82599 */
942         dcb_config->support.capabilities = 0xFF;
943
944         /* we only support 4 TCs for X540, X550 */
945         if (hw->mac.type == ixgbe_mac_X540 ||
946                 hw->mac.type == ixgbe_mac_X550 ||
947                 hw->mac.type == ixgbe_mac_X550EM_x) {
948                 dcb_config->num_tcs.pg_tcs = 4;
949                 dcb_config->num_tcs.pfc_tcs = 4;
950         }
951 }
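/*
 * Bandwidth split in ixgbe_dcb_init(): with the default of 8 traffic
 * classes, 100 / 8 = 12 and the "+ (i & 1)" term adds 1% to every odd TC, so
 * the classes get 12% or 13% each and the total is 4 * 12 + 4 * 13 = 100.
 */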
952
953 /*
954  * Ensure that all locks are released before first NVM or PHY access
955  */
956 static void
957 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
958 {
959         uint16_t mask;
960
961         /*
962          * Acquiring the PHY lock should not fail at this early stage; if it
963          * does, it is because the application previously exited without
964          * releasing it, so force the release of the faulty lock. The common
965          * lock is released automatically by the swfw_sync function.
966          */
967         mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
968         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
969                 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
970         }
971         ixgbe_release_swfw_semaphore(hw, mask);
972
973         /*
974          * These locks are trickier since they are common to all ports, but the
975          * swfw_sync retries last long enough (1s) to make it almost certain
976          * that a lock which cannot be taken was left locked improperly rather
977          * than being legitimately held.
978          */
979         mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
980         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
981                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
982         }
983         ixgbe_release_swfw_semaphore(hw, mask);
984 }
985
986 /*
987  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
988  * It returns 0 on success.
989  */
990 static int
991 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
992 {
993         struct rte_pci_device *pci_dev;
994         struct ixgbe_hw *hw =
995                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
996         struct ixgbe_vfta * shadow_vfta =
997                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
998         struct ixgbe_hwstrip *hwstrip =
999                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1000         struct ixgbe_dcb_config *dcb_config =
1001                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
1002         struct ixgbe_filter_info *filter_info =
1003                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1004         uint32_t ctrl_ext;
1005         uint16_t csum;
1006         int diag, i;
1007
1008         PMD_INIT_FUNC_TRACE();
1009
1010         eth_dev->dev_ops = &ixgbe_eth_dev_ops;
1011         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1012         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1013
1014         /*
1015          * For secondary processes, we don't initialise any further as primary
1016          * has already done this work. Only check whether we need a different
1017          * RX and TX function.
1018          */
1019         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1020                 struct ixgbe_tx_queue *txq;
1021                 /* The TX function used by the primary process was set by the last
1022                  * queue initialized; the primary may not have initialized any TX queues yet */
1023                 if (eth_dev->data->tx_queues) {
1024                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1025                         ixgbe_set_tx_function(eth_dev, txq);
1026                 } else {
1027                         /* Use default TX function if we get here */
1028                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1029                                              "Using default TX function.");
1030                 }
1031
1032                 ixgbe_set_rx_function(eth_dev);
1033
1034                 return 0;
1035         }
1036         pci_dev = eth_dev->pci_dev;
1037
1038         rte_eth_copy_pci_info(eth_dev, pci_dev);
1039
1040         /* Vendor and Device ID need to be set before init of shared code */
1041         hw->device_id = pci_dev->id.device_id;
1042         hw->vendor_id = pci_dev->id.vendor_id;
1043         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1044         hw->allow_unsupported_sfp = 1;
1045
1046         /* Initialize the shared code (base driver) */
1047 #ifdef RTE_NIC_BYPASS
1048         diag = ixgbe_bypass_init_shared_code(hw);
1049 #else
1050         diag = ixgbe_init_shared_code(hw);
1051 #endif /* RTE_NIC_BYPASS */
1052
1053         if (diag != IXGBE_SUCCESS) {
1054                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1055                 return -EIO;
1056         }
1057
1058         /* pick up the PCI bus settings for reporting later */
1059         ixgbe_get_bus_info(hw);
1060
1061         /* Unlock any pending hardware semaphore */
1062         ixgbe_swfw_lock_reset(hw);
1063
1064         /* Initialize DCB configuration*/
1065         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1066         ixgbe_dcb_init(hw,dcb_config);
1067         /* Get Hardware Flow Control setting */
1068         hw->fc.requested_mode = ixgbe_fc_full;
1069         hw->fc.current_mode = ixgbe_fc_full;
1070         hw->fc.pause_time = IXGBE_FC_PAUSE;
1071         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1072                 hw->fc.low_water[i] = IXGBE_FC_LO;
1073                 hw->fc.high_water[i] = IXGBE_FC_HI;
1074         }
1075         hw->fc.send_xon = 1;
1076
1077         /* Make sure we have a good EEPROM before we read from it */
1078         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1079         if (diag != IXGBE_SUCCESS) {
1080                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1081                 return -EIO;
1082         }
1083
1084 #ifdef RTE_NIC_BYPASS
1085         diag = ixgbe_bypass_init_hw(hw);
1086 #else
1087         diag = ixgbe_init_hw(hw);
1088 #endif /* RTE_NIC_BYPASS */
1089
1090         /*
1091          * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1092          * is called too soon after the kernel driver unbinding/binding occurs.
1093          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1094          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1095          * also called. See ixgbe_identify_phy_82599(). The reason for the
1096          * failure is not known, and it only occurs when virtualisation features
1097          * are disabled in the BIOS. A delay of 100ms was found to be enough by
1098          * trial-and-error, and is doubled to be safe.
1099          */
1100         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1101                 rte_delay_ms(200);
1102                 diag = ixgbe_init_hw(hw);
1103         }
1104
1105         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1106                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1107                     "LOM.  Please be aware there may be issues associated "
1108                     "with your hardware.");
1109                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1110                     "please contact your Intel or hardware representative "
1111                     "who provided you with this hardware.");
1112         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1113                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1114         if (diag) {
1115                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1116                 return -EIO;
1117         }
1118
1119         /* Reset the hw statistics */
1120         ixgbe_dev_stats_reset(eth_dev);
1121
1122         /* disable interrupt */
1123         ixgbe_disable_intr(hw);
1124
1125         /* reset mappings for queue statistics hw counters*/
1126         ixgbe_reset_qstat_mappings(hw);
1127
1128         /* Allocate memory for storing MAC addresses */
1129         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1130                         hw->mac.num_rar_entries, 0);
1131         if (eth_dev->data->mac_addrs == NULL) {
1132                 PMD_INIT_LOG(ERR,
1133                         "Failed to allocate %u bytes needed to store "
1134                         "MAC addresses",
1135                         ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1136                 return -ENOMEM;
1137         }
1138         /* Copy the permanent MAC address */
1139         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1140                         &eth_dev->data->mac_addrs[0]);
1141
1142         /* Allocate memory for storing hash filter MAC addresses */
1143         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1144                         IXGBE_VMDQ_NUM_UC_MAC, 0);
1145         if (eth_dev->data->hash_mac_addrs == NULL) {
1146                 PMD_INIT_LOG(ERR,
1147                         "Failed to allocate %d bytes needed to store MAC addresses",
1148                         ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1149                 return -ENOMEM;
1150         }
1151
1152         /* initialize the vfta */
1153         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1154
1155         /* initialize the hw strip bitmap*/
1156         memset(hwstrip, 0, sizeof(*hwstrip));
1157
1158         /* initialize PF if max_vfs not zero */
1159         ixgbe_pf_host_init(eth_dev);
1160
1161         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1162         /* let hardware know driver is loaded */
1163         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1164         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1165         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1166         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1167         IXGBE_WRITE_FLUSH(hw);
1168
1169         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1170                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1171                              (int) hw->mac.type, (int) hw->phy.type,
1172                              (int) hw->phy.sfp_type);
1173         else
1174                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1175                              (int) hw->mac.type, (int) hw->phy.type);
1176
1177         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1178                         eth_dev->data->port_id, pci_dev->id.vendor_id,
1179                         pci_dev->id.device_id);
1180
1181         rte_intr_callback_register(&pci_dev->intr_handle,
1182                                    ixgbe_dev_interrupt_handler,
1183                                    (void *)eth_dev);
1184
1185         /* enable uio/vfio intr/eventfd mapping */
1186         rte_intr_enable(&pci_dev->intr_handle);
1187
1188         /* enable support intr */
1189         ixgbe_enable_intr(eth_dev);
1190
1191         /* initialize 5tuple filter list */
1192         TAILQ_INIT(&filter_info->fivetuple_list);
1193         memset(filter_info->fivetuple_mask, 0,
1194                 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1195
1196         return 0;
1197 }
1198
1199 static int
1200 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1201 {
1202         struct rte_pci_device *pci_dev;
1203         struct ixgbe_hw *hw;
1204
1205         PMD_INIT_FUNC_TRACE();
1206
1207         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1208                 return -EPERM;
1209
1210         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1211         pci_dev = eth_dev->pci_dev;
1212
1213         if (hw->adapter_stopped == 0)
1214                 ixgbe_dev_close(eth_dev);
1215
1216         eth_dev->dev_ops = NULL;
1217         eth_dev->rx_pkt_burst = NULL;
1218         eth_dev->tx_pkt_burst = NULL;
1219
1220         /* Unlock any pending hardware semaphore */
1221         ixgbe_swfw_lock_reset(hw);
1222
1223         /* disable uio intr before callback unregister */
1224         rte_intr_disable(&(pci_dev->intr_handle));
1225         rte_intr_callback_unregister(&(pci_dev->intr_handle),
1226                 ixgbe_dev_interrupt_handler, (void *)eth_dev);
1227
1228         /* uninitialize PF if max_vfs not zero */
1229         ixgbe_pf_host_uninit(eth_dev);
1230
1231         rte_free(eth_dev->data->mac_addrs);
1232         eth_dev->data->mac_addrs = NULL;
1233
1234         rte_free(eth_dev->data->hash_mac_addrs);
1235         eth_dev->data->hash_mac_addrs = NULL;
1236
1237         return 0;
1238 }
1239
1240 /*
1241  * Negotiate mailbox API version with the PF.
1242  * After reset, the API version is always set to the basic one (ixgbe_mbox_api_10).
1243  * Then we try to negotiate starting with the most recent one.
1244  * If all negotiation attempts fail, then we will proceed with
1245  * the default one (ixgbe_mbox_api_10).
1246  */
1247 static void
1248 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1249 {
1250         int32_t i;
1251
1252         /* start with highest supported, proceed down */
1253         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1254                 ixgbe_mbox_api_11,
1255                 ixgbe_mbox_api_10,
1256         };
1257
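        /* stop at the first version the PF accepts, or when the list is exhausted */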
1258         for (i = 0;
1259                         i != RTE_DIM(sup_ver) &&
1260                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1261                         i++)
1262                 ;
1263 }
1264
1265 static void
1266 generate_random_mac_addr(struct ether_addr *mac_addr)
1267 {
1268         uint64_t random;
1269
1270         /* Set Organizationally Unique Identifier (OUI) prefix. */
1271         mac_addr->addr_bytes[0] = 0x00;
1272         mac_addr->addr_bytes[1] = 0x09;
1273         mac_addr->addr_bytes[2] = 0xC0;
1274         /* Force indication of locally assigned MAC address. */
1275         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1276         /* Generate the last 3 bytes of the MAC address with a random number. */
1277         random = rte_rand();
1278         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1279 }
1280
1281 /*
1282  * Virtual Function device init
1283  */
1284 static int
1285 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1286 {
1287         int diag;
1288         uint32_t tc, tcs;
1289         struct rte_pci_device *pci_dev;
1290         struct ixgbe_hw *hw =
1291                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1292         struct ixgbe_vfta * shadow_vfta =
1293                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1294         struct ixgbe_hwstrip *hwstrip =
1295                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1296         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1297
1298         PMD_INIT_FUNC_TRACE();
1299
1300         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1301         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1302         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1303
1304         /* for secondary processes, we don't initialise any further as primary
1305          * has already done this work. Only check we don't need a different
1306          * RX function */
1307         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1308                 if (eth_dev->data->scattered_rx)
1309                         eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
1310                 return 0;
1311         }
1312
1313         pci_dev = eth_dev->pci_dev;
1314
1315         rte_eth_copy_pci_info(eth_dev, pci_dev);
1316
1317         hw->device_id = pci_dev->id.device_id;
1318         hw->vendor_id = pci_dev->id.vendor_id;
1319         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1320
1321         /* initialize the vfta */
1322         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1323
1324         /* initialize the hw strip bitmap*/
1325         memset(hwstrip, 0, sizeof(*hwstrip));
1326
1327         /* Initialize the shared code (base driver) */
1328         diag = ixgbe_init_shared_code(hw);
1329         if (diag != IXGBE_SUCCESS) {
1330                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1331                 return -EIO;
1332         }
1333
1334         /* init_mailbox_params */
1335         hw->mbx.ops.init_params(hw);
1336
1337         /* Reset the hw statistics */
1338         ixgbevf_dev_stats_reset(eth_dev);
1339
1340         /* Disable the interrupts for VF */
1341         ixgbevf_intr_disable(hw);
1342
1343         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1344         diag = hw->mac.ops.reset_hw(hw);
1345
1346         /*
1347          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1348          * the underlying PF driver has not assigned a MAC address to the VF.
1349          * In this case, assign a random MAC address.
1350          */
1351         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1352                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1353                 return diag;
1354         }
1355
1356         /* negotiate mailbox API version to use with the PF. */
1357         ixgbevf_negotiate_api(hw);
1358
1359         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1360         ixgbevf_get_queues(hw, &tcs, &tc);
1361
1362         /* Allocate memory for storing MAC addresses */
1363         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1364                         hw->mac.num_rar_entries, 0);
1365         if (eth_dev->data->mac_addrs == NULL) {
1366                 PMD_INIT_LOG(ERR,
1367                         "Failed to allocate %u bytes needed to store "
1368                         "MAC addresses",
1369                         ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1370                 return -ENOMEM;
1371         }
1372
1373         /* Generate a random MAC address, if none was assigned by PF. */
1374         if (is_zero_ether_addr(perm_addr)) {
1375                 generate_random_mac_addr(perm_addr);
1376                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1377                 if (diag) {
1378                         rte_free(eth_dev->data->mac_addrs);
1379                         eth_dev->data->mac_addrs = NULL;
1380                         return diag;
1381                 }
1382                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1383                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1384                              "%02x:%02x:%02x:%02x:%02x:%02x",
1385                              perm_addr->addr_bytes[0],
1386                              perm_addr->addr_bytes[1],
1387                              perm_addr->addr_bytes[2],
1388                              perm_addr->addr_bytes[3],
1389                              perm_addr->addr_bytes[4],
1390                              perm_addr->addr_bytes[5]);
1391         }
1392
1393         /* Copy the permanent MAC address */
1394         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1395
1396         /* reset the hardware with the new settings */
1397         diag = hw->mac.ops.start_hw(hw);
1398         switch (diag) {
1399                 case  0:
1400                         break;
1401
1402                 default:
1403                         PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1404                         return -EIO;
1405         }
1406
1407         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1408                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1409                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1410
1411         return 0;
1412 }
1413
1414 /* Virtual Function device uninit */
1415
1416 static int
1417 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1418 {
1419         struct ixgbe_hw *hw;
1420         unsigned i;
1421
1422         PMD_INIT_FUNC_TRACE();
1423
1424         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1425                 return -EPERM;
1426
1427         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1428
1429         if (hw->adapter_stopped == 0)
1430                 ixgbevf_dev_close(eth_dev);
1431
1432         eth_dev->dev_ops = NULL;
1433         eth_dev->rx_pkt_burst = NULL;
1434         eth_dev->tx_pkt_burst = NULL;
1435
1436         /* Disable the interrupts for VF */
1437         ixgbevf_intr_disable(hw);
1438
1439         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1440                 ixgbe_dev_rx_queue_release(eth_dev->data->rx_queues[i]);
1441                 eth_dev->data->rx_queues[i] = NULL;
1442         }
1443         eth_dev->data->nb_rx_queues = 0;
1444
1445         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1446                 ixgbe_dev_tx_queue_release(eth_dev->data->tx_queues[i]);
1447                 eth_dev->data->tx_queues[i] = NULL;
1448         }
1449         eth_dev->data->nb_tx_queues = 0;
1450
1451         rte_free(eth_dev->data->mac_addrs);
1452         eth_dev->data->mac_addrs = NULL;
1453
1454         return 0;
1455 }
1456
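/*
 * physical function driver struct
 */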
1457 static struct eth_driver rte_ixgbe_pmd = {
1458         .pci_drv = {
1459                 .name = "rte_ixgbe_pmd",
1460                 .id_table = pci_id_ixgbe_map,
1461                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
1462                         RTE_PCI_DRV_DETACHABLE,
1463         },
1464         .eth_dev_init = eth_ixgbe_dev_init,
1465         .eth_dev_uninit = eth_ixgbe_dev_uninit,
1466         .dev_private_size = sizeof(struct ixgbe_adapter),
1467 };
1468
1469 /*
1470  * virtual function driver struct
1471  */
1472 static struct eth_driver rte_ixgbevf_pmd = {
1473         .pci_drv = {
1474                 .name = "rte_ixgbevf_pmd",
1475                 .id_table = pci_id_ixgbevf_map,
1476                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
1477         },
1478         .eth_dev_init = eth_ixgbevf_dev_init,
1479         .eth_dev_uninit = eth_ixgbevf_dev_uninit,
1480         .dev_private_size = sizeof(struct ixgbe_adapter),
1481 };
1482
1483 /*
1484  * Driver initialization routine.
1485  * Invoked once at EAL init time.
1486  * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
1487  */
1488 static int
1489 rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
1490 {
1491         PMD_INIT_FUNC_TRACE();
1492
1493         rte_eth_driver_register(&rte_ixgbe_pmd);
1494         return 0;
1495 }
1496
1497 /*
1498  * VF Driver initialization routine.
1499  * Invoked once at EAL init time.
1500  * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
1501  */
1502 static int
1503 rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
1504 {
1505         PMD_INIT_FUNC_TRACE();
1506
1507         rte_eth_driver_register(&rte_ixgbevf_pmd);
1508         return 0;
1509 }
1510
1511 static int
1512 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1513 {
1514         struct ixgbe_hw *hw =
1515                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1516         struct ixgbe_vfta * shadow_vfta =
1517                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1518         uint32_t vfta;
1519         uint32_t vid_idx;
1520         uint32_t vid_bit;
1521
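        /*
         * The 4096-bit VLAN filter table (VFTA) is spread over 128 32-bit
         * registers: bits 11:5 of the VLAN ID select the register and
         * bits 4:0 select the bit within it, e.g. VLAN 100 -> bit 4 of VFTA[3].
         */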
1522         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1523         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1524         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1525         if (on)
1526                 vfta |= vid_bit;
1527         else
1528                 vfta &= ~vid_bit;
1529         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1530
1531         /* update local VFTA copy */
1532         shadow_vfta->vfta[vid_idx] = vfta;
1533
1534         return 0;
1535 }
1536
1537 static void
1538 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1539 {
1540         if (on)
1541                 ixgbe_vlan_hw_strip_enable(dev, queue);
1542         else
1543                 ixgbe_vlan_hw_strip_disable(dev, queue);
1544 }
1545
1546 static int
1547 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1548                     enum rte_vlan_type vlan_type,
1549                     uint16_t tpid)
1550 {
1551         struct ixgbe_hw *hw =
1552                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1553         int ret = 0;
1554
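        /*
         * Only the inner (Q-in-Q) TPID can be programmed here, via the
         * upper 16 bits of EXVET; all other VLAN types are rejected below.
         */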
1555         switch (vlan_type) {
1556         case ETH_VLAN_TYPE_INNER:
1557                 /* Only the high 16 bits are valid */
1558                 IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
1559                 break;
1560         default:
1561                 ret = -EINVAL;
1562                 PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
1563                 break;
1564         }
1565
1566         return ret;
1567 }
1568
1569 void
1570 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1571 {
1572         struct ixgbe_hw *hw =
1573                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1574         uint32_t vlnctrl;
1575
1576         PMD_INIT_FUNC_TRACE();
1577
1578         /* Filter Table Disable */
1579         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1580         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1581
1582         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1583 }
1584
1585 void
1586 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1587 {
1588         struct ixgbe_hw *hw =
1589                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1590         struct ixgbe_vfta * shadow_vfta =
1591                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1592         uint32_t vlnctrl;
1593         uint16_t i;
1594
1595         PMD_INIT_FUNC_TRACE();
1596
1597         /* Filter Table Enable */
1598         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1599         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1600         vlnctrl |= IXGBE_VLNCTRL_VFE;
1601
1602         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1603
1604         /* write whatever is in local vfta copy */
1605         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1606                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1607 }
1608
1609 static void
1610 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1611 {
1612         struct ixgbe_hwstrip *hwstrip =
1613                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1614
1615         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1616                 return;
1617
1618         if (on)
1619                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1620         else
1621                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1622 }
1623
1624 static void
1625 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1626 {
1627         struct ixgbe_hw *hw =
1628                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1629         uint32_t ctrl;
1630
1631         PMD_INIT_FUNC_TRACE();
1632
1633         if (hw->mac.type == ixgbe_mac_82598EB) {
1634                 /* No queue level support */
1635                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue level hw strip");
1636                 return;
1637         }
1638         else {
1639                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1640                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1641                 ctrl &= ~IXGBE_RXDCTL_VME;
1642                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1643         }
1644         /* record the setting for HW strip per queue */
1645         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1646 }
1647
1648 static void
1649 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1650 {
1651         struct ixgbe_hw *hw =
1652                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1653         uint32_t ctrl;
1654
1655         PMD_INIT_FUNC_TRACE();
1656
1657         if (hw->mac.type == ixgbe_mac_82598EB) {
1658                 /* No queue level supported */
1659                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue level hw strip");
1660                 return;
1661         }
1662         else {
1663                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1664                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1665                 ctrl |= IXGBE_RXDCTL_VME;
1666                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1667         }
1668         /* record the setting for HW strip per queue */
1669         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1670 }
1671
1672 void
1673 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
1674 {
1675         struct ixgbe_hw *hw =
1676                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1677         uint32_t ctrl;
1678         uint16_t i;
1679
1680         PMD_INIT_FUNC_TRACE();
1681
1682         if (hw->mac.type == ixgbe_mac_82598EB) {
1683                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1684                 ctrl &= ~IXGBE_VLNCTRL_VME;
1685                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1686         }
1687         else {
1688                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1689                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1690                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1691                         ctrl &= ~IXGBE_RXDCTL_VME;
1692                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1693
1694                         /* record the setting for HW strip per queue */
1695                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
1696                 }
1697         }
1698 }
1699
1700 void
1701 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
1702 {
1703         struct ixgbe_hw *hw =
1704                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1705         uint32_t ctrl;
1706         uint16_t i;
1707
1708         PMD_INIT_FUNC_TRACE();
1709
1710         if (hw->mac.type == ixgbe_mac_82598EB) {
1711                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1712                 ctrl |= IXGBE_VLNCTRL_VME;
1713                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1714         }
1715         else {
1716                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1717                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1718                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1719                         ctrl |= IXGBE_RXDCTL_VME;
1720                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1721
1722                         /* record the setting for HW strip per queue */
1723                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
1724                 }
1725         }
1726 }
1727
1728 static void
1729 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1730 {
1731         struct ixgbe_hw *hw =
1732                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1733         uint32_t ctrl;
1734
1735         PMD_INIT_FUNC_TRACE();
1736
1737         /* DMATXCTL: Generic Double VLAN Disable */
1738         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1739         ctrl &= ~IXGBE_DMATXCTL_GDV;
1740         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1741
1742         /* CTRL_EXT: Global Double VLAN Disable */
1743         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1744         ctrl &= ~IXGBE_EXTENDED_VLAN;
1745         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1746
1747 }
1748
1749 static void
1750 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1751 {
1752         struct ixgbe_hw *hw =
1753                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1754         uint32_t ctrl;
1755
1756         PMD_INIT_FUNC_TRACE();
1757
1758         /* DMATXCTL: Generic Double VLAN Enable */
1759         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1760         ctrl |= IXGBE_DMATXCTL_GDV;
1761         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1762
1763         /* CTRL_EXT: Global Double VLAN Enable */
1764         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1765         ctrl |= IXGBE_EXTENDED_VLAN;
1766         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1767
1768         /* Clear pooling mode of PFVTCTL. It's required by X550. */
1769         if (hw->mac.type == ixgbe_mac_X550 ||
1770             hw->mac.type == ixgbe_mac_X550EM_x) {
1771                 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
1772                 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
1773                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
1774         }
1775
1776         /*
1777          * VET EXT field in the EXVET register = 0x8100 by default
1778          * So no need to change it. The same applies to the VT field of the DMATXCTL register.
1779          */
1780 }
1781
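/*
 * Apply the VLAN offload settings selected in dev_conf for each
 * offload category present in 'mask' (strip, filter, extend).
 */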
1782 static void
1783 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1784 {
1785         if (mask & ETH_VLAN_STRIP_MASK) {
1786                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1787                         ixgbe_vlan_hw_strip_enable_all(dev);
1788                 else
1789                         ixgbe_vlan_hw_strip_disable_all(dev);
1790         }
1791
1792         if (mask & ETH_VLAN_FILTER_MASK) {
1793                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1794                         ixgbe_vlan_hw_filter_enable(dev);
1795                 else
1796                         ixgbe_vlan_hw_filter_disable(dev);
1797         }
1798
1799         if (mask & ETH_VLAN_EXTEND_MASK) {
1800                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1801                         ixgbe_vlan_hw_extend_enable(dev);
1802                 else
1803                         ixgbe_vlan_hw_extend_disable(dev);
1804         }
1805 }
1806
1807 static void
1808 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1809 {
1810         struct ixgbe_hw *hw =
1811                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1812         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
1813         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1814         vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
1815         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
1816 }
1817
1818 static int
1819 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1820 {
1821         switch (nb_rx_q) {
1822         case 1:
1823         case 2:
1824                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
1825                 break;
1826         case 4:
1827                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
1828                 break;
1829         default:
1830                 return -EINVAL;
1831         }
1832
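        /*
         * VF pools are mapped first in the queue space, so the queues of
         * the PF default pool start at max_vfs * nb_rx_q.
         */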
1833         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
1834         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
1835
1836         return 0;
1837 }
1838
1839 static int
1840 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
1841 {
1842         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1843         uint16_t nb_rx_q = dev->data->nb_rx_queues;
1844         uint16_t nb_tx_q = dev->data->nb_tx_queues;
1845
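        /*
         * With SR-IOV active the queue layout is dictated by the pool
         * configuration, so both the mq_mode and the per-pool queue
         * counts are validated before going any further.
         */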
1846         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1847                 /* check multi-queue mode */
1848                 switch (dev_conf->rxmode.mq_mode) {
1849                 case ETH_MQ_RX_VMDQ_DCB:
1850                 case ETH_MQ_RX_VMDQ_DCB_RSS:
1851                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
1852                         PMD_INIT_LOG(ERR, "SRIOV active,"
1853                                         " unsupported mq_mode rx %d.",
1854                                         dev_conf->rxmode.mq_mode);
1855                         return -EINVAL;
1856                 case ETH_MQ_RX_RSS:
1857                 case ETH_MQ_RX_VMDQ_RSS:
1858                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1859                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1860                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1861                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1862                                                 " invalid queue number"
1863                                                 " for VMDQ RSS, allowed"
1864                                                 " values are 1, 2 or 4.");
1865                                         return -EINVAL;
1866                                 }
1867                         break;
1868                 case ETH_MQ_RX_VMDQ_ONLY:
1869                 case ETH_MQ_RX_NONE:
1870                         /* if no mq mode is configured, use the default scheme */
1871                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
1872                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
1873                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
1874                         break;
1875                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1876                         /* SRIOV only works with VMDq enabled */
1877                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1878                                         " wrong mq_mode rx %d.",
1879                                         dev_conf->rxmode.mq_mode);
1880                         return -EINVAL;
1881                 }
1882
1883                 switch (dev_conf->txmode.mq_mode) {
1884                 case ETH_MQ_TX_VMDQ_DCB:
1885                         /* DCB VMDQ in SRIOV mode, not implemented yet */
1886                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1887                                         " unsupported VMDQ mq_mode tx %d.",
1888                                         dev_conf->txmode.mq_mode);
1889                         return -EINVAL;
1890                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1891                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
1892                         break;
1893                 }
1894
1895                 /* check valid queue number */
1896                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1897                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1898                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1899                                         " queue number must be less than or equal to %d.",
1900                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1901                         return -EINVAL;
1902                 }
1903         } else {
1904                 /* check configuration for vmdq+dcb mode */
1905                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1906                         const struct rte_eth_vmdq_dcb_conf *conf;
1907
1908                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
1909                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1910                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
1911                                 return -EINVAL;
1912                         }
1913                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1914                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1915                                conf->nb_queue_pools == ETH_32_POOLS)) {
1916                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1917                                                 " nb_queue_pools must be %d or %d.",
1918                                                 ETH_16_POOLS, ETH_32_POOLS);
1919                                 return -EINVAL;
1920                         }
1921                 }
1922                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1923                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
1924
1925                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
1926                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1927                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
1928                                 return -EINVAL;
1929                         }
1930                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1931                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1932                                conf->nb_queue_pools == ETH_32_POOLS)) {
1933                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1934                                                 " nb_queue_pools != %d and"
1935                                                 " nb_queue_pools != %d.",
1936                                                 ETH_16_POOLS, ETH_32_POOLS);
1937                                 return -EINVAL;
1938                         }
1939                 }
1940
1941                 /* For DCB mode check our configuration before we go further */
1942                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1943                         const struct rte_eth_dcb_rx_conf *conf;
1944
1945                         if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
1946                                 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
1947                                                  IXGBE_DCB_NB_QUEUES);
1948                                 return -EINVAL;
1949                         }
1950                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1951                         if (!(conf->nb_tcs == ETH_4_TCS ||
1952                                conf->nb_tcs == ETH_8_TCS)) {
1953                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1954                                                 " and nb_tcs != %d.",
1955                                                 ETH_4_TCS, ETH_8_TCS);
1956                                 return -EINVAL;
1957                         }
1958                 }
1959
1960                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1961                         const struct rte_eth_dcb_tx_conf *conf;
1962
1963                         if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
1964                                 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
1965                                                  IXGBE_DCB_NB_QUEUES);
1966                                 return -EINVAL;
1967                         }
1968                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1969                         if (!(conf->nb_tcs == ETH_4_TCS ||
1970                                conf->nb_tcs == ETH_8_TCS)) {
1971                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1972                                                 " and nb_tcs != %d.",
1973                                                 ETH_4_TCS, ETH_8_TCS);
1974                                 return -EINVAL;
1975                         }
1976                 }
1977         }
1978         return 0;
1979 }
1980
1981 static int
1982 ixgbe_dev_configure(struct rte_eth_dev *dev)
1983 {
1984         struct ixgbe_interrupt *intr =
1985                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1986         struct ixgbe_adapter *adapter =
1987                 (struct ixgbe_adapter *)dev->data->dev_private;
1988         int ret;
1989
1990         PMD_INIT_FUNC_TRACE();
1991         /* multiple queue mode checking */
1992         ret  = ixgbe_check_mq_mode(dev);
1993         if (ret != 0) {
1994                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
1995                             ret);
1996                 return ret;
1997         }
1998
1999         /* set flag to update link status after init */
2000         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2001
2002         /*
2003          * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
2004          * allocation or vector Rx preconditions, it will be reset.
2005          */
2006         adapter->rx_bulk_alloc_allowed = true;
2007         adapter->rx_vec_allowed = true;
2008
2009         return 0;
2010 }
2011
2012 /*
2013  * Configure device link speed and setup link.
2014  * It returns 0 on success.
2015  */
2016 static int
2017 ixgbe_dev_start(struct rte_eth_dev *dev)
2018 {
2019         struct ixgbe_hw *hw =
2020                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2021         struct ixgbe_vf_info *vfinfo =
2022                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2023         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
2024         uint32_t intr_vector = 0;
2025         int err, link_up = 0, negotiate = 0;
2026         uint32_t speed = 0;
2027         int mask = 0;
2028         int status;
2029         uint16_t vf, idx;
2030
2031         PMD_INIT_FUNC_TRACE();
2032
2033         /* IXGBE devices don't support half duplex */
2034         if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
2035                         (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
2036                 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
2037                              dev->data->dev_conf.link_duplex,
2038                              dev->data->port_id);
2039                 return -EINVAL;
2040         }
2041
2042         /* disable uio/vfio intr/eventfd mapping */
2043         rte_intr_disable(intr_handle);
2044
2045         /* stop adapter */
2046         hw->adapter_stopped = 0;
2047         ixgbe_stop_adapter(hw);
2048
2049         /* reinitialize adapter
2050          * this calls reset and start */
2051         status = ixgbe_pf_reset_hw(hw);
2052         if (status != 0)
2053                 return -1;
2054         hw->mac.ops.start_hw(hw);
2055         hw->mac.get_link_status = true;
2056
2057         /* configure PF module if SRIOV enabled */
2058         ixgbe_pf_host_configure(dev);
2059
2060         /* check and configure queue intr-vector mapping */
2061         if ((rte_intr_cap_multiple(intr_handle) ||
2062              !RTE_ETH_DEV_SRIOV(dev).active) &&
2063             dev->data->dev_conf.intr_conf.rxq != 0) {
2064                 intr_vector = dev->data->nb_rx_queues;
2065                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2066                         return -1;
2067         }
2068
2069         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2070                 intr_handle->intr_vec =
2071                         rte_zmalloc("intr_vec",
2072                                     dev->data->nb_rx_queues * sizeof(int), 0);
2073                 if (intr_handle->intr_vec == NULL) {
2074                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2075                                      " intr_vec\n", dev->data->nb_rx_queues);
2076                         return -ENOMEM;
2077                 }
2078         }
2079
2080         /* configure msix for sleep until rx interrupt */
2081         ixgbe_configure_msix(dev);
2082
2083         /* initialize transmission unit */
2084         ixgbe_dev_tx_init(dev);
2085
2086         /* This can fail when allocating mbufs for descriptor rings */
2087         err = ixgbe_dev_rx_init(dev);
2088         if (err) {
2089                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2090                 goto error;
2091         }
2092
2093         err = ixgbe_dev_rxtx_start(dev);
2094         if (err < 0) {
2095                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2096                 goto error;
2097         }
2098
2099         /* Skip link setup if loopback mode is enabled for 82599. */
2100         if (hw->mac.type == ixgbe_mac_82599EB &&
2101                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2102                 goto skip_link_setup;
2103
2104         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2105                 err = hw->mac.ops.setup_sfp(hw);
2106                 if (err)
2107                         goto error;
2108         }
2109
2110         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2111                 /* Turn on the copper */
2112                 ixgbe_set_phy_power(hw, true);
2113         } else {
2114                 /* Turn on the laser */
2115                 ixgbe_enable_tx_laser(hw);
2116         }
2117
2118         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2119         if (err)
2120                 goto error;
2121         dev->data->dev_link.link_status = link_up;
2122
2123         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2124         if (err)
2125                 goto error;
2126
2127         switch(dev->data->dev_conf.link_speed) {
2128         case ETH_LINK_SPEED_AUTONEG:
2129                 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
2130                                 IXGBE_LINK_SPEED_82599_AUTONEG :
2131                                 IXGBE_LINK_SPEED_82598_AUTONEG;
2132                 break;
2133         case ETH_LINK_SPEED_100:
2134                 /*
2135                  * Invalid for 82598 but error will be detected by
2136                  * ixgbe_setup_link()
2137                  */
2138                 speed = IXGBE_LINK_SPEED_100_FULL;
2139                 break;
2140         case ETH_LINK_SPEED_1000:
2141                 speed = IXGBE_LINK_SPEED_1GB_FULL;
2142                 break;
2143         case ETH_LINK_SPEED_10000:
2144                 speed = IXGBE_LINK_SPEED_10GB_FULL;
2145                 break;
2146         default:
2147                 PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
2148                              dev->data->dev_conf.link_speed,
2149                              dev->data->port_id);
2150                 goto error;
2151         }
2152
2153         err = ixgbe_setup_link(hw, speed, link_up);
2154         if (err)
2155                 goto error;
2156
2157 skip_link_setup:
2158
2159         if (rte_intr_allow_others(intr_handle)) {
2160                 /* check if lsc interrupt is enabled */
2161                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2162                         ixgbe_dev_lsc_interrupt_setup(dev);
2163         } else {
2164                 rte_intr_callback_unregister(intr_handle,
2165                                              ixgbe_dev_interrupt_handler,
2166                                              (void *)dev);
2167                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2168                         PMD_INIT_LOG(INFO, "lsc interrupt cannot be enabled:"
2169                                      " no interrupt multiplexing support\n");
2170         }
2171
2172         /* check if rxq interrupt is enabled */
2173         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2174             rte_intr_dp_is_en(intr_handle))
2175                 ixgbe_dev_rxq_interrupt_setup(dev);
2176
2177         /* enable uio/vfio intr/eventfd mapping */
2178         rte_intr_enable(intr_handle);
2179
2180         /* resume enabled intr since hw reset */
2181         ixgbe_enable_intr(dev);
2182
2183         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
2184                 ETH_VLAN_EXTEND_MASK;
2185         ixgbe_vlan_offload_set(dev, mask);
2186
2187         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2188                 /* Enable vlan filtering for VMDq */
2189                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2190         }
2191
2192         /* Configure DCB hw */
2193         ixgbe_configure_dcb(dev);
2194
2195         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2196                 err = ixgbe_fdir_configure(dev);
2197                 if (err)
2198                         goto error;
2199         }
2200
2201         /* Restore vf rate limit */
2202         if (vfinfo != NULL) {
2203                 for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
2204                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2205                                 if (vfinfo[vf].tx_rate[idx] != 0)
2206                                         ixgbe_set_vf_rate_limit(dev, vf,
2207                                                 vfinfo[vf].tx_rate[idx],
2208                                                 1 << idx);
2209         }
2210
2211         ixgbe_restore_statistics_mapping(dev);
2212
2213         return 0;
2214
2215 error:
2216         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2217         ixgbe_dev_clear_queues(dev);
2218         return -EIO;
2219 }
2220
2221 /*
2222  * Stop device: disable rx and tx functions to allow for reconfiguring.
2223  */
2224 static void
2225 ixgbe_dev_stop(struct rte_eth_dev *dev)
2226 {
2227         struct rte_eth_link link;
2228         struct ixgbe_hw *hw =
2229                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2230         struct ixgbe_vf_info *vfinfo =
2231                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2232         struct ixgbe_filter_info *filter_info =
2233                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2234         struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
2235         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
2236         int vf;
2237
2238         PMD_INIT_FUNC_TRACE();
2239
2240         /* disable interrupts */
2241         ixgbe_disable_intr(hw);
2242
2243         /* disable intr eventfd mapping */
2244         rte_intr_disable(intr_handle);
2245
2246         /* reset the NIC */
2247         ixgbe_pf_reset_hw(hw);
2248         hw->adapter_stopped = 0;
2249
2250         /* stop adapter */
2251         ixgbe_stop_adapter(hw);
2252
2253         for (vf = 0; vfinfo != NULL &&
2254                      vf < dev->pci_dev->max_vfs; vf++)
2255                 vfinfo[vf].clear_to_send = false;
2256
2257         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2258                 /* Turn off the copper */
2259                 ixgbe_set_phy_power(hw, false);
2260         } else {
2261                 /* Turn off the laser */
2262                 ixgbe_disable_tx_laser(hw);
2263         }
2264
2265         ixgbe_dev_clear_queues(dev);
2266
2267         /* Clear stored conf */
2268         dev->data->scattered_rx = 0;
2269         dev->data->lro = 0;
2270
2271         /* Clear recorded link status */
2272         memset(&link, 0, sizeof(link));
2273         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2274
2275         /* Remove all ntuple filters of the device */
2276         for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
2277              p_5tuple != NULL; p_5tuple = p_5tuple_next) {
2278                 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
2279                 TAILQ_REMOVE(&filter_info->fivetuple_list,
2280                              p_5tuple, entries);
2281                 rte_free(p_5tuple);
2282         }
2283         memset(filter_info->fivetuple_mask, 0,
2284                 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
2285
2286         if (!rte_intr_allow_others(intr_handle))
2287                 /* resume to the default handler */
2288                 rte_intr_callback_register(intr_handle,
2289                                            ixgbe_dev_interrupt_handler,
2290                                            (void *)dev);
2291
2292         /* Clean datapath event and queue/vec mapping */
2293         rte_intr_efd_disable(intr_handle);
2294         if (intr_handle->intr_vec != NULL) {
2295                 rte_free(intr_handle->intr_vec);
2296                 intr_handle->intr_vec = NULL;
2297         }
2298 }
2299
2300 /*
2301  * Set device link up: enable tx.
2302  */
2303 static int
2304 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2305 {
2306         struct ixgbe_hw *hw =
2307                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2308         if (hw->mac.type == ixgbe_mac_82599EB) {
2309 #ifdef RTE_NIC_BYPASS
2310                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2311                         /* Not supported in bypass mode */
2312                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2313                                      "by device id 0x%x", hw->device_id);
2314                         return -ENOTSUP;
2315                 }
2316 #endif
2317         }
2318
2319         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2320                 /* Turn on the copper */
2321                 ixgbe_set_phy_power(hw, true);
2322         } else {
2323                 /* Turn on the laser */
2324                 ixgbe_enable_tx_laser(hw);
2325         }
2326
2327         return 0;
2328 }
2329
2330 /*
2331  * Set device link down: disable tx.
2332  */
2333 static int
2334 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2335 {
2336         struct ixgbe_hw *hw =
2337                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2338         if (hw->mac.type == ixgbe_mac_82599EB) {
2339 #ifdef RTE_NIC_BYPASS
2340                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2341                         /* Not supported in bypass mode */
2342                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2343                                      "by device id 0x%x", hw->device_id);
2344                         return -ENOTSUP;
2345                 }
2346 #endif
2347         }
2348
2349         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2350                 /* Turn off the copper */
2351                 ixgbe_set_phy_power(hw, false);
2352         } else {
2353                 /* Turn off the laser */
2354                 ixgbe_disable_tx_laser(hw);
2355         }
2356
2357         return 0;
2358 }
2359
2360 /*
2361  * Reset and stop device.
2362  */
2363 static void
2364 ixgbe_dev_close(struct rte_eth_dev *dev)
2365 {
2366         struct ixgbe_hw *hw =
2367                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2368
2369         PMD_INIT_FUNC_TRACE();
2370
2371         ixgbe_pf_reset_hw(hw);
2372
2373         ixgbe_dev_stop(dev);
2374         hw->adapter_stopped = 1;
2375
2376         ixgbe_dev_free_queues(dev);
2377
2378         ixgbe_disable_pcie_master(hw);
2379
2380         /* reprogram the RAR[0] in case user changed it. */
2381         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2382 }
2383
2384 static void
2385 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2386                            struct ixgbe_hw_stats *hw_stats,
2387                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
2388                            uint64_t *total_qprc, uint64_t *total_qprdc)
2389 {
2390         uint32_t bprc, lxon, lxoff, total;
2391         uint32_t delta_gprc = 0;
2392         unsigned i;
2393         /* Workaround for RX byte count not including CRC bytes when CRC
2394          * strip is enabled. CRC bytes are removed from counters when crc_strip
2395          * is disabled.
2396          */
2397         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
2398                         IXGBE_HLREG0_RXCRCSTRP);
2399
2400         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2401         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2402         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2403         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2404
2405         for (i = 0; i < 8; i++) {
2406                 uint32_t mp;
2407                 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2408                 /* global total per queue */
2409                 hw_stats->mpc[i] += mp;
2410                 /* Running comprehensive total for stats display */
2411                 *total_missed_rx += hw_stats->mpc[i];
2412                 if (hw->mac.type == ixgbe_mac_82598EB) {
2413                         hw_stats->rnbc[i] +=
2414                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2415                         hw_stats->pxonrxc[i] +=
2416                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2417                         hw_stats->pxoffrxc[i] +=
2418                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2419                 } else {
2420                         hw_stats->pxonrxc[i] +=
2421                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2422                         hw_stats->pxoffrxc[i] +=
2423                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2424                         hw_stats->pxon2offc[i] +=
2425                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2426                 }
2427                 hw_stats->pxontxc[i] +=
2428                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2429                 hw_stats->pxofftxc[i] +=
2430                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2431         }
2432         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2433                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2434                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2435                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2436
2437                 delta_gprc += delta_qprc;
2438
2439                 hw_stats->qprc[i] += delta_qprc;
2440                 hw_stats->qptc[i] += delta_qptc;
2441
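                /*
                 * The per-queue byte counters are wider than 32 bits and are
                 * exposed as low/high register pairs; assemble them here.
                 */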
2442                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2443                 hw_stats->qbrc[i] +=
2444                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2445                 if (crc_strip == 0)
2446                         hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
2447
2448                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2449                 hw_stats->qbtc[i] +=
2450                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2451
2452                 hw_stats->qprdc[i] += delta_qprdc;
2453                 *total_qprdc += hw_stats->qprdc[i];
2454
2455                 *total_qprc += hw_stats->qprc[i];
2456                 *total_qbrc += hw_stats->qbrc[i];
2457         }
2458         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2459         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2460         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2461
2462         /*
2463          * An errata states that gprc actually counts good + missed packets:
2464          * Workaround to set gprc to summated queue packet receives
2465          */
2466         hw_stats->gprc = *total_qprc;
2467
2468         if (hw->mac.type != ixgbe_mac_82598EB) {
2469                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
2470                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
2471                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
2472                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
2473                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
2474                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
2475                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
2476                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
2477         } else {
2478                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2479                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2480                 /* 82598 only has a counter in the high register */
2481                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2482                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2483                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2484         }
2485         uint64_t old_tpr = hw_stats->tpr;
2486
2487         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2488         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
2489
2490         if (crc_strip == 0)
2491                 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
2492
2493         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
2494         hw_stats->gptc += delta_gptc;
2495         hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
2496         hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
2497
2498         /*
2499          * Workaround: mprc hardware is incorrectly counting
2500          * broadcasts, so for now we subtract those.
2501          */
2502         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2503         hw_stats->bprc += bprc;
2504         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2505         if (hw->mac.type == ixgbe_mac_82598EB)
2506                 hw_stats->mprc -= bprc;
2507
2508         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2509         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2510         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2511         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2512         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2513         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2514
2515         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2516         hw_stats->lxontxc += lxon;
2517         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2518         hw_stats->lxofftxc += lxoff;
2519         total = lxon + lxoff;
2520
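        /*
         * Transmitted XON/XOFF pause frames counted in lxon/lxoff are also
         * included in the packet/byte counters below; subtract them so that
         * only data frames remain.
         */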
2521         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2522         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2523         hw_stats->gptc -= total;
2524         hw_stats->mptc -= total;
2525         hw_stats->ptc64 -= total;
2526         hw_stats->gotc -= total * ETHER_MIN_LEN;
2527
2528         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2529         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2530         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2531         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2532         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
2533         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
2534         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
2535         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2536         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2537         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2538         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2539         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
2540         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2541         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
2542         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
2543         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
2544         /* Only read FCOE on 82599 */
2545         if (hw->mac.type != ixgbe_mac_82598EB) {
2546                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
2547                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
2548                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
2549                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
2550                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
2551         }
2552
2553         /* Flow Director Stats registers */
2554         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
2555         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
2556 }
2557
2558 /*
2559  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
2560  */
2561 static void
2562 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2563 {
2564         struct ixgbe_hw *hw =
2565                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2566         struct ixgbe_hw_stats *hw_stats =
2567                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2568         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2569         unsigned i;
2570
2571         total_missed_rx = 0;
2572         total_qbrc = 0;
2573         total_qprc = 0;
2574         total_qprdc = 0;
2575
2576         ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
2577                         &total_qprc, &total_qprdc);
2578
2579         if (stats == NULL)
2580                 return;
2581
2582         /* Fill out the rte_eth_stats statistics structure */
2583         stats->ipackets = total_qprc;
2584         stats->ibytes = total_qbrc;
2585         stats->opackets = hw_stats->gptc;
2586         stats->obytes = hw_stats->gotc;
2587
2588         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2589                 stats->q_ipackets[i] = hw_stats->qprc[i];
2590                 stats->q_opackets[i] = hw_stats->qptc[i];
2591                 stats->q_ibytes[i] = hw_stats->qbrc[i];
2592                 stats->q_obytes[i] = hw_stats->qbtc[i];
2593                 stats->q_errors[i] = hw_stats->qprdc[i];
2594         }
2595
2596         /* Rx Errors */
2597         stats->imissed  = total_missed_rx;
2598         stats->ierrors  = hw_stats->crcerrs +
2599                           hw_stats->mspdc +
2600                           hw_stats->rlec +
2601                           hw_stats->ruc +
2602                           hw_stats->roc +
2603                           total_missed_rx +
2604                           hw_stats->illerrc +
2605                           hw_stats->errbc +
2606                           hw_stats->rfc +
2607                           hw_stats->fccrc +
2608                           hw_stats->fclast;
2609
2610         /* Tx Errors */
2611         stats->oerrors  = 0;
2612 }
2613
2614 static void
2615 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
2616 {
2617         struct ixgbe_hw_stats *stats =
2618                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2619
2620         /* HW registers are cleared on read */
2621         ixgbe_dev_stats_get(dev, NULL);
2622
2623         /* Reset software totals */
2624         memset(stats, 0, sizeof(*stats));
2625 }
2626
2627 /* This function calculates the number of xstats based on the current config */
2628 static unsigned
2629 ixgbe_xstats_calc_num(void) {
2630         return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
2631                 (IXGBE_NB_TXQ_PRIO_STATS * 8);
2632 }
2633
2634 static int
2635 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
2636                                          unsigned n)
2637 {
2638         struct ixgbe_hw *hw =
2639                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2640         struct ixgbe_hw_stats *hw_stats =
2641                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2642         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2643         unsigned i, stat, count = 0;
2644
2645         count = ixgbe_xstats_calc_num();
2646
2647         if (n < count)
2648                 return count;
2649
2650         total_missed_rx = 0;
2651         total_qbrc = 0;
2652         total_qprc = 0;
2653         total_qprdc = 0;
2654
2655         ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
2656                                    &total_qprc, &total_qprdc);
2657
2658         /* If this is a reset, xstats is NULL and we have cleared the
2659          * registers by reading them.
2660          */
2661         if (!xstats)
2662                 return 0;
2663
2664         /* Extended stats from ixgbe_hw_stats */
2665         count = 0;
2666         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
2667                 snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
2668                          rte_ixgbe_stats_strings[i].name);
2669                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2670                                 rte_ixgbe_stats_strings[i].offset);
2671                 count++;
2672         }
2673
2674         /* RX Priority Stats */
2675         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
2676                 for (i = 0; i < 8; i++) {
2677                         snprintf(xstats[count].name, sizeof(xstats[count].name),
2678                                  "rx_priority%u_%s", i,
2679                                  rte_ixgbe_rxq_strings[stat].name);
2680                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2681                                         rte_ixgbe_rxq_strings[stat].offset +
2682                                         (sizeof(uint64_t) * i));
2683                         count++;
2684                 }
2685         }
2686
2687         /* TX Priority Stats */
2688         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
2689                 for (i = 0; i < 8; i++) {
2690                         snprintf(xstats[count].name, sizeof(xstats[count].name),
2691                                  "tx_priority%u_%s", i,
2692                                  rte_ixgbe_txq_strings[stat].name);
2693                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2694                                         rte_ixgbe_txq_strings[stat].offset +
2695                                         (sizeof(uint64_t) * i));
2696                         count++;
2697                 }
2698         }
2699
2700         return count;
2701 }
2702
2703 static void
2704 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2705 {
2706         struct ixgbe_hw_stats *stats =
2707                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2708
2709         unsigned count = ixgbe_xstats_calc_num();
2710
2711         /* HW registers are cleared on read */
2712         ixgbe_dev_xstats_get(dev, NULL, count);
2713
2714         /* Reset software totals */
2715         memset(stats, 0, sizeof(*stats));
2716 }
2717
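/* Accumulate the VF statistics counters into the software copy; the
 * UPDATE_VF_STAT helpers track the last value read so counter wrap is
 * handled correctly.
 */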
2718 static void
2719 ixgbevf_update_stats(struct rte_eth_dev *dev)
2720 {
2721         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2722         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2723                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2724
2725         /* Good Rx packet, include VF loopback */
2726         UPDATE_VF_STAT(IXGBE_VFGPRC,
2727             hw_stats->last_vfgprc, hw_stats->vfgprc);
2728
2729         /* Good Rx octets, include VF loopback */
2730         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2731             hw_stats->last_vfgorc, hw_stats->vfgorc);
2732
2733         /* Good Tx packet, include VF loopback */
2734         UPDATE_VF_STAT(IXGBE_VFGPTC,
2735             hw_stats->last_vfgptc, hw_stats->vfgptc);
2736
2737         /* Good Tx octets, include VF loopback */
2738         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2739             hw_stats->last_vfgotc, hw_stats->vfgotc);
2740
2741         /* Rx Multicast Packet */
2742         UPDATE_VF_STAT(IXGBE_VFMPRC,
2743             hw_stats->last_vfmprc, hw_stats->vfmprc);
2744 }
2745
2746 static int
2747 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
2748                        unsigned n)
2749 {
2750         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2751                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2752         unsigned i;
2753
2754         if (n < IXGBEVF_NB_XSTATS)
2755                 return IXGBEVF_NB_XSTATS;
2756
2757         ixgbevf_update_stats(dev);
2758
2759         if (!xstats)
2760                 return 0;
2761
2762         /* Extended stats */
2763         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
2764                 snprintf(xstats[i].name, sizeof(xstats[i].name),
2765                          "%s", rte_ixgbevf_stats_strings[i].name);
2766                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
2767                         rte_ixgbevf_stats_strings[i].offset);
2768         }
2769
2770         return IXGBEVF_NB_XSTATS;
2771 }
2772
2773 static void
2774 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2775 {
2776         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2777                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2778
2779         ixgbevf_update_stats(dev);
2780
2781         if (stats == NULL)
2782                 return;
2783
2784         stats->ipackets = hw_stats->vfgprc;
2785         stats->ibytes = hw_stats->vfgorc;
2786         stats->opackets = hw_stats->vfgptc;
2787         stats->obytes = hw_stats->vfgotc;
2788         stats->imcasts = hw_stats->vfmprc;
2789         /* stats->imcasts should be removed as imcasts is deprecated */
2790 }
2791
2792 static void
2793 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
2794 {
2795         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2796                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2797
2798         /* Sync HW register to the last stats */
2799         ixgbevf_dev_stats_get(dev, NULL);
2800
2801         /* reset HW current stats */
2802         hw_stats->vfgprc = 0;
2803         hw_stats->vfgorc = 0;
2804         hw_stats->vfgptc = 0;
2805         hw_stats->vfgotc = 0;
2806         hw_stats->vfmprc = 0;
2807
2808 }
2809
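/* Report the PF device capabilities: queue and MAC address limits, supported
 * Rx/Tx offloads, and the default ring configuration.
 */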
2810 static void
2811 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2812 {
2813         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2814
2815         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2816         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2817         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
2818         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
2819         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2820         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2821         dev_info->max_vfs = dev->pci_dev->max_vfs;
2822         if (hw->mac.type == ixgbe_mac_82598EB)
2823                 dev_info->max_vmdq_pools = ETH_16_POOLS;
2824         else
2825                 dev_info->max_vmdq_pools = ETH_64_POOLS;
2826         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2827         dev_info->rx_offload_capa =
2828                 DEV_RX_OFFLOAD_VLAN_STRIP |
2829                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2830                 DEV_RX_OFFLOAD_UDP_CKSUM  |
2831                 DEV_RX_OFFLOAD_TCP_CKSUM;
2832
2833         /*
2834          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
2835          * mode.
2836          */
2837         if ((hw->mac.type == ixgbe_mac_82599EB ||
2838              hw->mac.type == ixgbe_mac_X540) &&
2839             !RTE_ETH_DEV_SRIOV(dev).active)
2840                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
2841
2842         dev_info->tx_offload_capa =
2843                 DEV_TX_OFFLOAD_VLAN_INSERT |
2844                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2845                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2846                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2847                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2848                 DEV_TX_OFFLOAD_TCP_TSO;
2849
2850         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2851                 .rx_thresh = {
2852                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2853                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2854                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2855                 },
2856                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2857                 .rx_drop_en = 0,
2858         };
2859
2860         dev_info->default_txconf = (struct rte_eth_txconf) {
2861                 .tx_thresh = {
2862                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2863                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2864                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2865                 },
2866                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2867                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2868                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2869                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2870         };
2871
2872         dev_info->rx_desc_lim = rx_desc_lim;
2873         dev_info->tx_desc_lim = tx_desc_lim;
2874
2875         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2876         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
2877         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
2878 }
2879
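/* Report the VF device capabilities; the VF exposes the same offloads and
 * ring defaults as the PF, but no LRO or RSS redirection table information.
 */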
2880 static void
2881 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
2882                      struct rte_eth_dev_info *dev_info)
2883 {
2884         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2885
2886         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2887         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2888         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
2889         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
2890         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2891         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2892         dev_info->max_vfs = dev->pci_dev->max_vfs;
2893         if (hw->mac.type == ixgbe_mac_82598EB)
2894                 dev_info->max_vmdq_pools = ETH_16_POOLS;
2895         else
2896                 dev_info->max_vmdq_pools = ETH_64_POOLS;
2897         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
2898                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2899                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
2900                                 DEV_RX_OFFLOAD_TCP_CKSUM;
2901         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
2902                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2903                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2904                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2905                                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2906                                 DEV_TX_OFFLOAD_TCP_TSO;
2907
2908         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2909                 .rx_thresh = {
2910                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2911                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2912                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2913                 },
2914                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2915                 .rx_drop_en = 0,
2916         };
2917
2918         dev_info->default_txconf = (struct rte_eth_txconf) {
2919                 .tx_thresh = {
2920                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2921                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2922                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2923                 },
2924                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2925                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2926                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2927                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2928         };
2929
2930         dev_info->rx_desc_lim = rx_desc_lim;
2931         dev_info->tx_desc_lim = tx_desc_lim;
2932 }
2933
2934 /* return 0 if the link status changed, -1 if it did not change */
2935 static int
2936 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2937 {
2938         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2939         struct rte_eth_link link, old;
2940         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
2941         int link_up;
2942         int diag;
2943
2944         link.link_status = 0;
2945         link.link_speed = 0;
2946         link.link_duplex = 0;
2947         memset(&old, 0, sizeof(old));
2948         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
2949
2950         hw->mac.get_link_status = true;
2951
2952         /* do not wait for completion when no wait is requested or the LSC interrupt is enabled */
2953         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2954                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
2955         else
2956                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
2957
2958         if (diag != 0) {
2959                 link.link_speed = ETH_LINK_SPEED_100;
2960                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2961                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2962                 if (link.link_status == old.link_status)
2963                         return -1;
2964                 return 0;
2965         }
2966
2967         if (link_up == 0) {
2968                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2969                 if (link.link_status == old.link_status)
2970                         return -1;
2971                 return 0;
2972         }
2973         link.link_status = 1;
2974         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2975
2976         switch (link_speed) {
2977         default:
2978         case IXGBE_LINK_SPEED_UNKNOWN:
2979                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2980                 link.link_speed = ETH_LINK_SPEED_100;
2981                 break;
2982
2983         case IXGBE_LINK_SPEED_100_FULL:
2984                 link.link_speed = ETH_LINK_SPEED_100;
2985                 break;
2986
2987         case IXGBE_LINK_SPEED_1GB_FULL:
2988                 link.link_speed = ETH_LINK_SPEED_1000;
2989                 break;
2990
2991         case IXGBE_LINK_SPEED_10GB_FULL:
2992                 link.link_speed = ETH_LINK_SPEED_10000;
2993                 break;
2994         }
2995         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2996
2997         if (link.link_status == old.link_status)
2998                 return -1;
2999
3000         return 0;
3001 }
3002
3003 static void
3004 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
3005 {
3006         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3007         uint32_t fctrl;
3008
3009         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3010         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3011         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3012 }
3013
3014 static void
3015 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
3016 {
3017         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3018         uint32_t fctrl;
3019
3020         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3021         fctrl &= (~IXGBE_FCTRL_UPE);
3022         if (dev->data->all_multicast == 1)
3023                 fctrl |= IXGBE_FCTRL_MPE;
3024         else
3025                 fctrl &= (~IXGBE_FCTRL_MPE);
3026         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3027 }
3028
3029 static void
3030 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
3031 {
3032         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3033         uint32_t fctrl;
3034
3035         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3036         fctrl |= IXGBE_FCTRL_MPE;
3037         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3038 }
3039
3040 static void
3041 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
3042 {
3043         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3044         uint32_t fctrl;
3045
3046         if (dev->data->promiscuous == 1)
3047                 return; /* must remain in all_multicast mode */
3048
3049         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3050         fctrl &= (~IXGBE_FCTRL_MPE);
3051         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3052 }
3053
3054 /**
3055  * It clears the interrupt causes and enables the interrupt.
3056  * It is called only once during NIC initialization.
3057  *
3058  * @param dev
3059  *  Pointer to struct rte_eth_dev.
3060  *
3061  * @return
3062  *  - On success, zero.
3063  *  - On failure, a negative value.
3064  */
3065 static int
3066 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
3067 {
3068         struct ixgbe_interrupt *intr =
3069                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3070
3071         ixgbe_dev_link_status_print(dev);
3072         intr->mask |= IXGBE_EICR_LSC;
3073
3074         return 0;
3075 }
3076
3077 /**
3078  * It clears the interrupt causes and enables the interrupt.
3079  * It is called only once during NIC initialization.
3080  *
3081  * @param dev
3082  *  Pointer to struct rte_eth_dev.
3083  *
3084  * @return
3085  *  - On success, zero.
3086  *  - On failure, a negative value.
3087  */
3088 static int
3089 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
3090 {
3091         struct ixgbe_interrupt *intr =
3092                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3093
3094         intr->mask |= IXGBE_EICR_RTX_QUEUE;
3095
3096         return 0;
3097 }
3098
3099 /*
3100  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
3101  *
3102  * @param dev
3103  *  Pointer to struct rte_eth_dev.
3104  *
3105  * @return
3106  *  - On success, zero.
3107  *  - On failure, a negative value.
3108  */
3109 static int
3110 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
3111 {
3112         uint32_t eicr;
3113         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3114         struct ixgbe_interrupt *intr =
3115                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3116
3117         /* clear all cause mask */
3118         ixgbe_disable_intr(hw);
3119
3120         /* read-on-clear nic registers here */
3121         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3122         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
3123
3124         intr->flags = 0;
3125
3126         /* set flag for async link update */
3127         if (eicr & IXGBE_EICR_LSC)
3128                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3129
3130         if (eicr & IXGBE_EICR_MAILBOX)
3131                 intr->flags |= IXGBE_FLAG_MAILBOX;
3132
3133         return 0;
3134 }
3135
3136 /**
3137  * It gets and then prints the link status.
3138  *
3139  * @param dev
3140  *  Pointer to struct rte_eth_dev.
3141  *
3142  * @return
3143  *  void
3145  */
3146 static void
3147 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
3148 {
3149         struct rte_eth_link link;
3150
3151         memset(&link, 0, sizeof(link));
3152         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
3153         if (link.link_status) {
3154                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
3155                                         (int)(dev->data->port_id),
3156                                         (unsigned)link.link_speed,
3157                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
3158                                         "full-duplex" : "half-duplex");
3159         } else {
3160                 PMD_INIT_LOG(INFO, "Port %d: Link Down",
3161                                 (int)(dev->data->port_id));
3162         }
3163         PMD_INIT_LOG(DEBUG, "PCI Address: %04x:%02x:%02x.%x",
3164                                 dev->pci_dev->addr.domain,
3165                                 dev->pci_dev->addr.bus,
3166                                 dev->pci_dev->addr.devid,
3167                                 dev->pci_dev->addr.function);
3168 }
3169
3170 /*
3171  * It executes link_update after knowing an interrupt occurred.
3172  *
3173  * @param dev
3174  *  Pointer to struct rte_eth_dev.
3175  *
3176  * @return
3177  *  - On success, zero.
3178  *  - On failure, a negative value.
3179  */
3180 static int
3181 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
3182 {
3183         struct ixgbe_interrupt *intr =
3184                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3185         int64_t timeout;
3186         struct rte_eth_link link;
3187         int intr_enable_delay = false;
3188
3189         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
3190
3191         if (intr->flags & IXGBE_FLAG_MAILBOX) {
3192                 ixgbe_pf_mbx_process(dev);
3193                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
3194         }
3195
3196         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3197                 /* get the link status before the update, to predict what it will become */
3198                 memset(&link, 0, sizeof(link));
3199                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
3200
3201                 ixgbe_dev_link_update(dev, 0);
3202
3203                 /* link is likely to come up */
3204                 if (!link.link_status)
3205                         /* handle it 1 sec later, waiting for it to stabilize */
3206                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
3207                 /* link is likely to go down */
3208                 else
3209                         /* handle it 4 sec later, waiting for it to stabilize */
3210                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
3211
3212                 ixgbe_dev_link_status_print(dev);
3213
3214                 intr_enable_delay = true;
3215         }
3216
3217         if (intr_enable_delay) {
3218                 if (rte_eal_alarm_set(timeout * 1000,
3219                                       ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
3220                         PMD_DRV_LOG(ERR, "Error setting alarm");
3221         } else {
3222                 PMD_DRV_LOG(DEBUG, "enable intr immediately");
3223                 ixgbe_enable_intr(dev);
3224                 rte_intr_enable(&(dev->pci_dev->intr_handle));
3225         }
3226
3227
3228         return 0;
3229 }
3230
3231 /**
3232  * Interrupt handler registered as an alarm callback for delayed handling of a
3233  * specific interrupt, waiting for the NIC state to become stable. As the
3234  * ixgbe interrupt state is not stable right after the link goes down,
3235  * it needs to wait 4 seconds to get a stable status.
3236  *
3237  * @param handle
3238  *  Pointer to interrupt handle.
3239  * @param param
3240  *  The address of the parameter (struct rte_eth_dev *) registered before.
3241  *
3242  * @return
3243  *  void
3244  */
3245 static void
3246 ixgbe_dev_interrupt_delayed_handler(void *param)
3247 {
3248         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3249         struct ixgbe_interrupt *intr =
3250                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3251         struct ixgbe_hw *hw =
3252                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3253         uint32_t eicr;
3254
3255         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3256         if (eicr & IXGBE_EICR_MAILBOX)
3257                 ixgbe_pf_mbx_process(dev);
3258
3259         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3260                 ixgbe_dev_link_update(dev, 0);
3261                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3262                 ixgbe_dev_link_status_print(dev);
3263                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3264         }
3265
3266         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3267         ixgbe_enable_intr(dev);
3268         rte_intr_enable(&(dev->pci_dev->intr_handle));
3269 }
3270
3271 /**
3272  * Interrupt handler triggered by the NIC for handling
3273  * a specific interrupt.
3274  *
3275  * @param handle
3276  *  Pointer to interrupt handle.
3277  * @param param
3278  *  The address of the parameter (struct rte_eth_dev *) registered before.
3279  *
3280  * @return
3281  *  void
3282  */
3283 static void
3284 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3285                             void *param)
3286 {
3287         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3288
3289         ixgbe_dev_interrupt_get_status(dev);
3290         ixgbe_dev_interrupt_action(dev);
3291 }
3292
3293 static int
3294 ixgbe_dev_led_on(struct rte_eth_dev *dev)
3295 {
3296         struct ixgbe_hw *hw;
3297
3298         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3299         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
3300 }
3301
3302 static int
3303 ixgbe_dev_led_off(struct rte_eth_dev *dev)
3304 {
3305         struct ixgbe_hw *hw;
3306
3307         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3308         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
3309 }
3310
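/* Return the current link flow control configuration; the Rx/Tx pause state
 * is derived from the actual MFLCN and FCCFG register settings.
 */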
3311 static int
3312 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3313 {
3314         struct ixgbe_hw *hw;
3315         uint32_t mflcn_reg;
3316         uint32_t fccfg_reg;
3317         int rx_pause;
3318         int tx_pause;
3319
3320         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3321
3322         fc_conf->pause_time = hw->fc.pause_time;
3323         fc_conf->high_water = hw->fc.high_water[0];
3324         fc_conf->low_water = hw->fc.low_water[0];
3325         fc_conf->send_xon = hw->fc.send_xon;
3326         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3327
3328         /*
3329          * Return rx_pause status according to actual setting of
3330          * MFLCN register.
3331          */
3332         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3333         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
3334                 rx_pause = 1;
3335         else
3336                 rx_pause = 0;
3337
3338         /*
3339          * Return tx_pause status according to actual setting of
3340          * FCCFG register.
3341          */
3342         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3343         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
3344                 tx_pause = 1;
3345         else
3346                 tx_pause = 0;
3347
3348         if (rx_pause && tx_pause)
3349                 fc_conf->mode = RTE_FC_FULL;
3350         else if (rx_pause)
3351                 fc_conf->mode = RTE_FC_RX_PAUSE;
3352         else if (tx_pause)
3353                 fc_conf->mode = RTE_FC_TX_PAUSE;
3354         else
3355                 fc_conf->mode = RTE_FC_NONE;
3356
3357         return 0;
3358 }
3359
3360 static int
3361 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3362 {
3363         struct ixgbe_hw *hw;
3364         int err;
3365         uint32_t rx_buf_size;
3366         uint32_t max_high_water;
3367         uint32_t mflcn;
3368         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
3369                 ixgbe_fc_none,
3370                 ixgbe_fc_rx_pause,
3371                 ixgbe_fc_tx_pause,
3372                 ixgbe_fc_full
3373         };
3374
3375         PMD_INIT_FUNC_TRACE();
3376
3377         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3378         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
3379         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3380
3381         /*
3382          * At least reserve one Ethernet frame for watermark
3383          * high_water/low_water in kilobytes for ixgbe
3384          */
3385         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
3386         if ((fc_conf->high_water > max_high_water) ||
3387                 (fc_conf->high_water < fc_conf->low_water)) {
3388                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3389                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3390                 return -EINVAL;
3391         }
3392
3393         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
3394         hw->fc.pause_time     = fc_conf->pause_time;
3395         hw->fc.high_water[0]  = fc_conf->high_water;
3396         hw->fc.low_water[0]   = fc_conf->low_water;
3397         hw->fc.send_xon       = fc_conf->send_xon;
3398         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3399
3400         err = ixgbe_fc_enable(hw);
3401
3402         /* Not negotiated is not an error case */
3403         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
3404
3405                 /* check if we want to forward MAC frames - driver doesn't have native
3406                  * capability to do that, so we'll write the registers ourselves */
3407
3408                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3409
3410                 /* set or clear MFLCN.PMCF bit depending on configuration */
3411                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3412                         mflcn |= IXGBE_MFLCN_PMCF;
3413                 else
3414                         mflcn &= ~IXGBE_MFLCN_PMCF;
3415
3416                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
3417                 IXGBE_WRITE_FLUSH(hw);
3418
3419                 return 0;
3420         }
3421
3422         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
3423         return -EIO;
3424 }
3425
3426 /**
3427  *  ixgbe_dcb_pfc_enable_generic - Enable priority flow control
3428  *  @hw: pointer to hardware structure
3429  *  @tc_num: traffic class number
3430  *  Enable flow control according to the current settings.
3431  */
3432 static int
3433 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
3434 {
3435         int ret_val = 0;
3436         uint32_t mflcn_reg, fccfg_reg;
3437         uint32_t reg;
3438         uint32_t fcrtl, fcrth;
3439         uint8_t i;
3440         uint8_t nb_rx_en;
3441
3442         /* Validate the water mark configuration */
3443         if (!hw->fc.pause_time) {
3444                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3445                 goto out;
3446         }
3447
3448         /* Low water mark of zero causes XOFF floods */
3449         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
3450                 /* High/Low water can not be 0 */
3451                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
3452                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3453                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3454                         goto out;
3455                 }
3456
3457                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
3458                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3459                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3460                         goto out;
3461                 }
3462         }
3463         /* Negotiate the fc mode to use */
3464         ixgbe_fc_autoneg(hw);
3465
3466         /* Disable any previous flow control settings */
3467         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3468         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
3469
3470         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3471         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
3472
3473         switch (hw->fc.current_mode) {
3474         case ixgbe_fc_none:
3475                 /*
3476                  * If more than one RX priority flow control is enabled,
3477                  * then TX pause cannot be disabled.
3478                  */
3479                 nb_rx_en = 0;
3480                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3481                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3482                         if (reg & IXGBE_FCRTH_FCEN)
3483                                 nb_rx_en++;
3484                 }
3485                 if (nb_rx_en > 1)
3486                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3487                 break;
3488         case ixgbe_fc_rx_pause:
3489                 /*
3490                  * Rx Flow control is enabled and Tx Flow control is
3491                  * disabled by software override. Since there really
3492                  * isn't a way to advertise that we are capable of RX
3493                  * Pause ONLY, we will advertise that we support both
3494                  * symmetric and asymmetric Rx PAUSE.  Later, we will
3495                  * disable the adapter's ability to send PAUSE frames.
3496                  */
3497                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
3498                 /*
3499                  * If more than one RX priority flow control is enabled,
3500                  * then TX pause cannot be disabled.
3501                  */
3502                 nb_rx_en = 0;
3503                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3504                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3505                         if (reg & IXGBE_FCRTH_FCEN)
3506                                 nb_rx_en++;
3507                 }
3508                 if (nb_rx_en > 1)
3509                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3510                 break;
3511         case ixgbe_fc_tx_pause:
3512                 /*
3513                  * Tx Flow control is enabled, and Rx Flow control is
3514                  * disabled by software override.
3515                  */
3516                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3517                 break;
3518         case ixgbe_fc_full:
3519                 /* Flow control (both Rx and Tx) is enabled by SW override. */
3520                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
3521                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3522                 break;
3523         default:
3524                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
3525                 ret_val = IXGBE_ERR_CONFIG;
3526                 goto out;
3527                 break;
3528         }
3529
3530         /* Set 802.3x based flow control settings. */
3531         mflcn_reg |= IXGBE_MFLCN_DPF;
3532         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
3533         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
3534
3535         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
3536         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
3537                 hw->fc.high_water[tc_num]) {
3538                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
3539                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
3540                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
3541         } else {
3542                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
3543                 /*
3544                  * In order to prevent Tx hangs when the internal Tx
3545                  * switch is enabled we must set the high water mark
3546                  * to the maximum FCRTH value.  This allows the Tx
3547                  * switch to function even under heavy Rx workloads.
3548                  */
3549                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
3550         }
3551         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
3552
3553         /* Configure pause time (2 TCs per register) */
3554         reg = hw->fc.pause_time * 0x00010001;
3555         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
3556                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
3557
3558         /* Configure flow control refresh threshold value */
3559         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
3560
3561 out:
3562         return ret_val;
3563 }
3564
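/* Enable priority flow control for one traffic class. The 82598 has no
 * per-TC flow control support, so IXGBE_NOT_IMPLEMENTED is returned for it.
 */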
3565 static int
3566 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
3567 {
3568         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3569         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
3570
3571         if (hw->mac.type != ixgbe_mac_82598EB) {
3572                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
3573         }
3574         return ret_val;
3575 }
3576
3577 static int
3578 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
3579 {
3580         int err;
3581         uint32_t rx_buf_size;
3582         uint32_t max_high_water;
3583         uint8_t tc_num;
3584         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
3585         struct ixgbe_hw *hw =
3586                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3587         struct ixgbe_dcb_config *dcb_config =
3588                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3589
3590         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
3591                 ixgbe_fc_none,
3592                 ixgbe_fc_rx_pause,
3593                 ixgbe_fc_tx_pause,
3594                 ixgbe_fc_full
3595         };
3596
3597         PMD_INIT_FUNC_TRACE();
3598
3599         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3600         tc_num = map[pfc_conf->priority];
3601         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
3602         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3603         /*
3604          * At least reserve one Ethernet frame for watermark
3605          * high_water/low_water in kilobytes for ixgbe
3606          */
3607         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
3608         if ((pfc_conf->fc.high_water > max_high_water) ||
3609             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
3610                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3611                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3612                 return -EINVAL;
3613         }
3614
3615         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
3616         hw->fc.pause_time = pfc_conf->fc.pause_time;
3617         hw->fc.send_xon = pfc_conf->fc.send_xon;
3618         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
3619         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3620
3621         err = ixgbe_dcb_pfc_enable(dev, tc_num);
3622
3623         /* Not negotiated is not an error case */
3624         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
3625                 return 0;
3626
3627         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
3628         return -EIO;
3629 }
3630
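/* Update the RSS redirection table. Entries are written four at a time; the
 * per-group mask selects which of the four 8-bit entries in each RETA
 * register are replaced and which keep their current value.
 */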
3631 static int
3632 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3633                           struct rte_eth_rss_reta_entry64 *reta_conf,
3634                           uint16_t reta_size)
3635 {
3636         uint8_t i, j, mask;
3637         uint32_t reta, r;
3638         uint16_t idx, shift;
3639         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3640         uint16_t sp_reta_size;
3641         uint32_t reta_reg;
3642
3643         PMD_INIT_FUNC_TRACE();
3644
3645         if (!ixgbe_rss_update_sp(hw->mac.type)) {
3646                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3647                         "NIC.");
3648                 return -ENOTSUP;
3649         }
3650
3651         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3652         if (reta_size != sp_reta_size) {
3653                 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
3654                         "(%d) doesn't match the number the hardware supports "
3655                         "(%d)\n", reta_size, sp_reta_size);
3656                 return -EINVAL;
3657         }
3658
3659         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
3660                 idx = i / RTE_RETA_GROUP_SIZE;
3661                 shift = i % RTE_RETA_GROUP_SIZE;
3662                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3663                                                 IXGBE_4_BIT_MASK);
3664                 if (!mask)
3665                         continue;
3666                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3667                 if (mask == IXGBE_4_BIT_MASK)
3668                         r = 0;
3669                 else
3670                         r = IXGBE_READ_REG(hw, reta_reg);
3671                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
3672                         if (mask & (0x1 << j))
3673                                 reta |= reta_conf[idx].reta[shift + j] <<
3674                                                         (CHAR_BIT * j);
3675                         else
3676                                 reta |= r & (IXGBE_8_BIT_MASK <<
3677                                                 (CHAR_BIT * j));
3678                 }
3679                 IXGBE_WRITE_REG(hw, reta_reg, reta);
3680         }
3681
3682         return 0;
3683 }
3684
3685 static int
3686 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3687                          struct rte_eth_rss_reta_entry64 *reta_conf,
3688                          uint16_t reta_size)
3689 {
3690         uint8_t i, j, mask;
3691         uint32_t reta;
3692         uint16_t idx, shift;
3693         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3694         uint16_t sp_reta_size;
3695         uint32_t reta_reg;
3696
3697         PMD_INIT_FUNC_TRACE();
3698         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3699         if (reta_size != sp_reta_size) {
3700                 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
3701                         "(%d) doesn't match the number the hardware supports "
3702                         "(%d)\n", reta_size, sp_reta_size);
3703                 return -EINVAL;
3704         }
3705
3706         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
3707                 idx = i / RTE_RETA_GROUP_SIZE;
3708                 shift = i % RTE_RETA_GROUP_SIZE;
3709                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3710                                                 IXGBE_4_BIT_MASK);
3711                 if (!mask)
3712                         continue;
3713
3714                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3715                 reta = IXGBE_READ_REG(hw, reta_reg);
3716                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
3717                         if (mask & (0x1 << j))
3718                                 reta_conf[idx].reta[shift + j] =
3719                                         ((reta >> (CHAR_BIT * j)) &
3720                                                 IXGBE_8_BIT_MASK);
3721                 }
3722         }
3723
3724         return 0;
3725 }
3726
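/* Add a MAC address to the receive address register (RAR) at the given index
 * and associate it with the given VMDq pool.
 */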
3727 static void
3728 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
3729                                 uint32_t index, uint32_t pool)
3730 {
3731         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3732         uint32_t enable_addr = 1;
3733
3734         ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
3735 }
3736
3737 static void
3738 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3739 {
3740         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3741
3742         ixgbe_clear_rar(hw, index);
3743 }
3744
3745 static void
3746 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
3747 {
3748         ixgbe_remove_rar(dev, 0);
3749
3750         ixgbe_add_rar(dev, addr, 0, 0);
3751 }
3752
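/* Set a new MTU: validate it against the device limits, toggle jumbo frame
 * mode in HLREG0 as needed and program the new maximum frame size in MAXFRS.
 */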
3753 static int
3754 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3755 {
3756         uint32_t hlreg0;
3757         uint32_t maxfrs;
3758         struct ixgbe_hw *hw;
3759         struct rte_eth_dev_info dev_info;
3760         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3761
3762         ixgbe_dev_info_get(dev, &dev_info);
3763
3764         /* check that mtu is within the allowed range */
3765         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
3766                 return -EINVAL;
3767
3768         /* refuse an mtu that requires scattered packet support when that
3769          * feature has not been enabled before. */
3770         if (!dev->data->scattered_rx &&
3771             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
3772              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
3773                 return -EINVAL;
3774
3775         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3776         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3777
3778         /* switch to jumbo mode if needed */
3779         if (frame_size > ETHER_MAX_LEN) {
3780                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3781                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3782         } else {
3783                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3784                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
3785         }
3786         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3787
3788         /* update max frame size */
3789         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3790
3791         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
3792         maxfrs &= 0x0000FFFF;
3793         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
3794         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
3795
3796         return 0;
3797 }
3798
3799 /*
3800  * Virtual Function operations
3801  */
3802 static void
3803 ixgbevf_intr_disable(struct ixgbe_hw *hw)
3804 {
3805         PMD_INIT_FUNC_TRACE();
3806
3807         /* Clear interrupt mask to stop from interrupts being generated */
3808         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
3809
3810         IXGBE_WRITE_FLUSH(hw);
3811 }
3812
3813 static void
3814 ixgbevf_intr_enable(struct ixgbe_hw *hw)
3815 {
3816         PMD_INIT_FUNC_TRACE();
3817
3818         /* VF enable interrupt autoclean */
3819         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
3820         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
3821         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
3822
3823         IXGBE_WRITE_FLUSH(hw);
3824 }
3825
3826 static int
3827 ixgbevf_dev_configure(struct rte_eth_dev *dev)
3828 {
3829         struct rte_eth_conf *conf = &dev->data->dev_conf;
3830         struct ixgbe_adapter *adapter =
3831                         (struct ixgbe_adapter *)dev->data->dev_private;
3832
3833         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3834                      dev->data->port_id);
3835
3836         /*
3837          * VF has no ability to enable/disable HW CRC
3838          * Keep the persistent behavior the same as Host PF
3839          */
3840 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
3841         if (!conf->rxmode.hw_strip_crc) {
3842                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
3843                 conf->rxmode.hw_strip_crc = 1;
3844         }
3845 #else
3846         if (conf->rxmode.hw_strip_crc) {
3847                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
3848                 conf->rxmode.hw_strip_crc = 0;
3849         }
3850 #endif
3851
3852         /*
3853          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
3854          * allocation or vector Rx preconditions, we will reset it.
3855          */
3856         adapter->rx_bulk_alloc_allowed = true;
3857         adapter->rx_vec_allowed = true;
3858
3859         return 0;
3860 }
3861
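/* Start the VF device: reset the MAC, negotiate the mailbox API with the PF,
 * initialize the Tx/Rx queues, restore VLAN filters and strip settings, and
 * set up Rx queue interrupts when they are requested.
 */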
3862 static int
3863 ixgbevf_dev_start(struct rte_eth_dev *dev)
3864 {
3865         struct ixgbe_hw *hw =
3866                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3867         uint32_t intr_vector = 0;
3868         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3869
3870         int err, mask = 0;
3871
3872         PMD_INIT_FUNC_TRACE();
3873
3874         hw->mac.ops.reset_hw(hw);
3875         hw->mac.get_link_status = true;
3876
3877         /* negotiate mailbox API version to use with the PF. */
3878         ixgbevf_negotiate_api(hw);
3879
3880         ixgbevf_dev_tx_init(dev);
3881
3882         /* This can fail when allocating mbufs for descriptor rings */
3883         err = ixgbevf_dev_rx_init(dev);
3884         if (err) {
3885                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
3886                 ixgbe_dev_clear_queues(dev);
3887                 return err;
3888         }
3889
3890         /* Set vfta */
3891         ixgbevf_set_vfta_all(dev, 1);
3892
3893         /* Set HW strip */
3894         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
3895                 ETH_VLAN_EXTEND_MASK;
3896         ixgbevf_vlan_offload_set(dev, mask);
3897
3898         ixgbevf_dev_rxtx_start(dev);
3899
3900         /* check and configure queue intr-vector mapping */
3901         if (dev->data->dev_conf.intr_conf.rxq != 0) {
3902                 intr_vector = dev->data->nb_rx_queues;
3903                 if (rte_intr_efd_enable(intr_handle, intr_vector))
3904                         return -1;
3905         }
3906
3907         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3908                 intr_handle->intr_vec =
3909                         rte_zmalloc("intr_vec",
3910                                     dev->data->nb_rx_queues * sizeof(int), 0);
3911                 if (intr_handle->intr_vec == NULL) {
3912                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
3913                                      " intr_vec\n", dev->data->nb_rx_queues);
3914                         return -ENOMEM;
3915                 }
3916         }
3917         ixgbevf_configure_msix(dev);
3918
3919         rte_intr_enable(intr_handle);
3920
3921         /* Re-enable interrupt for VF */
3922         ixgbevf_intr_enable(hw);
3923
3924         return 0;
3925 }
3926
3927 static void
3928 ixgbevf_dev_stop(struct rte_eth_dev *dev)
3929 {
3930         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3931         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3932
3933         PMD_INIT_FUNC_TRACE();
3934
3935         hw->adapter_stopped = 1;
3936         ixgbe_stop_adapter(hw);
3937
3938         /*
3939          * Clear what we set, but we still keep shadow_vfta to
3940          * restore after device starts
3941          */
3942         ixgbevf_set_vfta_all(dev, 0);
3943
3944         /* Clear stored conf */
3945         dev->data->scattered_rx = 0;
3946
3947         ixgbe_dev_clear_queues(dev);
3948
3949         /* disable intr eventfd mapping */
3950         rte_intr_disable(intr_handle);
3951
3952         /* Clean datapath event and queue/vec mapping */
3953         rte_intr_efd_disable(intr_handle);
3954         if (intr_handle->intr_vec != NULL) {
3955                 rte_free(intr_handle->intr_vec);
3956                 intr_handle->intr_vec = NULL;
3957         }
3958 }
3959
3960 static void
3961 ixgbevf_dev_close(struct rte_eth_dev *dev)
3962 {
3963         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3964
3965         PMD_INIT_FUNC_TRACE();
3966
3967         ixgbe_reset_hw(hw);
3968
3969         ixgbevf_dev_stop(dev);
3970
3971         ixgbe_dev_free_queues(dev);
3972
3973         /* reprogram the RAR[0] in case user changed it. */
3974         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
3975 }
3976
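/* Program every VLAN filter entry recorded in the shadow VFTA, setting or
 * clearing them all according to 'on'.
 */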
3977 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3978 {
3979         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3980         struct ixgbe_vfta * shadow_vfta =
3981                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3982         int i = 0, j = 0, vfta = 0, mask = 1;
3983
3984         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3985                 vfta = shadow_vfta->vfta[i];
3986                 if (vfta) {
3987                         mask = 1;
3988                         for (j = 0; j < 32; j++) {
3989                                 if (vfta & mask)
3990                                         ixgbe_set_vfta(hw, (i << 5) + j, 0, on);
3991                                 mask <<= 1;
3992                         }
3993                 }
3994         }
3995
3996 }
3997
3998 static int
3999 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
4000 {
4001         struct ixgbe_hw *hw =
4002                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4003         struct ixgbe_vfta * shadow_vfta =
4004                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
4005         uint32_t vid_idx = 0;
4006         uint32_t vid_bit = 0;
4007         int ret = 0;
4008
4009         PMD_INIT_FUNC_TRACE();
4010
4011         /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
4012         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
4013         if (ret) {
4014                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
4015                 return ret;
4016         }
4017         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
4018         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
4019
4020         /* Save what we set and restore it after device reset */
4021         if (on)
4022                 shadow_vfta->vfta[vid_idx] |= vid_bit;
4023         else
4024                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
4025
4026         return 0;
4027 }
4028
4029 static void
4030 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
4031 {
4032         struct ixgbe_hw *hw =
4033                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4034         uint32_t ctrl;
4035
4036         PMD_INIT_FUNC_TRACE();
4037
4038         if(queue >= hw->mac.max_rx_queues)
4039                 return;
4040
4041         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
4042         if(on)
4043                 ctrl |= IXGBE_RXDCTL_VME;
4044         else
4045                 ctrl &= ~IXGBE_RXDCTL_VME;
4046         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
4047
4048         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
4049 }
4050
4051 static void
4052 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4053 {
4054         struct ixgbe_hw *hw =
4055                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4056         uint16_t i;
4057         int on = 0;
4058
4059         /* The VF only supports HW VLAN stripping; other offloads are not supported. */
4060         if (mask & ETH_VLAN_STRIP_MASK) {
4061                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
4062
4063                 for (i = 0; i < hw->mac.max_rx_queues; i++)
4064                         ixgbevf_vlan_strip_queue_set(dev, i, on);
4065         }
4066 }
4067
4068 static int
4069 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
4070 {
4071         uint32_t reg_val;
4072
4073         /* we only need to do this if VMDq is enabled */
4074         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4075         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
4076                 PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
4077                 return -1;
4078         }
4079
4080         return 0;
4081 }
4082
4083 static uint32_t
4084 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
4085 {
4086         uint32_t vector = 0;
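             /*
              * mc_filter_type selects which 12 bits of the destination MAC
              * address (taken from address bytes 4 and 5) form the hash used
              * to index the UTA/MTA table; each case below extracts a
              * different bit window.
              */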
4087         switch (hw->mac.mc_filter_type) {
4088         case 0:   /* use bits [47:36] of the address */
4089                 vector = ((uc_addr->addr_bytes[4] >> 4) |
4090                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
4091                 break;
4092         case 1:   /* use bits [46:35] of the address */
4093                 vector = ((uc_addr->addr_bytes[4] >> 3) |
4094                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
4095                 break;
4096         case 2:   /* use bits [45:34] of the address */
4097                 vector = ((uc_addr->addr_bytes[4] >> 2) |
4098                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
4099                 break;
4100         case 3:   /* use bits [43:32] of the address */
4101                 vector = ((uc_addr->addr_bytes[4]) |
4102                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
4103                 break;
4104         default:  /* Invalid mc_filter_type */
4105                 break;
4106         }
4107
4108         /* vector can only be 12-bits or boundary will be exceeded */
4109         vector &= 0xFFF;
4110         return vector;
4111 }
4112
4113 static int
4114 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
4115                                uint8_t on)
4116 {
4117         uint32_t vector;
4118         uint32_t uta_idx;
4119         uint32_t reg_val;
4120         uint32_t uta_shift;
4121         uint32_t rc;
4122         const uint32_t ixgbe_uta_idx_mask = 0x7F;
4123         const uint32_t ixgbe_uta_bit_shift = 5;
4124         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
4125         const uint32_t bit1 = 0x1;
4126
4127         struct ixgbe_hw *hw =
4128                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4129         struct ixgbe_uta_info *uta_info =
4130                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
4131
4132         /* The UTA table only exists on 82599 hardware and newer */
4133         if (hw->mac.type < ixgbe_mac_82599EB)
4134                 return -ENOTSUP;
4135
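             /*
              * Split the 12-bit hash: the upper 7 bits select one of the 128
              * 32-bit UTA registers, the lower 5 bits select the bit inside it.
              */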
4136         vector = ixgbe_uta_vector(hw,mac_addr);
4137         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
4138         uta_shift = vector & ixgbe_uta_bit_mask;
4139
4140         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
4141         if(rc == on)
4142                 return 0;
4143
4144         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
4145         if (on) {
4146                 uta_info->uta_in_use++;
4147                 reg_val |= (bit1 << uta_shift);
4148                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
4149         } else {
4150                 uta_info->uta_in_use--;
4151                 reg_val &= ~(bit1 << uta_shift);
4152                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
4153         }
4154
4155         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
4156
4157         if (uta_info->uta_in_use > 0)
4158                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
4159                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
4160         else
4161                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
4162
4163         return 0;
4164 }
4165
4166 static int
4167 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
4168 {
4169         int i;
4170         struct ixgbe_hw *hw =
4171                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4172         struct ixgbe_uta_info *uta_info =
4173                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
4174
4175         /* The UTA table only exists on 82599 hardware and newer */
4176         if (hw->mac.type < ixgbe_mac_82599EB)
4177                 return -ENOTSUP;
4178
4179         if(on) {
4180                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
4181                         uta_info->uta_shadow[i] = ~0;
4182                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
4183                 }
4184         } else {
4185                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
4186                         uta_info->uta_shadow[i] = 0;
4187                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
4188                 }
4189         }
4190         return 0;
4191
4192 }
4193
4194 uint32_t
4195 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
4196 {
4197         uint32_t new_val = orig_val;
4198
4199         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
4200                 new_val |= IXGBE_VMOLR_AUPE;
4201         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
4202                 new_val |= IXGBE_VMOLR_ROMPE;
4203         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
4204                 new_val |= IXGBE_VMOLR_ROPE;
4205         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
4206                 new_val |= IXGBE_VMOLR_BAM;
4207         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
4208                 new_val |= IXGBE_VMOLR_MPE;
4209
4210         return new_val;
4211 }
4212
4213 static int
4214 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
4215                                uint16_t rx_mask, uint8_t on)
4216 {
4217         int val = 0;
4218
4219         struct ixgbe_hw *hw =
4220                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4221         uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4222
4223         if (hw->mac.type == ixgbe_mac_82598EB) {
4224                 PMD_INIT_LOG(ERR, "setting VF receive mode should only be done"
4225                              " on 82599 hardware and newer");
4226                 return -ENOTSUP;
4227         }
4228         if (ixgbe_vmdq_mode_check(hw) < 0)
4229                 return -ENOTSUP;
4230
4231         val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
4232
4233         if (on)
4234                 vmolr |= val;
4235         else
4236                 vmolr &= ~val;
4237
4238         IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4239
4240         return 0;
4241 }
4242
4243 static int
4244 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4245 {
4246         uint32_t reg,addr;
4247         uint32_t val;
4248         const uint8_t bit1 = 0x1;
4249
4250         struct ixgbe_hw *hw =
4251                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4252
4253         if (ixgbe_vmdq_mode_check(hw) < 0)
4254                 return -ENOTSUP;
4255
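             /*
              * VFRE is split across two 32-bit registers; the comparison below
              * picks the register half that holds this pool's enable bit.
              */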
4256         addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
4257         reg = IXGBE_READ_REG(hw, addr);
4258         val = bit1 << pool;
4259
4260         if (on)
4261                 reg |= val;
4262         else
4263                 reg &= ~val;
4264
4265         IXGBE_WRITE_REG(hw, addr,reg);
4266
4267         return 0;
4268 }
4269
4270 static int
4271 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4272 {
4273         uint32_t reg,addr;
4274         uint32_t val;
4275         const uint8_t bit1 = 0x1;
4276
4277         struct ixgbe_hw *hw =
4278                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4279
4280         if (ixgbe_vmdq_mode_check(hw) < 0)
4281                 return -ENOTSUP;
4282
4283         addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
4284         reg = IXGBE_READ_REG(hw, addr);
4285         val = bit1 << pool;
4286
4287         if (on)
4288                 reg |= val;
4289         else
4290                 reg &= ~val;
4291
4292         IXGBE_WRITE_REG(hw, addr,reg);
4293
4294         return 0;
4295 }
4296
4297 static int
4298 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
4299                         uint64_t pool_mask, uint8_t vlan_on)
4300 {
4301         int ret = 0;
4302         uint16_t pool_idx;
4303         struct ixgbe_hw *hw =
4304                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4305
4306         if (ixgbe_vmdq_mode_check(hw) < 0)
4307                 return -ENOTSUP;
4308         for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
4309                 if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
4310                         ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
4311                         if (ret < 0)
4312                                 return ret;
                     }
4313         }
4314
4315         return ret;
4316 }
4317
4318 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
4319 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
4320 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
4321 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
4322 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
4323         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
4324         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
4325
4326 static int
4327 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
4328                         struct rte_eth_mirror_conf *mirror_conf,
4329                         uint8_t rule_id, uint8_t on)
4330 {
4331         uint32_t mr_ctl,vlvf;
4332         uint32_t mp_lsb = 0;
4333         uint32_t mv_msb = 0;
4334         uint32_t mv_lsb = 0;
4335         uint32_t mp_msb = 0;
4336         uint8_t i = 0;
4337         int reg_index = 0;
4338         uint64_t vlan_mask = 0;
4339
4340         const uint8_t pool_mask_offset = 32;
4341         const uint8_t vlan_mask_offset = 32;
4342         const uint8_t dst_pool_offset = 8;
4343         const uint8_t rule_mr_offset = 4;
4344         const uint8_t mirror_rule_mask = 0x0F;
4345
4346         struct ixgbe_mirror_info *mr_info =
4347                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
4348         struct ixgbe_hw *hw =
4349                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4350         uint8_t mirror_type = 0;
4351
4352         if (ixgbe_vmdq_mode_check(hw) < 0)
4353                 return -ENOTSUP;
4354
4355         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
4356                 return -EINVAL;
4357
4358         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
4359                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
4360                         mirror_conf->rule_type);
4361                 return -EINVAL;
4362         }
4363
4364         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
4365                 mirror_type |= IXGBE_MRCTL_VLME;
4366                 /* Check if the VLAN id is valid and find the corresponding VLAN ID index in VLVF */
4367                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
4368                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
4369                                 /* search vlan id related pool vlan filter index */
4370                                 reg_index = ixgbe_find_vlvf_slot(hw,
4371                                                 mirror_conf->vlan.vlan_id[i]);
4372                                 if(reg_index < 0)
4373                                         return -EINVAL;
4374                                 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
4375                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
4376                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
4377                                       mirror_conf->vlan.vlan_id[i]))
4378                                         vlan_mask |= (1ULL << reg_index);
4379                                 else
4380                                         return -EINVAL;
4381                         }
4382                 }
4383
4384                 if (on) {
4385                         mv_lsb = vlan_mask & 0xFFFFFFFF;
4386                         mv_msb = vlan_mask >> vlan_mask_offset;
4387
4388                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
4389                                                 mirror_conf->vlan.vlan_mask;
4390                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
4391                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
4392                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
4393                                                 mirror_conf->vlan.vlan_id[i];
4394                         }
4395                 } else {
4396                         mv_lsb = 0;
4397                         mv_msb = 0;
4398                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
4399                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
4400                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
4401                 }
4402         }
4403
4404         /*
4405          * If pool mirroring is enabled, write the related pool mask register;
4406          * if it is disabled, clear the PFMRVM register.
4407          */
4408         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
4409                 mirror_type |= IXGBE_MRCTL_VPME;
4410                 if (on) {
4411                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
4412                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
4413                         mr_info->mr_conf[rule_id].pool_mask =
4414                                         mirror_conf->pool_mask;
4415
4416                 } else {
4417                         mp_lsb = 0;
4418                         mp_msb = 0;
4419                         mr_info->mr_conf[rule_id].pool_mask = 0;
4420                 }
4421         }
4422         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
4423                 mirror_type |= IXGBE_MRCTL_UPME;
4424         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
4425                 mirror_type |= IXGBE_MRCTL_DPME;
4426
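             /*
              * MRCTL layout as used here: the low four bits (mirror_rule_mask)
              * carry the mirror type flags (VPME/UPME/DPME/VLME) and the
              * destination pool is written starting at bit dst_pool_offset (8).
              */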
4427         /* read mirror control register and recalculate it */
4428         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
4429
4430         if (on) {
4431                 mr_ctl |= mirror_type;
4432                 mr_ctl &= mirror_rule_mask;
4433                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
4434         } else
4435                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
4436
4437         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
4438         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
4439
4440         /* write mirror control register */
4441         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
4442
4443         /* write pool mirror control register */
4444         if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
4445                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
4446                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
4447                                 mp_msb);
4448         }
4449         /* write VLAN mirror control register */
4450         if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
4451                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
4452                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
4453                                 mv_msb);
4454         }
4455
4456         return 0;
4457 }
4458
4459 static int
4460 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
4461 {
4462         int mr_ctl = 0;
4463         uint32_t lsb_val = 0;
4464         uint32_t msb_val = 0;
4465         const uint8_t rule_mr_offset = 4;
4466
4467         struct ixgbe_hw *hw =
4468                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4469         struct ixgbe_mirror_info *mr_info =
4470                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
4471
4472         if (ixgbe_vmdq_mode_check(hw) < 0)
4473                 return -ENOTSUP;
4474
4475         memset(&mr_info->mr_conf[rule_id], 0,
4476                 sizeof(struct rte_eth_mirror_conf));
4477
4478         /* clear PFVMCTL register */
4479         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
4480
4481         /* clear pool mask register */
4482         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
4483         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
4484
4485         /* clear vlan mask register */
4486         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
4487         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
4488
4489         return 0;
4490 }
4491
4492 static int
4493 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4494 {
4495         uint32_t mask;
4496         struct ixgbe_hw *hw =
4497                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4498
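             /*
              * The VF shares a single MSI-X vector for all queues, so queue_id
              * is unused here and only the misc vector bit is set in VTEIMS.
              */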
4499         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
4500         mask |= (1 << IXGBE_MISC_VEC_ID);
4501         RTE_SET_USED(queue_id);
4502         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
4503
4504         rte_intr_enable(&dev->pci_dev->intr_handle);
4505
4506         return 0;
4507 }
4508
4509 static int
4510 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4511 {
4512         uint32_t mask;
4513         struct ixgbe_hw *hw =
4514                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4515
4516         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
4517         mask &= ~(1 << IXGBE_MISC_VEC_ID);
4518         RTE_SET_USED(queue_id);
4519         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
4520
4521         return 0;
4522 }
4523
4524 static int
4525 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4526 {
4527         uint32_t mask;
4528         struct ixgbe_hw *hw =
4529                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4530         struct ixgbe_interrupt *intr =
4531                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4532
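             /*
              * Interrupt enable bits for queues 0-15 live in the software mask
              * (applied through ixgbe_enable_intr), queues 16-31 in EIMS_EX(0)
              * and queues 32-63 in EIMS_EX(1).
              */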
4533         if (queue_id < 16) {
4534                 ixgbe_disable_intr(hw);
4535                 intr->mask |= (1 << queue_id);
4536                 ixgbe_enable_intr(dev);
4537         } else if (queue_id < 32) {
4538                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
4539                 mask |= (1 << queue_id);
4540                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4541         } else if (queue_id < 64) {
4542                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
4543                 mask |= (1 << (queue_id - 32));
4544                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4545         }
4546         rte_intr_enable(&dev->pci_dev->intr_handle);
4547
4548         return 0;
4549 }
4550
4551 static int
4552 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4553 {
4554         uint32_t mask;
4555         struct ixgbe_hw *hw =
4556                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4557         struct ixgbe_interrupt *intr =
4558                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4559
4560         if (queue_id < 16) {
4561                 ixgbe_disable_intr(hw);
4562                 intr->mask &= ~(1 << queue_id);
4563                 ixgbe_enable_intr(dev);
4564         } else if (queue_id < 32) {
4565                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
4566                 mask &= ~(1 << queue_id);
4567                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4568         } else if (queue_id < 64) {
4569                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
4570                 mask &= ~(1 << (queue_id - 32));
4571                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4572         }
4573
4574         return 0;
4575 }
4576
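     /*
      * VF variant of the IVAR mapping: VTIVAR_MISC holds the "other causes"
      * vector in its low byte, while each VTIVAR register packs four 8-bit
      * entries (Rx and Tx causes for two queues), selected by the byte index
      * computed from the queue number and direction.
      */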
4577 static void
4578 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
4579                      uint8_t queue, uint8_t msix_vector)
4580 {
4581         uint32_t tmp, idx;
4582
4583         if (direction == -1) {
4584                 /* other causes */
4585                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4586                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
4587                 tmp &= ~0xFF;
4588                 tmp |= msix_vector;
4589                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
4590         } else {
4591                 /* rx or tx cause */
4592                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4593                 idx = ((16 * (queue & 1)) + (8 * direction));
4594                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
4595                 tmp &= ~(0xFF << idx);
4596                 tmp |= (msix_vector << idx);
4597                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
4598         }
4599 }
4600
4601 /**
4602  * set the IVAR registers, mapping interrupt causes to vectors
4603  * @param hw
4604  *  pointer to ixgbe_hw struct
4605  * @param direction
4606  *  0 for Rx, 1 for Tx, -1 for other causes
4607  * @param queue
4608  *  queue to map the corresponding interrupt to
4609  * @param msix_vector
4610  *  the vector to map to the corresponding queue
4611  */
4612 static void
4613 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
4614                    uint8_t queue, uint8_t msix_vector)
4615 {
4616         uint32_t tmp, idx;
4617
4618         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4619         if (hw->mac.type == ixgbe_mac_82598EB) {
4620                 if (direction == -1)
4621                         direction = 0;
4622                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
4623                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
4624                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
4625                 tmp |= (msix_vector << (8 * (queue & 0x3)));
4626                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
4627         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
4628                         (hw->mac.type == ixgbe_mac_X540)) {
4629                 if (direction == -1) {
4630                         /* other causes */
4631                         idx = ((queue & 1) * 8);
4632                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4633                         tmp &= ~(0xFF << idx);
4634                         tmp |= (msix_vector << idx);
4635                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
4636                 } else {
4637                         /* rx or tx causes */
4638                         idx = ((16 * (queue & 1)) + (8 * direction));
4639                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
4640                         tmp &= ~(0xFF << idx);
4641                         tmp |= (msix_vector << idx);
4642                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
4643                 }
4644         }
4645 }
4646
4647 static void
4648 ixgbevf_configure_msix(struct rte_eth_dev *dev)
4649 {
4650         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4651         struct ixgbe_hw *hw =
4652                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4653         uint32_t q_idx;
4654         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
4655
4656         /* won't configure msix register if no mapping is done
4657          * between intr vector and event fd.
4658          */
4659         if (!rte_intr_dp_is_en(intr_handle))
4660                 return;
4661
4662         /* Configure all RX queues of VF */
4663         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
4664                 /* Force all queues to use vector 0,
4665                  * as IXGBE_VF_MAXMSIVECOTR = 1
4666                  */
4667                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
4668                 intr_handle->intr_vec[q_idx] = vector_idx;
4669         }
4670
4671         /* Configure VF other cause ivar */
4672         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
4673 }
4674
4675 /**
4676  * Sets up the hardware to properly generate MSI-X interrupts
4677  * @param dev
4678  *  pointer to rte_eth_dev struct
4679  */
4680 static void
4681 ixgbe_configure_msix(struct rte_eth_dev *dev)
4682 {
4683         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4684         struct ixgbe_hw *hw =
4685                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4686         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
4687         uint32_t vec = IXGBE_MISC_VEC_ID;
4688         uint32_t mask;
4689         uint32_t gpie;
4690
4691         /* won't configure msix register if no mapping is done
4692          * between intr vector and event fd
4693          */
4694         if (!rte_intr_dp_is_en(intr_handle))
4695                 return;
4696
4697         if (rte_intr_allow_others(intr_handle))
4698                 vec = base = IXGBE_RX_VEC_START;
4699
4700         /* setup GPIE for MSI-x mode */
4701         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4702         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4703                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
4704         /* auto clearing and auto setting corresponding bits in EIMS
4705          * when MSI-X interrupt is triggered
4706          */
4707         if (hw->mac.type == ixgbe_mac_82598EB) {
4708                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4709         } else {
4710                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4711                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4712         }
4713         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4714
4715         /* Populate the IVAR table and set the ITR values to the
4716          * corresponding register.
4717          */
4718         for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
4719              queue_id++) {
4720                 /* by default, 1:1 mapping */
4721                 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
4722                 intr_handle->intr_vec[queue_id] = vec;
4723                 if (vec < base + intr_handle->nb_efd - 1)
4724                         vec++;
4725         }
4726
4727         switch (hw->mac.type) {
4728         case ixgbe_mac_82598EB:
4729                 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
4730                                    IXGBE_MISC_VEC_ID);
4731                 break;
4732         case ixgbe_mac_82599EB:
4733         case ixgbe_mac_X540:
4734                 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
4735                 break;
4736         default:
4737                 break;
4738         }
4739         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
4740                         IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
4741
4742         /* set up to autoclear timer, and the vectors */
4743         mask = IXGBE_EIMS_ENABLE_MASK;
4744         mask &= ~(IXGBE_EIMS_OTHER |
4745                   IXGBE_EIMS_MAILBOX |
4746                   IXGBE_EIMS_LSC);
4747
4748         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4749 }
4750
4751 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
4752         uint16_t queue_idx, uint16_t tx_rate)
4753 {
4754         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4755         uint32_t rf_dec, rf_int;
4756         uint32_t bcnrc_val;
4757         uint16_t link_speed = dev->data->dev_link.link_speed;
4758
4759         if (queue_idx >= hw->mac.max_tx_queues)
4760                 return -EINVAL;
4761
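             /*
              * The hardware rate limiter stores link_speed / tx_rate as a fixed
              * point rate factor: rf_int is the integer part and rf_dec the
              * fractional part scaled by 2^IXGBE_RTTBCNRC_RF_INT_SHIFT.
              */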
4762         if (tx_rate != 0) {
4763                 /* Calculate the rate factor values to set */
4764                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
4765                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
4766                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
4767
4768                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
4769                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
4770                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
4771                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
4772         } else {
4773                 bcnrc_val = 0;
4774         }
4775
4776         /*
4777          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
4778          * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
4779          * set as 0x4.
4780          */
4781         if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
4782                 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
4783                                 IXGBE_MAX_JUMBO_FRAME_SIZE))
4784                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
4785                         IXGBE_MMW_SIZE_JUMBO_FRAME);
4786         else
4787                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
4788                         IXGBE_MMW_SIZE_DEFAULT);
4789
4790         /* Set RTTBCNRC of queue X */
4791         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
4792         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
4793         IXGBE_WRITE_FLUSH(hw);
4794
4795         return 0;
4796 }
4797
4798 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
4799         uint16_t tx_rate, uint64_t q_msk)
4800 {
4801         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4802         struct ixgbe_vf_info *vfinfo =
4803                 *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
4804         uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
4805         uint32_t queue_stride =
4806                 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
4807         uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
4808         uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
4809         uint16_t total_rate = 0;
4810
4811         if (queue_end >= hw->mac.max_tx_queues)
4812                 return -EINVAL;
4813
4814         if (vfinfo != NULL) {
4815                 for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
4816                         if (vf_idx == vf)
4817                                 continue;
4818                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
4819                                 idx++)
4820                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
4821                 }
4822         } else
4823                 return -EINVAL;
4824
4825         /* Store tx_rate for this vf. */
4826         for (idx = 0; idx < nb_q_per_pool; idx++) {
4827                 if (((uint64_t)0x1 << idx) & q_msk) {
4828                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
4829                                 vfinfo[vf].tx_rate[idx] = tx_rate;
4830                         total_rate += tx_rate;
4831                 }
4832         }
4833
4834         if (total_rate > dev->data->dev_link.link_speed) {
4835                 /*
4836                  * Reset the stored TX rate of the VF if it would cause the
4837                  * link speed to be exceeded.
4838                  */
4839                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
4840                 return -EINVAL;
4841         }
4842
4843         /* Set RTTBCNRC of each queue/pool for vf X  */
4844         for (; queue_idx <= queue_end; queue_idx++) {
4845                 if (0x1 & q_msk)
4846                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
4847                 q_msk = q_msk >> 1;
4848         }
4849
4850         return 0;
4851 }
4852
4853 static void
4854 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4855                      __attribute__((unused)) uint32_t index,
4856                      __attribute__((unused)) uint32_t pool)
4857 {
4858         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4859         int diag;
4860
4861         /*
4862          * On an 82599 VF, adding the same MAC addr again is not an idempotent
4863          * operation. Trap this case to avoid exhausting the [very limited]
4864          * set of PF resources used to store VF MAC addresses.
4865          */
4866         if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
4867                 return;
4868         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
4869         if (diag == 0)
4870                 return;
4871         PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
4872 }
4873
4874 static void
4875 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
4876 {
4877         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4878         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
4879         struct ether_addr *mac_addr;
4880         uint32_t i;
4881         int diag;
4882
4883         /*
4884          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
4885          * not support the deletion of a given MAC address.
4886          * Instead, it requires deleting all MAC addresses, then adding back
4887          * all of them except the one to be deleted.
4888          */
4889         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
4890
4891         /*
4892          * Add back all MAC addresses, except the deleted one and the
4893          * permanent MAC address.
4894          */
4895         for (i = 0, mac_addr = dev->data->mac_addrs;
4896              i < hw->mac.num_rar_entries; i++, mac_addr++) {
4897                 /* Skip the deleted MAC address */
4898                 if (i == index)
4899                         continue;
4900                 /* Skip NULL MAC addresses */
4901                 if (is_zero_ether_addr(mac_addr))
4902                         continue;
4903                 /* Skip the permanent MAC address */
4904                 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
4905                         continue;
4906                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
4907                 if (diag != 0)
4908                         PMD_DRV_LOG(ERR,
4909                                     "Adding again MAC address "
4910                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
4911                                     "diag=%d",
4912                                     mac_addr->addr_bytes[0],
4913                                     mac_addr->addr_bytes[1],
4914                                     mac_addr->addr_bytes[2],
4915                                     mac_addr->addr_bytes[3],
4916                                     mac_addr->addr_bytes[4],
4917                                     mac_addr->addr_bytes[5],
4918                                     diag);
4919         }
4920 }
4921
4922 static void
4923 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4924 {
4925         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4926
4927         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
4928 }
4929
4930 #define MAC_TYPE_FILTER_SUP(type)    do {\
4931         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
4932                 (type) != ixgbe_mac_X550)\
4933                 return -ENOTSUP;\
4934 } while (0)
4935
4936 static int
4937 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
4938                         struct rte_eth_syn_filter *filter,
4939                         bool add)
4940 {
4941         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4942         uint32_t synqf;
4943
4944         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
4945                 return -EINVAL;
4946
4947         synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
4948
4949         if (add) {
4950                 if (synqf & IXGBE_SYN_FILTER_ENABLE)
4951                         return -EINVAL;
4952                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
4953                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
4954
4955                 if (filter->hig_pri)
4956                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
4957                 else
4958                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
4959         } else {
4960                 if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
4961                         return -ENOENT;
4962                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
4963         }
4964         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
4965         IXGBE_WRITE_FLUSH(hw);
4966         return 0;
4967 }
4968
4969 static int
4970 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
4971                         struct rte_eth_syn_filter *filter)
4972 {
4973         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4974         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
4975
4976         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
4977                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
4978                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
4979                 return 0;
4980         }
4981         return -ENOENT;
4982 }
4983
4984 static int
4985 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
4986                         enum rte_filter_op filter_op,
4987                         void *arg)
4988 {
4989         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4990         int ret;
4991
4992         MAC_TYPE_FILTER_SUP(hw->mac.type);
4993
4994         if (filter_op == RTE_ETH_FILTER_NOP)
4995                 return 0;
4996
4997         if (arg == NULL) {
4998                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
4999                             filter_op);
5000                 return -EINVAL;
5001         }
5002
5003         switch (filter_op) {
5004         case RTE_ETH_FILTER_ADD:
5005                 ret = ixgbe_syn_filter_set(dev,
5006                                 (struct rte_eth_syn_filter *)arg,
5007                                 TRUE);
5008                 break;
5009         case RTE_ETH_FILTER_DELETE:
5010                 ret = ixgbe_syn_filter_set(dev,
5011                                 (struct rte_eth_syn_filter *)arg,
5012                                 FALSE);
5013                 break;
5014         case RTE_ETH_FILTER_GET:
5015                 ret = ixgbe_syn_filter_get(dev,
5016                                 (struct rte_eth_syn_filter *)arg);
5017                 break;
5018         default:
5019                 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
5020                 ret = -EINVAL;
5021                 break;
5022         }
5023
5024         return ret;
5025 }
5026
5027
5028 static inline enum ixgbe_5tuple_protocol
5029 convert_protocol_type(uint8_t protocol_value)
5030 {
5031         if (protocol_value == IPPROTO_TCP)
5032                 return IXGBE_FILTER_PROTOCOL_TCP;
5033         else if (protocol_value == IPPROTO_UDP)
5034                 return IXGBE_FILTER_PROTOCOL_UDP;
5035         else if (protocol_value == IPPROTO_SCTP)
5036                 return IXGBE_FILTER_PROTOCOL_SCTP;
5037         else
5038                 return IXGBE_FILTER_PROTOCOL_NONE;
5039 }
5040
5041 /*
5042  * add a 5tuple filter
5043  *
5044  * @param
5045  * dev: Pointer to struct rte_eth_dev.
5046  * index: the index the filter allocates.
5047  * filter: pointer to the filter that will be added.
5048  * rx_queue: the queue id the filter is assigned to.
5049  *
5050  * @return
5051  *    - On success, zero.
5052  *    - On failure, a negative value.
5053  */
5054 static int
5055 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
5056                         struct ixgbe_5tuple_filter *filter)
5057 {
5058         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5059         struct ixgbe_filter_info *filter_info =
5060                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5061         int i, idx, shift;
5062         uint32_t ftqf, sdpqf;
5063         uint32_t l34timir = 0;
5064         uint8_t mask = 0xff;
5065
5066         /*
5067          * look for an unused 5tuple filter index,
5068          * and insert the filter to list.
5069          */
5070         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
5071                 idx = i / (sizeof(uint32_t) * NBBY);
5072                 shift = i % (sizeof(uint32_t) * NBBY);
5073                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
5074                         filter_info->fivetuple_mask[idx] |= 1 << shift;
5075                         filter->index = i;
5076                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
5077                                           filter,
5078                                           entries);
5079                         break;
5080                 }
5081         }
5082         if (i >= IXGBE_MAX_FTQF_FILTERS) {
5083                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
5084                 return -ENOSYS;
5085         }
5086
5087         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
5088                                 IXGBE_SDPQF_DSTPORT_SHIFT);
5089         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
5090
5091         ftqf = (uint32_t)(filter->filter_info.proto &
5092                 IXGBE_FTQF_PROTOCOL_MASK);
5093         ftqf |= (uint32_t)((filter->filter_info.priority &
5094                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
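             /*
              * In ixgbe_5tuple_filter_info a mask value of 0 means "compare this
              * field"; clearing the corresponding FTQF mask bit below tells the
              * hardware not to ignore it.
              */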
5095         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
5096                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
5097         if (filter->filter_info.dst_ip_mask == 0)
5098                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
5099         if (filter->filter_info.src_port_mask == 0)
5100                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
5101         if (filter->filter_info.dst_port_mask == 0)
5102                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
5103         if (filter->filter_info.proto_mask == 0)
5104                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
5105         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
5106         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
5107         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
5108
5109         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
5110         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
5111         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
5112         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
5113
5114         l34timir |= IXGBE_L34T_IMIR_RESERVE;
5115         l34timir |= (uint32_t)(filter->queue <<
5116                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
5117         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
5118         return 0;
5119 }
5120
5121 /*
5122  * remove a 5tuple filter
5123  *
5124  * @param
5125  * dev: Pointer to struct rte_eth_dev.
5126  * filter: pointer to the filter that will be removed.
5127  */
5128 static void
5129 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
5130                         struct ixgbe_5tuple_filter *filter)
5131 {
5132         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5133         struct ixgbe_filter_info *filter_info =
5134                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5135         uint16_t index = filter->index;
5136
5137         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
5138                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
5139         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
5140         rte_free(filter);
5141
5142         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
5143         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
5144         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
5145         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
5146         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
5147 }
5148
5149 static int
5150 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
5151 {
5152         struct ixgbe_hw *hw;
5153         uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
5154
5155         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5156
5157         if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
5158                 return -EINVAL;
5159
5160         /* refuse mtu that requires the support of scattered packets when this
5161          * feature has not been enabled before. */
5162         if (!dev->data->scattered_rx &&
5163             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
5164              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
5165                 return -EINVAL;
5166
5167         /*
5168          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
5169          * request of the version 2.0 of the mailbox API.
5170          * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
5171          * of the mailbox API.
5172          * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
5173          * prior to 3.11.33 which contains the following change:
5174          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
5175          */
5176         ixgbevf_rlpml_set_vf(hw, max_frame);
5177
5178         /* update max frame size */
5179         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
5180         return 0;
5181 }
5182
5183 #define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
5184         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
5185                 return -ENOTSUP;\
5186 } while (0)
5187
5188 static inline struct ixgbe_5tuple_filter *
5189 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
5190                         struct ixgbe_5tuple_filter_info *key)
5191 {
5192         struct ixgbe_5tuple_filter *it;
5193
5194         TAILQ_FOREACH(it, filter_list, entries) {
5195                 if (memcmp(key, &it->filter_info,
5196                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
5197                         return it;
5198                 }
5199         }
5200         return NULL;
5201 }
5202
5203 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
5204 static inline int
5205 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
5206                         struct ixgbe_5tuple_filter_info *filter_info)
5207 {
5208         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
5209                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
5210                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
5211                 return -EINVAL;
5212
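             /*
              * Note the inverted mask convention: rte_eth_ntuple_filter uses an
              * all-ones mask for an exact match and 0 for a wildcard, while the
              * internal *_mask fields use 0 for "compare" and 1 for "ignore".
              */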
5213         switch (filter->dst_ip_mask) {
5214         case UINT32_MAX:
5215                 filter_info->dst_ip_mask = 0;
5216                 filter_info->dst_ip = filter->dst_ip;
5217                 break;
5218         case 0:
5219                 filter_info->dst_ip_mask = 1;
5220                 break;
5221         default:
5222                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
5223                 return -EINVAL;
5224         }
5225
5226         switch (filter->src_ip_mask) {
5227         case UINT32_MAX:
5228                 filter_info->src_ip_mask = 0;
5229                 filter_info->src_ip = filter->src_ip;
5230                 break;
5231         case 0:
5232                 filter_info->src_ip_mask = 1;
5233                 break;
5234         default:
5235                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
5236                 return -EINVAL;
5237         }
5238
5239         switch (filter->dst_port_mask) {
5240         case UINT16_MAX:
5241                 filter_info->dst_port_mask = 0;
5242                 filter_info->dst_port = filter->dst_port;
5243                 break;
5244         case 0:
5245                 filter_info->dst_port_mask = 1;
5246                 break;
5247         default:
5248                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
5249                 return -EINVAL;
5250         }
5251
5252         switch (filter->src_port_mask) {
5253         case UINT16_MAX:
5254                 filter_info->src_port_mask = 0;
5255                 filter_info->src_port = filter->src_port;
5256                 break;
5257         case 0:
5258                 filter_info->src_port_mask = 1;
5259                 break;
5260         default:
5261                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
5262                 return -EINVAL;
5263         }
5264
5265         switch (filter->proto_mask) {
5266         case UINT8_MAX:
5267                 filter_info->proto_mask = 0;
5268                 filter_info->proto =
5269                         convert_protocol_type(filter->proto);
5270                 break;
5271         case 0:
5272                 filter_info->proto_mask = 1;
5273                 break;
5274         default:
5275                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
5276                 return -EINVAL;
5277         }
5278
5279         filter_info->priority = (uint8_t)filter->priority;
5280         return 0;
5281 }
5282
5283 /*
5284  * add or delete a ntuple filter
5285  *
5286  * @param
5287  * dev: Pointer to struct rte_eth_dev.
5288  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
5289  * add: if true, add filter, if false, remove filter
5290  *
5291  * @return
5292  *    - On success, zero.
5293  *    - On failure, a negative value.
5294  */
5295 static int
5296 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
5297                         struct rte_eth_ntuple_filter *ntuple_filter,
5298                         bool add)
5299 {
5300         struct ixgbe_filter_info *filter_info =
5301                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5302         struct ixgbe_5tuple_filter_info filter_5tuple;
5303         struct ixgbe_5tuple_filter *filter;
5304         int ret;
5305
5306         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
5307                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
5308                 return -EINVAL;
5309         }
5310
5311         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
5312         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
5313         if (ret < 0)
5314                 return ret;
5315
5316         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
5317                                          &filter_5tuple);
5318         if (filter != NULL && add) {
5319                 PMD_DRV_LOG(ERR, "filter exists.");
5320                 return -EEXIST;
5321         }
5322         if (filter == NULL && !add) {
5323                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
5324                 return -ENOENT;
5325         }
5326
5327         if (add) {
5328                 filter = rte_zmalloc("ixgbe_5tuple_filter",
5329                                 sizeof(struct ixgbe_5tuple_filter), 0);
5330                 if (filter == NULL)
5331                         return -ENOMEM;
5332                 (void)rte_memcpy(&filter->filter_info,
5333                                  &filter_5tuple,
5334                                  sizeof(struct ixgbe_5tuple_filter_info));
5335                 filter->queue = ntuple_filter->queue;
5336                 ret = ixgbe_add_5tuple_filter(dev, filter);
5337                 if (ret < 0) {
5338                         rte_free(filter);
5339                         return ret;
5340                 }
5341         } else
5342                 ixgbe_remove_5tuple_filter(dev, filter);
5343
5344         return 0;
5345 }
5346
5347 /*
5348  * get a ntuple filter
5349  *
5350  * @param
5351  * dev: Pointer to struct rte_eth_dev.
5352  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
5353  *
5354  * @return
5355  *    - On success, zero.
5356  *    - On failure, a negative value.
5357  */
5358 static int
5359 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
5360                         struct rte_eth_ntuple_filter *ntuple_filter)
5361 {
5362         struct ixgbe_filter_info *filter_info =
5363                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5364         struct ixgbe_5tuple_filter_info filter_5tuple;
5365         struct ixgbe_5tuple_filter *filter;
5366         int ret;
5367
5368         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
5369                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
5370                 return -EINVAL;
5371         }
5372
5373         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
5374         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
5375         if (ret < 0)
5376                 return ret;
5377
5378         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
5379                                          &filter_5tuple);
5380         if (filter == NULL) {
5381                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
5382                 return -ENOENT;
5383         }
5384         ntuple_filter->queue = filter->queue;
5385         return 0;
5386 }
5387
5388 /*
5389  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
5390  * @dev: pointer to rte_eth_dev structure
5391  * @filter_op: operation to be taken.
5392  * @arg: a pointer to specific structure corresponding to the filter_op
5393  *
5394  * @return
5395  *    - On success, zero.
5396  *    - On failure, a negative value.
5397  */
5398 static int
5399 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
5400                                 enum rte_filter_op filter_op,
5401                                 void *arg)
5402 {
5403         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5404         int ret;
5405
5406         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
5407
5408         if (filter_op == RTE_ETH_FILTER_NOP)
5409                 return 0;
5410
5411         if (arg == NULL) {
5412                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
5413                             filter_op);
5414                 return -EINVAL;
5415         }
5416
5417         switch (filter_op) {
5418         case RTE_ETH_FILTER_ADD:
5419                 ret = ixgbe_add_del_ntuple_filter(dev,
5420                         (struct rte_eth_ntuple_filter *)arg,
5421                         TRUE);
5422                 break;
5423         case RTE_ETH_FILTER_DELETE:
5424                 ret = ixgbe_add_del_ntuple_filter(dev,
5425                         (struct rte_eth_ntuple_filter *)arg,
5426                         FALSE);
5427                 break;
5428         case RTE_ETH_FILTER_GET:
5429                 ret = ixgbe_get_ntuple_filter(dev,
5430                         (struct rte_eth_ntuple_filter *)arg);
5431                 break;
5432         default:
5433                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
5434                 ret = -EINVAL;
5435                 break;
5436         }
5437         return ret;
5438 }
5439
5440 static inline int
5441 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
5442                         uint16_t ethertype)
5443 {
5444         int i;
5445
5446         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
5447                 if (filter_info->ethertype_filters[i] == ethertype &&
5448                     (filter_info->ethertype_mask & (1 << i)))
5449                         return i;
5450         }
5451         return -1;
5452 }
5453
5454 static inline int
5455 ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
5456                         uint16_t ethertype)
5457 {
5458         int i;
5459
5460         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
5461                 if (!(filter_info->ethertype_mask & (1 << i))) {
5462                         filter_info->ethertype_mask |= 1 << i;
5463                         filter_info->ethertype_filters[i] = ethertype;
5464                         return i;
5465                 }
5466         }
5467         return -1;
5468 }
5469
5470 static inline int
5471 ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
5472                         uint8_t idx)
5473 {
5474         if (idx >= IXGBE_MAX_ETQF_FILTERS)
5475                 return -1;
5476         filter_info->ethertype_mask &= ~(1 << idx);
5477         filter_info->ethertype_filters[idx] = 0;
5478         return idx;
5479 }
5480
5481 static int
5482 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
5483                         struct rte_eth_ethertype_filter *filter,
5484                         bool add)
5485 {
5486         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5487         struct ixgbe_filter_info *filter_info =
5488                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5489         uint32_t etqf = 0;
5490         uint32_t etqs = 0;
5491         int ret;
5492
5493         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
5494                 return -EINVAL;
5495
5496         if (filter->ether_type == ETHER_TYPE_IPv4 ||
5497                 filter->ether_type == ETHER_TYPE_IPv6) {
5498                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
5499                         " ethertype filter.", filter->ether_type);
5500                 return -EINVAL;
5501         }
5502
5503         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
5504                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
5505                 return -EINVAL;
5506         }
5507         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
5508                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
5509                 return -EINVAL;
5510         }
5511
5512         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
5513         if (ret >= 0 && add) {
5514                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
5515                             filter->ether_type);
5516                 return -EEXIST;
5517         }
5518         if (ret < 0 && !add) {
5519                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
5520                             filter->ether_type);
5521                 return -ENOENT;
5522         }
5523
5524         if (add) {
5525                 ret = ixgbe_ethertype_filter_insert(filter_info,
5526                         filter->ether_type);
5527                 if (ret < 0) {
5528                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
5529                         return -ENOSYS;
5530                 }
5531                 etqf = IXGBE_ETQF_FILTER_EN;
5532                 etqf |= (uint32_t)filter->ether_type;
5533                 etqs |= (uint32_t)((filter->queue <<
5534                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
5535                                     IXGBE_ETQS_RX_QUEUE);
5536                 etqs |= IXGBE_ETQS_QUEUE_EN;
5537         } else {
5538                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
5539                 if (ret < 0)
5540                         return -ENOSYS;
5541         }
5542         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
5543         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
5544         IXGBE_WRITE_FLUSH(hw);
5545
5546         return 0;
5547 }
5548
5549 static int
5550 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
5551                         struct rte_eth_ethertype_filter *filter)
5552 {
5553         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5554         struct ixgbe_filter_info *filter_info =
5555                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5556         uint32_t etqf, etqs;
5557         int ret;
5558
5559         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
5560         if (ret < 0) {
5561                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
5562                             filter->ether_type);
5563                 return -ENOENT;
5564         }
5565
5566         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
5567         if (etqf & IXGBE_ETQF_FILTER_EN) {
5568                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
5569                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
5570                 filter->flags = 0;
5571                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
5572                                IXGBE_ETQS_RX_QUEUE_SHIFT;
5573                 return 0;
5574         }
5575         return -ENOENT;
5576 }
5577
5578 /*
5579  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
5580  * @dev: pointer to rte_eth_dev structure
5581  * @filter_op: operation to be taken.
5582  * @arg: a pointer to specific structure corresponding to the filter_op
5583  */
5584 static int
5585 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
5586                                 enum rte_filter_op filter_op,
5587                                 void *arg)
5588 {
5589         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5590         int ret;
5591
5592         MAC_TYPE_FILTER_SUP(hw->mac.type);
5593
5594         if (filter_op == RTE_ETH_FILTER_NOP)
5595                 return 0;
5596
5597         if (arg == NULL) {
5598                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
5599                             filter_op);
5600                 return -EINVAL;
5601         }
5602
5603         switch (filter_op) {
5604         case RTE_ETH_FILTER_ADD:
5605                 ret = ixgbe_add_del_ethertype_filter(dev,
5606                         (struct rte_eth_ethertype_filter *)arg,
5607                         TRUE);
5608                 break;
5609         case RTE_ETH_FILTER_DELETE:
5610                 ret = ixgbe_add_del_ethertype_filter(dev,
5611                         (struct rte_eth_ethertype_filter *)arg,
5612                         FALSE);
5613                 break;
5614         case RTE_ETH_FILTER_GET:
5615                 ret = ixgbe_get_ethertype_filter(dev,
5616                         (struct rte_eth_ethertype_filter *)arg);
5617                 break;
5618         default:
5619                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
5620                 ret = -EINVAL;
5621                 break;
5622         }
5623         return ret;
5624 }
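
/*
 * Illustrative sketch (not part of this driver): steering one EtherType to a
 * dedicated RX queue through the generic filter API handled above. The
 * function name, the LLDP EtherType (0x88CC) and queue 2 are hypothetical
 * example values; MAC compare and drop flags are rejected by this driver.
 */
static int
example_add_ethertype_filter(uint8_t port_id)
{
	struct rte_eth_ethertype_filter etype;

	memset(&etype, 0, sizeof(etype));
	etype.ether_type = 0x88CC;	/* must not be the IPv4/IPv6 EtherType */
	etype.flags = 0;		/* no RTE_ETHTYPE_FLAGS_MAC/_DROP */
	etype.queue = 2;

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				       RTE_ETH_FILTER_ADD, &etype);
}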
5625
5626 static int
5627 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
5628                      enum rte_filter_type filter_type,
5629                      enum rte_filter_op filter_op,
5630                      void *arg)
5631 {
5632         int ret = -EINVAL;
5633
5634         switch (filter_type) {
5635         case RTE_ETH_FILTER_NTUPLE:
5636                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
5637                 break;
5638         case RTE_ETH_FILTER_ETHERTYPE:
5639                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
5640                 break;
5641         case RTE_ETH_FILTER_SYN:
5642                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
5643                 break;
5644         case RTE_ETH_FILTER_FDIR:
5645                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
5646                 break;
5647         case RTE_ETH_FILTER_L2_TUNNEL:
5648                 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
5649                 break;
5650         default:
5651                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5652                                                         filter_type);
5653                 break;
5654         }
5655
5656         return ret;
5657 }
5658
5659 static u8 *
5660 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
5661                         u8 **mc_addr_ptr, u32 *vmdq)
5662 {
5663         u8 *mc_addr;
5664
5665         *vmdq = 0;
5666         mc_addr = *mc_addr_ptr;
5667         *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
5668         return mc_addr;
5669 }
5670
5671 static int
5672 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
5673                           struct ether_addr *mc_addr_set,
5674                           uint32_t nb_mc_addr)
5675 {
5676         struct ixgbe_hw *hw;
5677         u8 *mc_addr_list;
5678
5679         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5680         mc_addr_list = (u8 *)mc_addr_set;
5681         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
5682                                          ixgbe_dev_addr_list_itr, TRUE);
5683 }
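
/*
 * Illustrative sketch (not part of this driver): how an application hands a
 * multicast address list to the PMD; the iterator above then walks the flat
 * array one ether_addr at a time. Function name and addresses are
 * hypothetical.
 */
static int
example_set_mc_addr_list(uint8_t port_id)
{
	struct ether_addr mc_addrs[2] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, mc_addrs, 2);
}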
5684
5685 static uint64_t
5686 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
5687 {
5688         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5689         uint64_t systime_cycles;
5690
5691         switch (hw->mac.type) {
5692         case ixgbe_mac_X550:
5693                 /* SYSTIML stores ns and SYSTIMH stores seconds. */
5694                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
5695                 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
5696                                 * NSEC_PER_SEC;
5697                 break;
5698         default:
5699                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
5700                 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
5701                                 << 32;
5702         }
5703
5704         return systime_cycles;
5705 }
5706
5707 static uint64_t
5708 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
5709 {
5710         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5711         uint64_t rx_tstamp_cycles;
5712
5713         switch (hw->mac.type) {
5714         case ixgbe_mac_X550:
5715                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
5716                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
5717                 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
5718                                 * NSEC_PER_SEC;
5719                 break;
5720         default:
5721                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
5722                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
5723                 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
5724                                 << 32;
5725         }
5726
5727         return rx_tstamp_cycles;
5728 }
5729
5730 static uint64_t
5731 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
5732 {
5733         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5734         uint64_t tx_tstamp_cycles;
5735
5736         switch (hw->mac.type) {
5737         case ixgbe_mac_X550:
5738                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
5739                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
5740                 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
5741                                 * NSEC_PER_SEC;
5742                 break;
5743         default:
5744                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
5745                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
5746                 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
5747                                 << 32;
5748         }
5749
5750         return tx_tstamp_cycles;
5751 }
5752
5753 static void
5754 ixgbe_start_timecounters(struct rte_eth_dev *dev)
5755 {
5756         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5757         struct ixgbe_adapter *adapter =
5758                 (struct ixgbe_adapter *)dev->data->dev_private;
5759         struct rte_eth_link link;
5760         uint32_t incval = 0;
5761         uint32_t shift = 0;
5762
5763         /* Get current link speed. */
5764         memset(&link, 0, sizeof(link));
5765         ixgbe_dev_link_update(dev, 1);
5766         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
5767
5768         switch (link.link_speed) {
5769         case ETH_LINK_SPEED_100:
5770                 incval = IXGBE_INCVAL_100;
5771                 shift = IXGBE_INCVAL_SHIFT_100;
5772                 break;
5773         case ETH_LINK_SPEED_1000:
5774                 incval = IXGBE_INCVAL_1GB;
5775                 shift = IXGBE_INCVAL_SHIFT_1GB;
5776                 break;
5777         case ETH_LINK_SPEED_10000:
5778         default:
5779                 incval = IXGBE_INCVAL_10GB;
5780                 shift = IXGBE_INCVAL_SHIFT_10GB;
5781                 break;
5782         }
5783
5784         switch (hw->mac.type) {
5785         case ixgbe_mac_X550:
5786                 /* Independent of link speed. */
5787                 incval = 1;
5788                 /* Cycles read will be interpreted as ns. */
5789                 shift = 0;
5790                 /* Fall-through */
5791         case ixgbe_mac_X540:
5792                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
5793                 break;
5794         case ixgbe_mac_82599EB:
5795                 incval >>= IXGBE_INCVAL_SHIFT_82599;
5796                 shift -= IXGBE_INCVAL_SHIFT_82599;
5797                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
5798                                 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
5799                 break;
5800         default:
5801                 /* Not supported. */
5802                 return;
5803         }
5804
5805         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
5806         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5807         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5808
5809         adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
5810         adapter->systime_tc.cc_shift = shift;
5811         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
5812
5813         adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
5814         adapter->rx_tstamp_tc.cc_shift = shift;
5815         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
5816
5817         adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
5818         adapter->tx_tstamp_tc.cc_shift = shift;
5819         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
5820 }
5821
5822 static int
5823 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
5824 {
5825         struct ixgbe_adapter *adapter =
5826                         (struct ixgbe_adapter *)dev->data->dev_private;
5827
5828         adapter->systime_tc.nsec += delta;
5829         adapter->rx_tstamp_tc.nsec += delta;
5830         adapter->tx_tstamp_tc.nsec += delta;
5831
5832         return 0;
5833 }
5834
5835 static int
5836 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
5837 {
5838         uint64_t ns;
5839         struct ixgbe_adapter *adapter =
5840                         (struct ixgbe_adapter *)dev->data->dev_private;
5841
5842         ns = rte_timespec_to_ns(ts);
5843         /* Set the timecounters to a new value. */
5844         adapter->systime_tc.nsec = ns;
5845         adapter->rx_tstamp_tc.nsec = ns;
5846         adapter->tx_tstamp_tc.nsec = ns;
5847
5848         return 0;
5849 }
5850
5851 static int
5852 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
5853 {
5854         uint64_t ns, systime_cycles;
5855         struct ixgbe_adapter *adapter =
5856                         (struct ixgbe_adapter *)dev->data->dev_private;
5857
5858         systime_cycles = ixgbe_read_systime_cyclecounter(dev);
5859         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
5860         *ts = rte_ns_to_timespec(ns);
5861
5862         return 0;
5863 }
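
/*
 * Illustrative sketch (not part of this driver): how a PTP application would
 * read and correct the clock backed by the software timecounters above,
 * using the rte_eth_timesync_* API. The +1 us step is a hypothetical
 * correction computed by the application's servo; the function name is also
 * hypothetical.
 */
static int
example_adjust_clock(uint8_t port_id)
{
	struct timespec now;
	int ret;

	ret = rte_eth_timesync_read_time(port_id, &now);
	if (ret < 0)
		return ret;
	printf("PHC time: %ld.%09ld\n", (long)now.tv_sec, now.tv_nsec);

	/* Apply a hypothetical +1 us offset correction. */
	return rte_eth_timesync_adjust_time(port_id, 1000);
}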
5864
5865 static int
5866 ixgbe_timesync_enable(struct rte_eth_dev *dev)
5867 {
5868         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5869         uint32_t tsync_ctl;
5870         uint32_t tsauxc;
5871
5872         /* Stop the timesync system time. */
5873         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
5874         /* Reset the timesync system time value. */
5875         IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
5876         IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
5877
5878         /* Enable system time for platforms where it isn't on by default. */
5879         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
5880         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
5881         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
5882
5883         ixgbe_start_timecounters(dev);
5884
5885         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5886         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
5887                         (ETHER_TYPE_1588 |
5888                          IXGBE_ETQF_FILTER_EN |
5889                          IXGBE_ETQF_1588));
5890
5891         /* Enable timestamping of received PTP packets. */
5892         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5893         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
5894         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
5895
5896         /* Enable timestamping of transmitted PTP packets. */
5897         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5898         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
5899         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
5900
5901         IXGBE_WRITE_FLUSH(hw);
5902
5903         return 0;
5904 }
5905
5906 static int
5907 ixgbe_timesync_disable(struct rte_eth_dev *dev)
5908 {
5909         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5910         uint32_t tsync_ctl;
5911
5912         /* Disable timestamping of transmitted PTP packets. */
5913         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5914         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
5915         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
5916
5917         /* Disable timestamping of received PTP packets. */
5918         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5919         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
5920         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
5921
5922         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5923         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
5924
5925         /* Stop incrementing the System Time registers. */
5926         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
5927
5928         return 0;
5929 }
5930
5931 static int
5932 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
5933                                  struct timespec *timestamp,
5934                                  uint32_t flags __rte_unused)
5935 {
5936         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5937         struct ixgbe_adapter *adapter =
5938                 (struct ixgbe_adapter *)dev->data->dev_private;
5939         uint32_t tsync_rxctl;
5940         uint64_t rx_tstamp_cycles;
5941         uint64_t ns;
5942
5943         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5944         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
5945                 return -EINVAL;
5946
5947         rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
5948         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
5949         *timestamp = rte_ns_to_timespec(ns);
5950
5951         return  0;
5952 }
5953
5954 static int
5955 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
5956                                  struct timespec *timestamp)
5957 {
5958         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5959         struct ixgbe_adapter *adapter =
5960                 (struct ixgbe_adapter *)dev->data->dev_private;
5961         uint32_t tsync_txctl;
5962         uint64_t tx_tstamp_cycles;
5963         uint64_t ns;
5964
5965         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5966         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
5967                 return -EINVAL;
5968
5969         tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
5970         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
5971         *timestamp = rte_ns_to_timespec(ns);
5972
5973         return 0;
5974 }
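
/*
 * Illustrative sketch (not part of this driver): enabling IEEE 1588
 * timestamping and retrieving the RX/TX timestamps latched by the functions
 * above. A real application would poll after the mbuf reports
 * PKT_RX_IEEE1588_TMST / after sending with PKT_TX_IEEE1588_TMST; the
 * function name and simplified flow are hypothetical.
 */
static void
example_read_ptp_timestamps(uint8_t port_id)
{
	struct timespec rx_ts, tx_ts;

	rte_eth_timesync_enable(port_id);

	/* After a timestamped PTP frame has been received. */
	if (rte_eth_timesync_read_rx_timestamp(port_id, &rx_ts, 0) == 0)
		printf("RX timestamp: %ld.%09ld\n",
		       (long)rx_ts.tv_sec, rx_ts.tv_nsec);

	/* After a timestamped PTP frame has been transmitted. */
	if (rte_eth_timesync_read_tx_timestamp(port_id, &tx_ts) == 0)
		printf("TX timestamp: %ld.%09ld\n",
		       (long)tx_ts.tv_sec, tx_ts.tv_nsec);
}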
5975
5976 static int
5977 ixgbe_get_reg_length(struct rte_eth_dev *dev)
5978 {
5979         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5980         int count = 0;
5981         int g_ind = 0;
5982         const struct reg_info *reg_group;
5983         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
5984                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
5985
5986         while ((reg_group = reg_set[g_ind++]))
5987                 count += ixgbe_regs_group_count(reg_group);
5988
5989         return count;
5990 }
5991
5992 static int
5993 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
5994 {
5995         int count = 0;
5996         int g_ind = 0;
5997         const struct reg_info *reg_group;
5998
5999         while ((reg_group = ixgbevf_regs[g_ind++]))
6000                 count += ixgbe_regs_group_count(reg_group);
6001
6002         return count;
6003 }
6004
6005 static int
6006 ixgbe_get_regs(struct rte_eth_dev *dev,
6007               struct rte_dev_reg_info *regs)
6008 {
6009         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6010         uint32_t *data = regs->data;
6011         int g_ind = 0;
6012         int count = 0;
6013         const struct reg_info *reg_group;
6014         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
6015                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
6016
6017         /* Support only full register dump */
6018         if ((regs->length == 0) ||
6019             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
6020                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
6021                         hw->device_id;
6022                 while ((reg_group = reg_set[g_ind++]))
6023                         count += ixgbe_read_regs_group(dev, &data[count],
6024                                 reg_group);
6025                 return 0;
6026         }
6027
6028         return -ENOTSUP;
6029 }
6030
6031 static int
6032 ixgbevf_get_regs(struct rte_eth_dev *dev,
6033                 struct rte_dev_reg_info *regs)
6034 {
6035         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6036         uint32_t *data = regs->data;
6037         int g_ind = 0;
6038         int count = 0;
6039         const struct reg_info *reg_group;
6040
6041         /* Support only full register dump */
6042         if ((regs->length == 0) ||
6043             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
6044                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
6045                         hw->device_id;
6046                 while ((reg_group = ixgbevf_regs[g_ind++]))
6047                         count += ixgbe_read_regs_group(dev, &data[count],
6048                                                       reg_group);
6049                 return 0;
6050         }
6051
6052         return -ENOTSUP;
6053 }
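
/*
 * Illustrative sketch (not part of this driver), assuming the
 * rte_eth_dev_get_reg_length()/rte_eth_dev_get_reg_info() ethdev calls:
 * performing the full register dump served by the two functions above.
 * Only a length of 0 or the exact register count is accepted; error
 * handling is abbreviated and the function name is hypothetical.
 */
static int
example_dump_regs(uint8_t port_id)
{
	struct rte_dev_reg_info reg_info;
	int len, ret;

	len = rte_eth_dev_get_reg_length(port_id);
	if (len < 0)
		return len;

	memset(&reg_info, 0, sizeof(reg_info));
	reg_info.length = len;
	reg_info.data = rte_zmalloc("example_regs", len * sizeof(uint32_t), 0);
	if (reg_info.data == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_get_reg_info(port_id, &reg_info);
	rte_free(reg_info.data);
	return ret;
}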
6054
6055 static int
6056 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
6057 {
6058         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6059
6060         /* Return unit is byte count */
6061         return hw->eeprom.word_size * 2;
6062 }
6063
6064 static int
6065 ixgbe_get_eeprom(struct rte_eth_dev *dev,
6066                 struct rte_dev_eeprom_info *in_eeprom)
6067 {
6068         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6069         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
6070         uint16_t *data = in_eeprom->data;
6071         int first, length;
6072
6073         first = in_eeprom->offset >> 1;
6074         length = in_eeprom->length >> 1;
6075         if ((first > hw->eeprom.word_size) ||
6076             ((first + length) > hw->eeprom.word_size))
6077                 return -EINVAL;
6078
6079         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
6080
6081         return eeprom->ops.read_buffer(hw, first, length, data);
6082 }
6083
6084 static int
6085 ixgbe_set_eeprom(struct rte_eth_dev *dev,
6086                 struct rte_dev_eeprom_info *in_eeprom)
6087 {
6088         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6089         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
6090         uint16_t *data = in_eeprom->data;
6091         int first, length;
6092
6093         first = in_eeprom->offset >> 1;
6094         length = in_eeprom->length >> 1;
6095         if ((first > hw->eeprom.word_size) ||
6096             ((first + length) > hw->eeprom.word_size))
6097                 return -EINVAL;
6098
6099         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
6100
6101         return eeprom->ops.write_buffer(hw, first, length, data);
6102 }
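
/*
 * Illustrative sketch (not part of this driver), assuming the
 * rte_eth_dev_get_eeprom_length()/rte_eth_dev_get_eeprom() ethdev calls:
 * reading the first 16 bytes of the NVM through the accessors above.
 * Offset and length are in bytes and are halved into EEPROM words
 * internally; the function name is hypothetical.
 */
static int
example_read_eeprom(uint8_t port_id)
{
	uint16_t words[8];	/* 16 bytes, i.e. 8 EEPROM words */
	struct rte_dev_eeprom_info eeprom;

	memset(&eeprom, 0, sizeof(eeprom));
	eeprom.offset = 0;
	eeprom.length = sizeof(words);
	eeprom.data = words;

	return rte_eth_dev_get_eeprom(port_id, &eeprom);
}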
6103
6104 uint16_t
6105 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
6106         switch (mac_type) {
6107         case ixgbe_mac_X550:
6108         case ixgbe_mac_X550EM_x:
6109                 return ETH_RSS_RETA_SIZE_512;
6110         case ixgbe_mac_X550_vf:
6111         case ixgbe_mac_X550EM_x_vf:
6112                 return ETH_RSS_RETA_SIZE_64;
6113         default:
6114                 return ETH_RSS_RETA_SIZE_128;
6115         }
6116 }
6117
6118 uint32_t
6119 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
6120         switch (mac_type) {
6121         case ixgbe_mac_X550:
6122         case ixgbe_mac_X550EM_x:
6123                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
6124                         return IXGBE_RETA(reta_idx >> 2);
6125                 else
6126                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
6127         case ixgbe_mac_X550_vf:
6128         case ixgbe_mac_X550EM_x_vf:
6129                 return IXGBE_VFRETA(reta_idx >> 2);
6130         default:
6131                 return IXGBE_RETA(reta_idx >> 2);
6132         }
6133 }
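
/*
 * Worked example (illustrative, not called anywhere): on X550 the 512-entry
 * redirection table spans RETA[0..31] plus ERETA[0..95], four 8-bit entries
 * per 32-bit register, which is what the >> 2 above expresses.
 */
static uint32_t
example_reta_reg_for_x550(uint16_t reta_idx)
{
	/* e.g. reta_idx 130 -> ERETA((130 - 128) >> 2) == ERETA(0) */
	return ixgbe_reta_reg_get(ixgbe_mac_X550, reta_idx);
}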
6134
6135 uint32_t
6136 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
6137         switch (mac_type) {
6138         case ixgbe_mac_X550_vf:
6139         case ixgbe_mac_X550EM_x_vf:
6140                 return IXGBE_VFMRQC;
6141         default:
6142                 return IXGBE_MRQC;
6143         }
6144 }
6145
6146 uint32_t
6147 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
6148         switch (mac_type) {
6149         case ixgbe_mac_X550_vf:
6150         case ixgbe_mac_X550EM_x_vf:
6151                 return IXGBE_VFRSSRK(i);
6152         default:
6153                 return IXGBE_RSSRK(i);
6154         }
6155 }
6156
6157 bool
6158 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
6159         switch (mac_type) {
6160         case ixgbe_mac_82599_vf:
6161         case ixgbe_mac_X540_vf:
6162                 return 0;
6163         default:
6164                 return 1;
6165         }
6166 }
6167
6168 static int
6169 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
6170                         struct rte_eth_dcb_info *dcb_info)
6171 {
6172         struct ixgbe_dcb_config *dcb_config =
6173                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
6174         struct ixgbe_dcb_tc_config *tc;
6175         uint8_t i, j;
6176
6177         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
6178                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
6179         else
6180                 dcb_info->nb_tcs = 1;
6181
6182         if (dcb_config->vt_mode) { /* vt is enabled */
6183                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
6184                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
6185                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
6186                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
6187                 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
6188                         for (j = 0; j < dcb_info->nb_tcs; j++) {
6189                                 dcb_info->tc_queue.tc_rxq[i][j].base =
6190                                                 i * dcb_info->nb_tcs + j;
6191                                 dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
6192                                 dcb_info->tc_queue.tc_txq[i][j].base =
6193                                                 i * dcb_info->nb_tcs + j;
6194                                 dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
6195                         }
6196                 }
6197         } else { /* vt is disabled */
6198                 struct rte_eth_dcb_rx_conf *rx_conf =
6199                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
6200                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
6201                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
6202                 if (dcb_info->nb_tcs == ETH_4_TCS) {
6203                         for (i = 0; i < dcb_info->nb_tcs; i++) {
6204                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
6205                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
6206                         }
6207                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
6208                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
6209                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
6210                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
6211                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
6212                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
6213                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
6214                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
6215                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
6216                         for (i = 0; i < dcb_info->nb_tcs; i++) {
6217                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
6218                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
6219                         }
6220                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
6221                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
6222                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
6223                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
6224                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
6225                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
6226                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
6227                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
6228                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
6229                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
6230                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
6231                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
6232                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
6233                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
6234                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
6235                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
6236                 }
6237         }
6238         for (i = 0; i < dcb_info->nb_tcs; i++) {
6239                 tc = &dcb_config->tc_config[i];
6240                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
6241         }
6242         return 0;
6243 }
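
/*
 * Illustrative sketch (not part of this driver): querying the TC/queue
 * mapping assembled above through the ethdev API. Function name and output
 * formatting are hypothetical.
 */
static void
example_show_dcb_info(uint8_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	int i;

	memset(&dcb_info, 0, sizeof(dcb_info));
	if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) != 0)
		return;

	printf("nb_tcs: %u\n", dcb_info.nb_tcs);
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("TC%d: rxq base %u, nb %u\n", i,
		       dcb_info.tc_queue.tc_rxq[0][i].base,
		       dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
}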
6244
6245 /* Update e-tag ether type */
6246 static int
6247 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
6248                             uint16_t ether_type)
6249 {
6250         uint32_t etag_etype;
6251
6252         if (hw->mac.type != ixgbe_mac_X550 &&
6253             hw->mac.type != ixgbe_mac_X550EM_x) {
6254                 return -ENOTSUP;
6255         }
6256
6257         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
6258         etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
6259         etag_etype |= ether_type;
6260         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
6261         IXGBE_WRITE_FLUSH(hw);
6262
6263         return 0;
6264 }
6265
6266 /* Config l2 tunnel ether type */
6267 static int
6268 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
6269                                   struct rte_eth_l2_tunnel_conf *l2_tunnel)
6270 {
6271         int ret = 0;
6272         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6273
6274         if (l2_tunnel == NULL)
6275                 return -EINVAL;
6276
6277         switch (l2_tunnel->l2_tunnel_type) {
6278         case RTE_L2_TUNNEL_TYPE_E_TAG:
6279                 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
6280                 break;
6281         default:
6282                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6283                 ret = -EINVAL;
6284                 break;
6285         }
6286
6287         return ret;
6288 }
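
/*
 * Illustrative sketch (not part of this driver), assuming the
 * rte_eth_dev_l2_tunnel_eth_type_conf() ethdev call and the
 * rte_eth_l2_tunnel_conf layout introduced with this feature: programming
 * the 802.1BR E-tag EtherType (0x893F) handled by the function above. The
 * function name is hypothetical.
 */
static int
example_set_etag_ether_type(uint8_t port_id)
{
	struct rte_eth_l2_tunnel_conf l2_tunnel;

	memset(&l2_tunnel, 0, sizeof(l2_tunnel));
	l2_tunnel.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	l2_tunnel.ether_type = 0x893F;	/* 802.1BR E-tag EtherType */

	return rte_eth_dev_l2_tunnel_eth_type_conf(port_id, &l2_tunnel);
}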
6289
6290 /* Enable e-tag tunnel */
6291 static int
6292 ixgbe_e_tag_enable(struct ixgbe_hw *hw)
6293 {
6294         uint32_t etag_etype;
6295
6296         if (hw->mac.type != ixgbe_mac_X550 &&
6297             hw->mac.type != ixgbe_mac_X550EM_x) {
6298                 return -ENOTSUP;
6299         }
6300
6301         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
6302         etag_etype |= IXGBE_ETAG_ETYPE_VALID;
6303         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
6304         IXGBE_WRITE_FLUSH(hw);
6305
6306         return 0;
6307 }
6308
6309 /* Enable l2 tunnel */
6310 static int
6311 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
6312                            enum rte_eth_tunnel_type l2_tunnel_type)
6313 {
6314         int ret = 0;
6315         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6316
6317         switch (l2_tunnel_type) {
6318         case RTE_L2_TUNNEL_TYPE_E_TAG:
6319                 ret = ixgbe_e_tag_enable(hw);
6320                 break;
6321         default:
6322                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6323                 ret = -EINVAL;
6324                 break;
6325         }
6326
6327         return ret;
6328 }
6329
6330 /* Disable e-tag tunnel */
6331 static int
6332 ixgbe_e_tag_disable(struct ixgbe_hw *hw)
6333 {
6334         uint32_t etag_etype;
6335
6336         if (hw->mac.type != ixgbe_mac_X550 &&
6337             hw->mac.type != ixgbe_mac_X550EM_x) {
6338                 return -ENOTSUP;
6339         }
6340
6341         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
6342         etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
6343         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
6344         IXGBE_WRITE_FLUSH(hw);
6345
6346         return 0;
6347 }
6348
6349 /* Disable l2 tunnel */
6350 static int
6351 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
6352                             enum rte_eth_tunnel_type l2_tunnel_type)
6353 {
6354         int ret = 0;
6355         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6356
6357         switch (l2_tunnel_type) {
6358         case RTE_L2_TUNNEL_TYPE_E_TAG:
6359                 ret = ixgbe_e_tag_disable(hw);
6360                 break;
6361         default:
6362                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6363                 ret = -EINVAL;
6364                 break;
6365         }
6366
6367         return ret;
6368 }
6369
6370 static int
6371 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
6372                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
6373 {
6374         int ret = 0;
6375         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6376         uint32_t i, rar_entries;
6377         uint32_t rar_low, rar_high;
6378
6379         if (hw->mac.type != ixgbe_mac_X550 &&
6380             hw->mac.type != ixgbe_mac_X550EM_x) {
6381                 return -ENOTSUP;
6382         }
6383
6384         rar_entries = ixgbe_get_num_rx_addrs(hw);
6385
6386         for (i = 1; i < rar_entries; i++) {
6387                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
6388                 rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
6389                 if ((rar_high & IXGBE_RAH_AV) &&
6390                     (rar_high & IXGBE_RAH_ADTYPE) &&
6391                     ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
6392                      l2_tunnel->tunnel_id)) {
6393                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
6394                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
6395
6396                         ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
6397
6398                         return ret;
6399                 }
6400         }
6401
6402         return ret;
6403 }
6404
6405 static int
6406 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
6407                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
6408 {
6409         int ret = 0;
6410         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6411         uint32_t i, rar_entries;
6412         uint32_t rar_low, rar_high;
6413
6414         if (hw->mac.type != ixgbe_mac_X550 &&
6415             hw->mac.type != ixgbe_mac_X550EM_x) {
6416                 return -ENOTSUP;
6417         }
6418
6419         /* One entry per tunnel. Remove any existing entry first. */
6420         ixgbe_e_tag_filter_del(dev, l2_tunnel);
6421
6422         rar_entries = ixgbe_get_num_rx_addrs(hw);
6423
6424         for (i = 1; i < rar_entries; i++) {
6425                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
6426                 if (rar_high & IXGBE_RAH_AV) {
6427                         continue;
6428                 } else {
6429                         ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
6430                         rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
6431                         rar_low = l2_tunnel->tunnel_id;
6432
6433                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
6434                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
6435
6436                         return ret;
6437                 }
6438         }
6439
6440         PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rules is full."
6441                      " Please remove a rule before adding a new one.");
6442         return -EINVAL;
6443 }
6444
6445 /* Add l2 tunnel filter */
6446 static int
6447 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
6448                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
6449 {
6450         int ret = 0;
6451
6452         switch (l2_tunnel->l2_tunnel_type) {
6453         case RTE_L2_TUNNEL_TYPE_E_TAG:
6454                 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
6455                 break;
6456         default:
6457                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6458                 ret = -EINVAL;
6459                 break;
6460         }
6461
6462         return ret;
6463 }
6464
6465 /* Delete l2 tunnel filter */
6466 static int
6467 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
6468                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
6469 {
6470         int ret = 0;
6471
6472         switch (l2_tunnel->l2_tunnel_type) {
6473         case RTE_L2_TUNNEL_TYPE_E_TAG:
6474                 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
6475                 break;
6476         default:
6477                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6478                 ret = -EINVAL;
6479                 break;
6480         }
6481
6482         return ret;
6483 }
6484
6485 /**
6486  * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
6487  * @dev: pointer to rte_eth_dev structure
6488  * @filter_op: operation to be taken.
6489  * @arg: a pointer to specific structure corresponding to the filter_op
6490  */
6491 static int
6492 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
6493                                   enum rte_filter_op filter_op,
6494                                   void *arg)
6495 {
6496         int ret = 0;
6497
6498         if (filter_op == RTE_ETH_FILTER_NOP)
6499                 return 0;
6500
6501         if (arg == NULL) {
6502                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6503                             filter_op);
6504                 return -EINVAL;
6505         }
6506
6507         switch (filter_op) {
6508         case RTE_ETH_FILTER_ADD:
6509                 ret = ixgbe_dev_l2_tunnel_filter_add
6510                         (dev,
6511                          (struct rte_eth_l2_tunnel_conf *)arg);
6512                 break;
6513         case RTE_ETH_FILTER_DELETE:
6514                 ret = ixgbe_dev_l2_tunnel_filter_del
6515                         (dev,
6516                          (struct rte_eth_l2_tunnel_conf *)arg);
6517                 break;
6518         default:
6519                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6520                 ret = -EINVAL;
6521                 break;
6522         }
6523         return ret;
6524 }
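
/*
 * Illustrative sketch (not part of this driver), assuming the
 * rte_eth_l2_tunnel_conf layout introduced with this feature: adding an
 * E-tag forwarding rule through the generic filter API handled above. The
 * function name, the GRP/E-CID value 0x309 and pool 1 are hypothetical
 * example values.
 */
static int
example_add_etag_filter(uint8_t port_id)
{
	struct rte_eth_l2_tunnel_conf l2_tunnel;

	memset(&l2_tunnel, 0, sizeof(l2_tunnel));
	l2_tunnel.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	l2_tunnel.tunnel_id = 0x309;	/* E-tag GRP + E-CID to match */
	l2_tunnel.pool = 1;		/* destination pool/VF */

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_L2_TUNNEL,
				       RTE_ETH_FILTER_ADD, &l2_tunnel);
}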
6525
6526 static int
6527 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
6528 {
6529         int ret = 0;
6530         uint32_t ctrl;
6531         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6532
6533         if (hw->mac.type != ixgbe_mac_X550 &&
6534             hw->mac.type != ixgbe_mac_X550EM_x) {
6535                 return -ENOTSUP;
6536         }
6537
6538         ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
6539         ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
6540         if (en)
6541                 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
6542         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
6543
6544         return ret;
6545 }
6546
6547 /* Enable l2 tunnel forwarding */
6548 static int
6549 ixgbe_dev_l2_tunnel_forwarding_enable
6550         (struct rte_eth_dev *dev,
6551          enum rte_eth_tunnel_type l2_tunnel_type)
6552 {
6553         int ret = 0;
6554
6555         switch (l2_tunnel_type) {
6556         case RTE_L2_TUNNEL_TYPE_E_TAG:
6557                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
6558                 break;
6559         default:
6560                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6561                 ret = -EINVAL;
6562                 break;
6563         }
6564
6565         return ret;
6566 }
6567
6568 /* Disable l2 tunnel forwarding */
6569 static int
6570 ixgbe_dev_l2_tunnel_forwarding_disable
6571         (struct rte_eth_dev *dev,
6572          enum rte_eth_tunnel_type l2_tunnel_type)
6573 {
6574         int ret = 0;
6575
6576         switch (l2_tunnel_type) {
6577         case RTE_L2_TUNNEL_TYPE_E_TAG:
6578                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
6579                 break;
6580         default:
6581                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6582                 ret = -EINVAL;
6583                 break;
6584         }
6585
6586         return ret;
6587 }
6588
6589 static int
6590 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
6591                              struct rte_eth_l2_tunnel_conf *l2_tunnel,
6592                              bool en)
6593 {
6594         int ret = 0;
6595         uint32_t vmtir, vmvir;
6596         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6597
6598         if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) {
6599                 PMD_DRV_LOG(ERR,
6600                             "VF id %u should be less than %u",
6601                             l2_tunnel->vf_id,
6602                             dev->pci_dev->max_vfs);
6603                 return -EINVAL;
6604         }
6605
6606         if (hw->mac.type != ixgbe_mac_X550 &&
6607             hw->mac.type != ixgbe_mac_X550EM_x) {
6608                 return -ENOTSUP;
6609         }
6610
6611         if (en)
6612                 vmtir = l2_tunnel->tunnel_id;
6613         else
6614                 vmtir = 0;
6615
6616         IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
6617
6618         vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
6619         vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
6620         if (en)
6621                 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
6622         IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
6623
6624         return ret;
6625 }
6626
6627 /* Enable l2 tunnel tag insertion */
6628 static int
6629 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
6630                                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
6631 {
6632         int ret = 0;
6633
6634         switch (l2_tunnel->l2_tunnel_type) {
6635         case RTE_L2_TUNNEL_TYPE_E_TAG:
6636                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
6637                 break;
6638         default:
6639                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6640                 ret = -EINVAL;
6641                 break;
6642         }
6643
6644         return ret;
6645 }
6646
6647 /* Disable l2 tunnel tag insertion */
6648 static int
6649 ixgbe_dev_l2_tunnel_insertion_disable
6650         (struct rte_eth_dev *dev,
6651          struct rte_eth_l2_tunnel_conf *l2_tunnel)
6652 {
6653         int ret = 0;
6654
6655         switch (l2_tunnel->l2_tunnel_type) {
6656         case RTE_L2_TUNNEL_TYPE_E_TAG:
6657                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
6658                 break;
6659         default:
6660                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6661                 ret = -EINVAL;
6662                 break;
6663         }
6664
6665         return ret;
6666 }
6667
6668 static int
6669 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
6670                              bool en)
6671 {
6672         int ret = 0;
6673         uint32_t qde;
6674         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6675
6676         if (hw->mac.type != ixgbe_mac_X550 &&
6677             hw->mac.type != ixgbe_mac_X550EM_x) {
6678                 return -ENOTSUP;
6679         }
6680
6681         qde = IXGBE_READ_REG(hw, IXGBE_QDE);
6682         if (en)
6683                 qde |= IXGBE_QDE_STRIP_TAG;
6684         else
6685                 qde &= ~IXGBE_QDE_STRIP_TAG;
6686         qde &= ~IXGBE_QDE_READ;
6687         qde |= IXGBE_QDE_WRITE;
6688         IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
6689
6690         return ret;
6691 }
6692
6693 /* Enable l2 tunnel tag stripping */
6694 static int
6695 ixgbe_dev_l2_tunnel_stripping_enable
6696         (struct rte_eth_dev *dev,
6697          enum rte_eth_tunnel_type l2_tunnel_type)
6698 {
6699         int ret = 0;
6700
6701         switch (l2_tunnel_type) {
6702         case RTE_L2_TUNNEL_TYPE_E_TAG:
6703                 ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
6704                 break;
6705         default:
6706                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6707                 ret = -EINVAL;
6708                 break;
6709         }
6710
6711         return ret;
6712 }
6713
6714 /* Disable l2 tunnel tag stripping */
6715 static int
6716 ixgbe_dev_l2_tunnel_stripping_disable
6717         (struct rte_eth_dev *dev,
6718          enum rte_eth_tunnel_type l2_tunnel_type)
6719 {
6720         int ret = 0;
6721
6722         switch (l2_tunnel_type) {
6723         case RTE_L2_TUNNEL_TYPE_E_TAG:
6724                 ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
6725                 break;
6726         default:
6727                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6728                 ret = -EINVAL;
6729                 break;
6730         }
6731
6732         return ret;
6733 }
6734
6735 /* Enable/disable l2 tunnel offload functions */
6736 static int
6737 ixgbe_dev_l2_tunnel_offload_set
6738         (struct rte_eth_dev *dev,
6739          struct rte_eth_l2_tunnel_conf *l2_tunnel,
6740          uint32_t mask,
6741          uint8_t en)
6742 {
6743         int ret = 0;
6744
6745         if (l2_tunnel == NULL)
6746                 return -EINVAL;
6747
6748         ret = -EINVAL;
6749         if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
6750                 if (en)
6751                         ret = ixgbe_dev_l2_tunnel_enable(
6752                                 dev,
6753                                 l2_tunnel->l2_tunnel_type);
6754                 else
6755                         ret = ixgbe_dev_l2_tunnel_disable(
6756                                 dev,
6757                                 l2_tunnel->l2_tunnel_type);
6758         }
6759
6760         if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
6761                 if (en)
6762                         ret = ixgbe_dev_l2_tunnel_insertion_enable(
6763                                 dev,
6764                                 l2_tunnel);
6765                 else
6766                         ret = ixgbe_dev_l2_tunnel_insertion_disable(
6767                                 dev,
6768                                 l2_tunnel);
6769         }
6770
6771         if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
6772                 if (en)
6773                         ret = ixgbe_dev_l2_tunnel_stripping_enable(
6774                                 dev,
6775                                 l2_tunnel->l2_tunnel_type);
6776                 else
6777                         ret = ixgbe_dev_l2_tunnel_stripping_disable(
6778                                 dev,
6779                                 l2_tunnel->l2_tunnel_type);
6780         }
6781
6782         if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
6783                 if (en)
6784                         ret = ixgbe_dev_l2_tunnel_forwarding_enable(
6785                                 dev,
6786                                 l2_tunnel->l2_tunnel_type);
6787                 else
6788                         ret = ixgbe_dev_l2_tunnel_forwarding_disable(
6789                                 dev,
6790                                 l2_tunnel->l2_tunnel_type);
6791         }
6792
6793         return ret;
6794 }
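
/*
 * Illustrative sketch (not part of this driver), assuming the
 * rte_eth_dev_l2_tunnel_offload_set() ethdev call: turning on E-tag
 * acceptance, forwarding and stripping in one shot with the masks used
 * above. Insertion additionally needs a valid vf_id/tunnel_id, so it is
 * left out here; the function name is hypothetical.
 */
static int
example_enable_etag_offloads(uint8_t port_id)
{
	struct rte_eth_l2_tunnel_conf l2_tunnel;
	uint32_t mask = ETH_L2_TUNNEL_ENABLE_MASK |
			ETH_L2_TUNNEL_FORWARDING_MASK |
			ETH_L2_TUNNEL_STRIPPING_MASK;

	memset(&l2_tunnel, 0, sizeof(l2_tunnel));
	l2_tunnel.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;

	return rte_eth_dev_l2_tunnel_offload_set(port_id, &l2_tunnel, mask, 1);
}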
6795
6796 static struct rte_driver rte_ixgbe_driver = {
6797         .type = PMD_PDEV,
6798         .init = rte_ixgbe_pmd_init,
6799 };
6800
6801 static struct rte_driver rte_ixgbevf_driver = {
6802         .type = PMD_PDEV,
6803         .init = rte_ixgbevf_pmd_init,
6804 };
6805
6806 PMD_REGISTER_DRIVER(rte_ixgbe_driver);
6807 PMD_REGISTER_DRIVER(rte_ixgbevf_driver);