ethdev: add vlan type when setting ether type
[dpdk.git] / drivers / net / ixgbe / ixgbe_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63
64 #include "ixgbe_logs.h"
65 #include "base/ixgbe_api.h"
66 #include "base/ixgbe_vf.h"
67 #include "base/ixgbe_common.h"
68 #include "ixgbe_ethdev.h"
69 #include "ixgbe_bypass.h"
70 #include "ixgbe_rxtx.h"
71 #include "base/ixgbe_type.h"
72 #include "base/ixgbe_phy.h"
73 #include "ixgbe_regs.h"
74
75 /*
76  * High threshold controlling when to start sending XOFF frames. Must be at
77  * least 8 bytes less than receive packet buffer size. This value is in units
78  * of 1024 bytes.
79  */
80 #define IXGBE_FC_HI    0x80
81
82 /*
83  * Low threshold controlling when to start sending XON frames. This value is
84  * in units of 1024 bytes.
85  */
86 #define IXGBE_FC_LO    0x40
87
88 /* Default minimum inter-interrupt interval for EITR configuration */
89 #define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E
90
91 /* Timer value included in XOFF frames. */
92 #define IXGBE_FC_PAUSE 0x680
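/*
 * With the defaults above, XOFF frames start being sent once the Rx packet
 * buffer fills past 0x80 * 1 KB = 128 KB, and XON frames once it drains back
 * below 0x40 * 1 KB = 64 KB.
 */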
93
94 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
95 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
96 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
97
98 #define IXGBE_MMW_SIZE_DEFAULT        0x4
99 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
100 #define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
101
102 /*
103  *  Default values for RX/TX configuration
104  */
105 #define IXGBE_DEFAULT_RX_FREE_THRESH  32
106 #define IXGBE_DEFAULT_RX_PTHRESH      8
107 #define IXGBE_DEFAULT_RX_HTHRESH      8
108 #define IXGBE_DEFAULT_RX_WTHRESH      0
109
110 #define IXGBE_DEFAULT_TX_FREE_THRESH  32
111 #define IXGBE_DEFAULT_TX_PTHRESH      32
112 #define IXGBE_DEFAULT_TX_HTHRESH      0
113 #define IXGBE_DEFAULT_TX_WTHRESH      0
114 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
115
116 /* Bit shift and mask */
117 #define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
118 #define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
119 #define IXGBE_8_BIT_WIDTH  CHAR_BIT
120 #define IXGBE_8_BIT_MASK   UINT8_MAX
121
122 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
123
124 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
125
126 #define IXGBE_HKEY_MAX_INDEX 10
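/* The RSS hash key is held in 10 32-bit registers, i.e. a 40-byte key. */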
127
128 /* Additional timesync values. */
129 #define NSEC_PER_SEC             1000000000L
130 #define IXGBE_INCVAL_10GB        0x66666666
131 #define IXGBE_INCVAL_1GB         0x40000000
132 #define IXGBE_INCVAL_100         0x50000000
133 #define IXGBE_INCVAL_SHIFT_10GB  28
134 #define IXGBE_INCVAL_SHIFT_1GB   24
135 #define IXGBE_INCVAL_SHIFT_100   21
136 #define IXGBE_INCVAL_SHIFT_82599 7
137 #define IXGBE_INCPER_SHIFT_82599 24
138
139 #define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
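/*
 * The INCVAL values above set how fast the hardware SYSTIME clock advances at
 * each link speed, and the SHIFT values give the fractional resolution of the
 * increment; the timesync functions below use them together with NSEC_PER_SEC
 * to convert raw timestamps to nanoseconds.
 */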
140
141 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
142 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
143 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
144 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
145 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
146 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
147 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
148 static void ixgbe_dev_close(struct rte_eth_dev *dev);
149 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
150 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
151 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
152 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
153 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
154                                 int wait_to_complete);
155 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
156                                 struct rte_eth_stats *stats);
157 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
158                                 struct rte_eth_xstats *xstats, unsigned n);
159 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
160                                   struct rte_eth_xstats *xstats, unsigned n);
161 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
162 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
163 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
164                                              uint16_t queue_id,
165                                              uint8_t stat_idx,
166                                              uint8_t is_rx);
167 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
168                                struct rte_eth_dev_info *dev_info);
169 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
170                                  struct rte_eth_dev_info *dev_info);
171 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
172
173 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
174                 uint16_t vlan_id, int on);
175 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
176                                enum rte_vlan_type vlan_type,
177                                uint16_t tpid_id);
178 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
179                 uint16_t queue, bool on);
180 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
181                 int on);
182 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
183 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
184 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
185 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
186 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
187
188 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
189 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
190 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
191                                struct rte_eth_fc_conf *fc_conf);
192 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
193                                struct rte_eth_fc_conf *fc_conf);
194 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
195                 struct rte_eth_pfc_conf *pfc_conf);
196 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
197                         struct rte_eth_rss_reta_entry64 *reta_conf,
198                         uint16_t reta_size);
199 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
200                         struct rte_eth_rss_reta_entry64 *reta_conf,
201                         uint16_t reta_size);
202 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
203 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
204 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
205 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
206 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
207 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
208                 void *param);
209 static void ixgbe_dev_interrupt_delayed_handler(void *param);
210 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
211                 uint32_t index, uint32_t pool);
212 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
213 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
214                                            struct ether_addr *mac_addr);
215 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
216
217 /* For Virtual Function support */
218 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
219 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
220 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
221 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
222 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
223 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
224 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
225 static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
226 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
227                 struct rte_eth_stats *stats);
228 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
229 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
230                 uint16_t vlan_id, int on);
231 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
232                 uint16_t queue, int on);
233 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
234 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
235 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
236                                             uint16_t queue_id);
237 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
238                                              uint16_t queue_id);
239 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
240                                  uint8_t queue, uint8_t msix_vector);
241 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
242
243 /* For Eth VMDQ APIs support */
244 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
245                 struct ether_addr *mac_addr, uint8_t on);
246 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
247 static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
248                 uint16_t rx_mask, uint8_t on);
249 static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
250 static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
251 static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
252                 uint64_t pool_mask, uint8_t vlan_on);
253 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
254                 struct rte_eth_mirror_conf *mirror_conf,
255                 uint8_t rule_id, uint8_t on);
256 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
257                 uint8_t rule_id);
258 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
259                                           uint16_t queue_id);
260 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
261                                            uint16_t queue_id);
262 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
263                                uint8_t queue, uint8_t msix_vector);
264 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
265
266 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
267                 uint16_t queue_idx, uint16_t tx_rate);
268 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
269                 uint16_t tx_rate, uint64_t q_msk);
270
271 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
272                                  struct ether_addr *mac_addr,
273                                  uint32_t index, uint32_t pool);
274 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
275 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
276                                              struct ether_addr *mac_addr);
277 static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
278                         struct rte_eth_syn_filter *filter,
279                         bool add);
280 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
281                         struct rte_eth_syn_filter *filter);
282 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
283                         enum rte_filter_op filter_op,
284                         void *arg);
285 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
286                         struct ixgbe_5tuple_filter *filter);
287 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
288                         struct ixgbe_5tuple_filter *filter);
289 static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
290                         struct rte_eth_ntuple_filter *filter,
291                         bool add);
292 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
293                                 enum rte_filter_op filter_op,
294                                 void *arg);
295 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
296                         struct rte_eth_ntuple_filter *filter);
297 static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
298                         struct rte_eth_ethertype_filter *filter,
299                         bool add);
300 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
301                                 enum rte_filter_op filter_op,
302                                 void *arg);
303 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
304                         struct rte_eth_ethertype_filter *filter);
305 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
306                      enum rte_filter_type filter_type,
307                      enum rte_filter_op filter_op,
308                      void *arg);
309 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
310
311 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
312                                       struct ether_addr *mc_addr_set,
313                                       uint32_t nb_mc_addr);
314 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
315                                    struct rte_eth_dcb_info *dcb_info);
316
317 static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
318 static int ixgbe_get_regs(struct rte_eth_dev *dev,
319                             struct rte_dev_reg_info *regs);
320 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
321 static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
322                                 struct rte_dev_eeprom_info *eeprom);
323 static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
324                                 struct rte_dev_eeprom_info *eeprom);
325
326 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
327 static int ixgbevf_get_regs(struct rte_eth_dev *dev,
328                                 struct rte_dev_reg_info *regs);
329
330 static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
331 static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
332 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
333                                             struct timespec *timestamp,
334                                             uint32_t flags);
335 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
336                                             struct timespec *timestamp);
337 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
338 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
339                                    struct timespec *timestamp);
340 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
341                                    const struct timespec *timestamp);
342
343 /*
344  * Define VF stats macros for registers that are not cleared on read
345  */
346 #define UPDATE_VF_STAT(reg, last, cur)                          \
347 {                                                               \
348         uint32_t latest = IXGBE_READ_REG(hw, reg);              \
349         cur += (latest - last) & UINT_MAX;                      \
350         last = latest;                                          \
351 }
352
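/*
 * Some VF counters are 36 bits wide and split across an LSB and an MSB
 * register. The macro below adds 2^36 before masking back to 36 bits so the
 * computed delta stays correct when the counter rolls over between reads.
 */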
353 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
354 {                                                                \
355         u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
356         u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
357         u64 latest = ((new_msb << 32) | new_lsb);                \
358         cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
359         last = latest;                                           \
360 }
361
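/*
 * Per-queue hardware VLAN strip state is tracked in a bitmap: q divided by
 * the number of bits per word selects the word, q modulo that count selects
 * the bit (NBBY is the number of bits per byte).
 */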
362 #define IXGBE_SET_HWSTRIP(h, q) do { \
363                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
364                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
365                 (h)->bitmap[idx] |= 1 << bit; \
366         } while (0)
367
368 #define IXGBE_CLEAR_HWSTRIP(h, q) do { \
369                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
370                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
371                 (h)->bitmap[idx] &= ~(1 << bit); \
372         } while (0)
373
374 #define IXGBE_GET_HWSTRIP(h, q, r) do { \
375                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
376                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
377                 (r) = (h)->bitmap[idx] >> bit & 1; \
378         } while (0)
379
380 /*
381  * The set of PCI devices this driver supports
382  */
383 static const struct rte_pci_id pci_id_ixgbe_map[] = {
384
385 #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
386 #include "rte_pci_dev_ids.h"
387
388 { .vendor_id = 0, /* sentinel */ },
389 };
390
391
392 /*
393  * The set of PCI devices this driver supports (for 82599 VF)
394  */
395 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
396
397 #define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
398 #include "rte_pci_dev_ids.h"
399 { .vendor_id = 0, /* sentinel */ },
400
401 };
402
403 static const struct rte_eth_desc_lim rx_desc_lim = {
404         .nb_max = IXGBE_MAX_RING_DESC,
405         .nb_min = IXGBE_MIN_RING_DESC,
406         .nb_align = IXGBE_RXD_ALIGN,
407 };
408
409 static const struct rte_eth_desc_lim tx_desc_lim = {
410         .nb_max = IXGBE_MAX_RING_DESC,
411         .nb_min = IXGBE_MIN_RING_DESC,
412         .nb_align = IXGBE_TXD_ALIGN,
413 };
414
415 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
416         .dev_configure        = ixgbe_dev_configure,
417         .dev_start            = ixgbe_dev_start,
418         .dev_stop             = ixgbe_dev_stop,
419         .dev_set_link_up    = ixgbe_dev_set_link_up,
420         .dev_set_link_down  = ixgbe_dev_set_link_down,
421         .dev_close            = ixgbe_dev_close,
422         .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
423         .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
424         .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
425         .allmulticast_disable = ixgbe_dev_allmulticast_disable,
426         .link_update          = ixgbe_dev_link_update,
427         .stats_get            = ixgbe_dev_stats_get,
428         .xstats_get           = ixgbe_dev_xstats_get,
429         .stats_reset          = ixgbe_dev_stats_reset,
430         .xstats_reset         = ixgbe_dev_xstats_reset,
431         .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
432         .dev_infos_get        = ixgbe_dev_info_get,
433         .mtu_set              = ixgbe_dev_mtu_set,
434         .vlan_filter_set      = ixgbe_vlan_filter_set,
435         .vlan_tpid_set        = ixgbe_vlan_tpid_set,
436         .vlan_offload_set     = ixgbe_vlan_offload_set,
437         .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
438         .rx_queue_start       = ixgbe_dev_rx_queue_start,
439         .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
440         .tx_queue_start       = ixgbe_dev_tx_queue_start,
441         .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
442         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
443         .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
444         .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
445         .rx_queue_release     = ixgbe_dev_rx_queue_release,
446         .rx_queue_count       = ixgbe_dev_rx_queue_count,
447         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
448         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
449         .tx_queue_release     = ixgbe_dev_tx_queue_release,
450         .dev_led_on           = ixgbe_dev_led_on,
451         .dev_led_off          = ixgbe_dev_led_off,
452         .flow_ctrl_get        = ixgbe_flow_ctrl_get,
453         .flow_ctrl_set        = ixgbe_flow_ctrl_set,
454         .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
455         .mac_addr_add         = ixgbe_add_rar,
456         .mac_addr_remove      = ixgbe_remove_rar,
457         .mac_addr_set         = ixgbe_set_default_mac_addr,
458         .uc_hash_table_set    = ixgbe_uc_hash_table_set,
459         .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
460         .mirror_rule_set      = ixgbe_mirror_rule_set,
461         .mirror_rule_reset    = ixgbe_mirror_rule_reset,
462         .set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
463         .set_vf_rx            = ixgbe_set_pool_rx,
464         .set_vf_tx            = ixgbe_set_pool_tx,
465         .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
466         .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
467         .set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
468         .reta_update          = ixgbe_dev_rss_reta_update,
469         .reta_query           = ixgbe_dev_rss_reta_query,
470 #ifdef RTE_NIC_BYPASS
471         .bypass_init          = ixgbe_bypass_init,
472         .bypass_state_set     = ixgbe_bypass_state_store,
473         .bypass_state_show    = ixgbe_bypass_state_show,
474         .bypass_event_set     = ixgbe_bypass_event_store,
475         .bypass_event_show    = ixgbe_bypass_event_show,
476         .bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
477         .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
478         .bypass_ver_show      = ixgbe_bypass_ver_show,
479         .bypass_wd_reset      = ixgbe_bypass_wd_reset,
480 #endif /* RTE_NIC_BYPASS */
481         .rss_hash_update      = ixgbe_dev_rss_hash_update,
482         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
483         .filter_ctrl          = ixgbe_dev_filter_ctrl,
484         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
485         .rxq_info_get         = ixgbe_rxq_info_get,
486         .txq_info_get         = ixgbe_txq_info_get,
487         .timesync_enable      = ixgbe_timesync_enable,
488         .timesync_disable     = ixgbe_timesync_disable,
489         .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
490         .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
491         .get_reg_length       = ixgbe_get_reg_length,
492         .get_reg              = ixgbe_get_regs,
493         .get_eeprom_length    = ixgbe_get_eeprom_length,
494         .get_eeprom           = ixgbe_get_eeprom,
495         .set_eeprom           = ixgbe_set_eeprom,
496         .get_dcb_info         = ixgbe_dev_get_dcb_info,
497         .timesync_adjust_time = ixgbe_timesync_adjust_time,
498         .timesync_read_time   = ixgbe_timesync_read_time,
499         .timesync_write_time  = ixgbe_timesync_write_time,
500 };
501
502 /*
503  * dev_ops for virtual function; only the bare necessities for basic VF
504  * operation have been implemented
505  */
506 static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
507         .dev_configure        = ixgbevf_dev_configure,
508         .dev_start            = ixgbevf_dev_start,
509         .dev_stop             = ixgbevf_dev_stop,
510         .link_update          = ixgbe_dev_link_update,
511         .stats_get            = ixgbevf_dev_stats_get,
512         .xstats_get           = ixgbevf_dev_xstats_get,
513         .stats_reset          = ixgbevf_dev_stats_reset,
514         .xstats_reset         = ixgbevf_dev_stats_reset,
515         .dev_close            = ixgbevf_dev_close,
516         .dev_infos_get        = ixgbevf_dev_info_get,
517         .mtu_set              = ixgbevf_dev_set_mtu,
518         .vlan_filter_set      = ixgbevf_vlan_filter_set,
519         .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
520         .vlan_offload_set     = ixgbevf_vlan_offload_set,
521         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
522         .rx_queue_release     = ixgbe_dev_rx_queue_release,
523         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
524         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
525         .tx_queue_release     = ixgbe_dev_tx_queue_release,
526         .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
527         .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
528         .mac_addr_add         = ixgbevf_add_mac_addr,
529         .mac_addr_remove      = ixgbevf_remove_mac_addr,
530         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
531         .rxq_info_get         = ixgbe_rxq_info_get,
532         .txq_info_get         = ixgbe_txq_info_get,
533         .mac_addr_set         = ixgbevf_set_default_mac_addr,
534         .get_reg_length       = ixgbevf_get_reg_length,
535         .get_reg              = ixgbevf_get_regs,
536         .reta_update          = ixgbe_dev_rss_reta_update,
537         .reta_query           = ixgbe_dev_rss_reta_query,
538         .rss_hash_update      = ixgbe_dev_rss_hash_update,
539         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
540 };
541
542 /* store statistics names and their offsets in the stats structure */
543 struct rte_ixgbe_xstats_name_off {
544         char name[RTE_ETH_XSTATS_NAME_SIZE];
545         unsigned offset;
546 };
547
548 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
549         {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
550         {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
551         {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
552         {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
553         {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
554         {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
555         {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
556         {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
557         {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
558         {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
559         {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
560         {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
561         {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
562         {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
563         {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
564                 prc1023)},
565         {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
566                 prc1522)},
567         {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
568         {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
569         {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
570         {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
571         {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
572         {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
573         {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
574         {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
575         {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
576         {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
577         {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
578         {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
579         {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
580         {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
581         {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
582         {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
583         {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
584                 ptc1023)},
585         {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
586                 ptc1522)},
587         {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
588         {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
589         {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
590         {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
591
592         {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
593                 fdirustat_add)},
594         {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
595                 fdirustat_remove)},
596         {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
597                 fdirfstat_fadd)},
598         {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
599                 fdirfstat_fremove)},
600         {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
601                 fdirmatch)},
602         {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
603                 fdirmiss)},
604
605         {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
606         {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
607         {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
608                 fclast)},
609         {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
610         {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
611         {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
612         {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
613         {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
614                 fcoe_noddp)},
615         {"rx_fcoe_no_direct_data_placement_ext_buff",
616                 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
617
618         {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
619                 lxontxc)},
620         {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
621                 lxonrxc)},
622         {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
623                 lxofftxc)},
624         {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
625                 lxoffrxc)},
626         {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
627 };
628
629 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
630                            sizeof(rte_ixgbe_stats_strings[0]))
631
632 /* Per-queue statistics */
633 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
634         {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
635         {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
636         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
637         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
638 };
639
640 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
641                            sizeof(rte_ixgbe_rxq_strings[0]))
642
643 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
644         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
645         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
646         {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
647                 pxon2offc)},
648 };
649
650 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
651                            sizeof(rte_ixgbe_txq_strings[0]))
652
653 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
654         {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
655 };
656
657 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
658                 sizeof(rte_ixgbevf_stats_strings[0]))
659
660 /**
661  * Atomically reads the link status information from global
662  * structure rte_eth_dev.
663  *
664  * @param dev
665  *   Pointer to the structure rte_eth_dev to read the link status from.
666  * @param link Pointer to the buffer into which the link status is copied.
667  *
668  * @return
669  *   - On success, zero.
670  *   - On failure, negative value.
671  */
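/* The copy below is done with a single 64-bit compare-and-set, which assumes
 * that struct rte_eth_link fits into 8 bytes. */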
672 static inline int
673 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
674                                 struct rte_eth_link *link)
675 {
676         struct rte_eth_link *dst = link;
677         struct rte_eth_link *src = &(dev->data->dev_link);
678
679         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
680                                         *(uint64_t *)src) == 0)
681                 return -1;
682
683         return 0;
684 }
685
686 /**
687  * Atomically writes the link status information into global
688  * structure rte_eth_dev.
689  *
690  * @param dev
691  *   Pointer to the structure rte_eth_dev to write the link status into.
692  * @param link Pointer to the buffer holding the new link status.
693  *
694  * @return
695  *   - On success, zero.
696  *   - On failure, negative value.
697  */
698 static inline int
699 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
700                                 struct rte_eth_link *link)
701 {
702         struct rte_eth_link *dst = &(dev->data->dev_link);
703         struct rte_eth_link *src = link;
704
705         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
706                                         *(uint64_t *)src) == 0)
707                 return -1;
708
709         return 0;
710 }
711
712 /*
713  * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
714  */
715 static inline int
716 ixgbe_is_sfp(struct ixgbe_hw *hw)
717 {
718         switch (hw->phy.type) {
719         case ixgbe_phy_sfp_avago:
720         case ixgbe_phy_sfp_ftl:
721         case ixgbe_phy_sfp_intel:
722         case ixgbe_phy_sfp_unknown:
723         case ixgbe_phy_sfp_passive_tyco:
724         case ixgbe_phy_sfp_passive_unknown:
725                 return 1;
726         default:
727                 return 0;
728         }
729 }
730
731 static inline int32_t
732 ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
733 {
734         uint32_t ctrl_ext;
735         int32_t status;
736
737         status = ixgbe_reset_hw(hw);
738
739         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
740         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
741         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
742         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
743         IXGBE_WRITE_FLUSH(hw);
744
745         return status;
746 }
747
748 static inline void
749 ixgbe_enable_intr(struct rte_eth_dev *dev)
750 {
751         struct ixgbe_interrupt *intr =
752                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
753         struct ixgbe_hw *hw =
754                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
755
756         IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
757         IXGBE_WRITE_FLUSH(hw);
758 }
759
760 /*
761  * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
762  */
763 static void
764 ixgbe_disable_intr(struct ixgbe_hw *hw)
765 {
766         PMD_INIT_FUNC_TRACE();
767
768         if (hw->mac.type == ixgbe_mac_82598EB) {
769                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
770         } else {
771                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
772                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
773                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
774         }
775         IXGBE_WRITE_FLUSH(hw);
776 }
777
778 /*
779  * This function resets queue statistics mapping registers.
780  * From Niantic datasheet, Initialization of Statistics section:
781  * "...if software requires the queue counters, the RQSMR and TQSM registers
782  * must be re-programmed following a device reset.
783  */
784 static void
785 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
786 {
787         uint32_t i;
788
789         for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
790                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
791                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
792         }
793 }
794
795
796 static int
797 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
798                                   uint16_t queue_id,
799                                   uint8_t stat_idx,
800                                   uint8_t is_rx)
801 {
802 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
803 #define NB_QMAP_FIELDS_PER_QSM_REG 4
804 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
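/*
 * Each RQSMR/TQSM register packs four 8-bit queue-to-stat-index fields, so
 * queue_id / 4 selects the register and queue_id % 4 the field within it.
 */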
805
806         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
807         struct ixgbe_stat_mapping_registers *stat_mappings =
808                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
809         uint32_t qsmr_mask = 0;
810         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
811         uint32_t q_map;
812         uint8_t n, offset;
813
814         if ((hw->mac.type != ixgbe_mac_82599EB) &&
815                 (hw->mac.type != ixgbe_mac_X540) &&
816                 (hw->mac.type != ixgbe_mac_X550) &&
817                 (hw->mac.type != ixgbe_mac_X550EM_x))
818                 return -ENOSYS;
819
820         PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
821                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
822                      queue_id, stat_idx);
823
824         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
825         if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
826                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
827                 return -EIO;
828         }
829         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
830
831         /* Now clear any previous stat_idx set */
832         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
833         if (!is_rx)
834                 stat_mappings->tqsm[n] &= ~clearing_mask;
835         else
836                 stat_mappings->rqsmr[n] &= ~clearing_mask;
837
838         q_map = (uint32_t)stat_idx;
839         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
840         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
841         if (!is_rx)
842                 stat_mappings->tqsm[n] |= qsmr_mask;
843         else
844                 stat_mappings->rqsmr[n] |= qsmr_mask;
845
846         PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
847                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
848                      queue_id, stat_idx);
849         PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
850                      is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
851
852         /* Now write the mapping in the appropriate register */
853         if (is_rx) {
854                 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
855                              stat_mappings->rqsmr[n], n);
856                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
857         }
858         else {
859                 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
860                              stat_mappings->tqsm[n], n);
861                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
862         }
863         return 0;
864 }
865
866 static void
867 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
868 {
869         struct ixgbe_stat_mapping_registers *stat_mappings =
870                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
871         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
872         int i;
873
874         /* write whatever was in stat mapping table to the NIC */
875         for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
876                 /* rx */
877                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
878
879                 /* tx */
880                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
881         }
882 }
883
884 static void
885 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
886 {
887         uint8_t i;
888         struct ixgbe_dcb_tc_config *tc;
889         uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
890
891         dcb_config->num_tcs.pg_tcs = dcb_max_tc;
892         dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
893         for (i = 0; i < dcb_max_tc; i++) {
894                 tc = &dcb_config->tc_config[i];
895                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
896                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
897                                  (uint8_t)(100/dcb_max_tc + (i & 1));
898                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
899                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
900                                  (uint8_t)(100/dcb_max_tc + (i & 1));
901                 tc->pfc = ixgbe_dcb_pfc_disabled;
902         }
903
904         /* Initialize default user to priority mapping, UPx->TC0 */
905         tc = &dcb_config->tc_config[0];
906         tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
907         tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
908         for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
909                 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
910                 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
911         }
912         dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
913         dcb_config->pfc_mode_enable = false;
914         dcb_config->vt_mode = true;
915         dcb_config->round_robin_enable = false;
916         /* support all DCB capabilities in 82599 */
917         dcb_config->support.capabilities = 0xFF;
918
919         /* we only support 4 TCs for X540, X550 */
920         if (hw->mac.type == ixgbe_mac_X540 ||
921                 hw->mac.type == ixgbe_mac_X550 ||
922                 hw->mac.type == ixgbe_mac_X550EM_x) {
923                 dcb_config->num_tcs.pg_tcs = 4;
924                 dcb_config->num_tcs.pfc_tcs = 4;
925         }
926 }
927
928 /*
929  * Ensure that all locks are released before first NVM or PHY access
930  */
931 static void
932 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
933 {
934         uint16_t mask;
935
936         /*
937          * Phy lock should not fail in this early stage. If this is the case,
938          * it is due to an improper exit of the application.
939          * So force the release of the faulty lock. Release of common lock
940          * is done automatically by swfw_sync function.
941          */
942         mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
943         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
944                 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
945         }
946         ixgbe_release_swfw_semaphore(hw, mask);
947
948         /*
949          * These ones are trickier since they are common to all ports; but
950          * swfw_sync retries for long enough (1s) to be almost sure that if the
951          * lock cannot be taken it is due to an improper lock of the
952          * semaphore.
953          */
954         mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
955         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
956                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
957         }
958         ixgbe_release_swfw_semaphore(hw, mask);
959 }
960
961 /*
962  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
963  * It returns 0 on success.
964  */
965 static int
966 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
967 {
968         struct rte_pci_device *pci_dev;
969         struct ixgbe_hw *hw =
970                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
971         struct ixgbe_vfta * shadow_vfta =
972                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
973         struct ixgbe_hwstrip *hwstrip =
974                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
975         struct ixgbe_dcb_config *dcb_config =
976                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
977         struct ixgbe_filter_info *filter_info =
978                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
979         uint32_t ctrl_ext;
980         uint16_t csum;
981         int diag, i;
982
983         PMD_INIT_FUNC_TRACE();
984
985         eth_dev->dev_ops = &ixgbe_eth_dev_ops;
986         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
987         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
988
989         /*
990          * For secondary processes, we don't initialise any further as primary
991          * has already done this work. Only check we don't need a different
992          * RX and TX function.
993          */
994         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
995                 struct ixgbe_tx_queue *txq;
996                 /* TX queue function in primary, set by last queue initialized;
997                  * Tx queue may not have been initialized by primary process */
998                 if (eth_dev->data->tx_queues) {
999                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1000                         ixgbe_set_tx_function(eth_dev, txq);
1001                 } else {
1002                         /* Use default TX function if we get here */
1003                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1004                                              "Using default TX function.");
1005                 }
1006
1007                 ixgbe_set_rx_function(eth_dev);
1008
1009                 return 0;
1010         }
1011         pci_dev = eth_dev->pci_dev;
1012
1013         rte_eth_copy_pci_info(eth_dev, pci_dev);
1014
1015         /* Vendor and Device ID need to be set before init of shared code */
1016         hw->device_id = pci_dev->id.device_id;
1017         hw->vendor_id = pci_dev->id.vendor_id;
1018         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1019         hw->allow_unsupported_sfp = 1;
1020
1021         /* Initialize the shared code (base driver) */
1022 #ifdef RTE_NIC_BYPASS
1023         diag = ixgbe_bypass_init_shared_code(hw);
1024 #else
1025         diag = ixgbe_init_shared_code(hw);
1026 #endif /* RTE_NIC_BYPASS */
1027
1028         if (diag != IXGBE_SUCCESS) {
1029                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1030                 return -EIO;
1031         }
1032
1033         /* pick up the PCI bus settings for reporting later */
1034         ixgbe_get_bus_info(hw);
1035
1036         /* Unlock any pending hardware semaphore */
1037         ixgbe_swfw_lock_reset(hw);
1038
1039         /* Initialize DCB configuration */
1040         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1041         ixgbe_dcb_init(hw, dcb_config);
1042         /* Get Hardware Flow Control setting */
1043         hw->fc.requested_mode = ixgbe_fc_full;
1044         hw->fc.current_mode = ixgbe_fc_full;
1045         hw->fc.pause_time = IXGBE_FC_PAUSE;
1046         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1047                 hw->fc.low_water[i] = IXGBE_FC_LO;
1048                 hw->fc.high_water[i] = IXGBE_FC_HI;
1049         }
1050         hw->fc.send_xon = 1;
1051
1052         /* Make sure we have a good EEPROM before we read from it */
1053         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1054         if (diag != IXGBE_SUCCESS) {
1055                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1056                 return -EIO;
1057         }
1058
1059 #ifdef RTE_NIC_BYPASS
1060         diag = ixgbe_bypass_init_hw(hw);
1061 #else
1062         diag = ixgbe_init_hw(hw);
1063 #endif /* RTE_NIC_BYPASS */
1064
1065         /*
1066          * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1067          * is called too soon after the kernel driver unbinding/binding occurs.
1068          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1069          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1070          * also called. See ixgbe_identify_phy_82599(). The reason for the
1071          * failure is not known, and it only occurs when virtualisation features
1072          * are disabled in the BIOS. A delay of 100 ms was found to be enough by
1073          * trial-and-error, and is doubled to be safe.
1074          */
1075         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1076                 rte_delay_ms(200);
1077                 diag = ixgbe_init_hw(hw);
1078         }
1079
1080         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1081                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1082                     "LOM.  Please be aware there may be issues associated "
1083                     "with your hardware.");
1084                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1085                     "please contact your Intel or hardware representative "
1086                     "who provided you with this hardware.");
1087         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1088                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1089         if (diag) {
1090                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1091                 return -EIO;
1092         }
1093
1094         /* Reset the hw statistics */
1095         ixgbe_dev_stats_reset(eth_dev);
1096
1097         /* disable interrupt */
1098         ixgbe_disable_intr(hw);
1099
1100         /* reset mappings for queue statistics hw counters */
1101         ixgbe_reset_qstat_mappings(hw);
1102
1103         /* Allocate memory for storing MAC addresses */
1104         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1105                         hw->mac.num_rar_entries, 0);
1106         if (eth_dev->data->mac_addrs == NULL) {
1107                 PMD_INIT_LOG(ERR,
1108                         "Failed to allocate %u bytes needed to store "
1109                         "MAC addresses",
1110                         ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1111                 return -ENOMEM;
1112         }
1113         /* Copy the permanent MAC address */
1114         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1115                         &eth_dev->data->mac_addrs[0]);
1116
1117         /* Allocate memory for storing hash filter MAC addresses */
1118         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1119                         IXGBE_VMDQ_NUM_UC_MAC, 0);
1120         if (eth_dev->data->hash_mac_addrs == NULL) {
1121                 PMD_INIT_LOG(ERR,
1122                         "Failed to allocate %d bytes needed to store MAC addresses",
1123                         ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1124                 return -ENOMEM;
1125         }
1126
1127         /* initialize the vfta */
1128         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1129
1130         /* initialize the hw strip bitmap*/
1131         memset(hwstrip, 0, sizeof(*hwstrip));
1132
1133         /* initialize PF if max_vfs not zero */
1134         ixgbe_pf_host_init(eth_dev);
1135
1136         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1137         /* let hardware know driver is loaded */
1138         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1139         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1140         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1141         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1142         IXGBE_WRITE_FLUSH(hw);
1143
1144         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1145                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1146                              (int) hw->mac.type, (int) hw->phy.type,
1147                              (int) hw->phy.sfp_type);
1148         else
1149                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1150                              (int) hw->mac.type, (int) hw->phy.type);
1151
1152         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1153                         eth_dev->data->port_id, pci_dev->id.vendor_id,
1154                         pci_dev->id.device_id);
1155
1156         rte_intr_callback_register(&pci_dev->intr_handle,
1157                                    ixgbe_dev_interrupt_handler,
1158                                    (void *)eth_dev);
1159
1160         /* enable uio/vfio intr/eventfd mapping */
1161         rte_intr_enable(&pci_dev->intr_handle);
1162
1163         /* enable support intr */
1164         ixgbe_enable_intr(eth_dev);
1165
1166         /* initialize 5tuple filter list */
1167         TAILQ_INIT(&filter_info->fivetuple_list);
1168         memset(filter_info->fivetuple_mask, 0,
1169                 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1170
1171         return 0;
1172 }
1173
1174 static int
1175 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1176 {
1177         struct rte_pci_device *pci_dev;
1178         struct ixgbe_hw *hw;
1179
1180         PMD_INIT_FUNC_TRACE();
1181
1182         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1183                 return -EPERM;
1184
1185         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1186         pci_dev = eth_dev->pci_dev;
1187
1188         if (hw->adapter_stopped == 0)
1189                 ixgbe_dev_close(eth_dev);
1190
1191         eth_dev->dev_ops = NULL;
1192         eth_dev->rx_pkt_burst = NULL;
1193         eth_dev->tx_pkt_burst = NULL;
1194
1195         /* Unlock any pending hardware semaphore */
1196         ixgbe_swfw_lock_reset(hw);
1197
1198         /* disable uio intr before callback unregister */
1199         rte_intr_disable(&(pci_dev->intr_handle));
1200         rte_intr_callback_unregister(&(pci_dev->intr_handle),
1201                 ixgbe_dev_interrupt_handler, (void *)eth_dev);
1202
1203         /* uninitialize PF if max_vfs not zero */
1204         ixgbe_pf_host_uninit(eth_dev);
1205
1206         rte_free(eth_dev->data->mac_addrs);
1207         eth_dev->data->mac_addrs = NULL;
1208
1209         rte_free(eth_dev->data->hash_mac_addrs);
1210         eth_dev->data->hash_mac_addrs = NULL;
1211
1212         return 0;
1213 }
1214
1215 /*
1216  * Negotiate mailbox API version with the PF.
1217  * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
1218  * Then we try to negotiate starting with the most recent one.
1219  * If all negotiation attempts fail, then we will proceed with
1220  * the default one (ixgbe_mbox_api_10).
1221  */
1222 static void
1223 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1224 {
1225         int32_t i;
1226
1227         /* start with highest supported, proceed down */
1228         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1229                 ixgbe_mbox_api_11,
1230                 ixgbe_mbox_api_10,
1231         };
1232
1233         for (i = 0;
1234                         i != RTE_DIM(sup_ver) &&
1235                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1236                         i++)
1237                 ;
1238 }
1239
1240 static void
1241 generate_random_mac_addr(struct ether_addr *mac_addr)
1242 {
1243         uint64_t random;
1244
1245         /* Set Organizationally Unique Identifier (OUI) prefix. */
1246         mac_addr->addr_bytes[0] = 0x00;
1247         mac_addr->addr_bytes[1] = 0x09;
1248         mac_addr->addr_bytes[2] = 0xC0;
1249         /* Force indication of locally assigned MAC address. */
1250         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1251         /* Generate the last 3 bytes of the MAC address with a random number. */
1252         random = rte_rand();
1253         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1254 }
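
/*
 * Illustrative note: ETHER_LOCAL_ADMIN_ADDR is the locally-administered bit
 * (0x02), so the addresses generated above take the form 02:09:c0:xx:xx:xx,
 * i.e. locally administered, unicast, with random low-order bytes.
 */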
1255
1256 /*
1257  * Virtual Function device init
1258  */
1259 static int
1260 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1261 {
1262         int diag;
1263         uint32_t tc, tcs;
1264         struct rte_pci_device *pci_dev;
1265         struct ixgbe_hw *hw =
1266                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1267         struct ixgbe_vfta * shadow_vfta =
1268                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1269         struct ixgbe_hwstrip *hwstrip =
1270                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1271         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1272
1273         PMD_INIT_FUNC_TRACE();
1274
1275         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1276         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1277         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1278
1279         /* for secondary processes, we don't initialise any further as primary
1280          * has already done this work. Only check we don't need a different
1281          * RX function */
1282         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1283                 if (eth_dev->data->scattered_rx)
1284                         eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
1285                 return 0;
1286         }
1287
1288         pci_dev = eth_dev->pci_dev;
1289
1290         rte_eth_copy_pci_info(eth_dev, pci_dev);
1291
1292         hw->device_id = pci_dev->id.device_id;
1293         hw->vendor_id = pci_dev->id.vendor_id;
1294         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1295
1296         /* initialize the vfta */
1297         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1298
1299         /* initialize the hw strip bitmap*/
1300         memset(hwstrip, 0, sizeof(*hwstrip));
1301
1302         /* Initialize the shared code (base driver) */
1303         diag = ixgbe_init_shared_code(hw);
1304         if (diag != IXGBE_SUCCESS) {
1305                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1306                 return -EIO;
1307         }
1308
1309         /* init_mailbox_params */
1310         hw->mbx.ops.init_params(hw);
1311
1312         /* Reset the hw statistics */
1313         ixgbevf_dev_stats_reset(eth_dev);
1314
1315         /* Disable the interrupts for VF */
1316         ixgbevf_intr_disable(hw);
1317
1318         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1319         diag = hw->mac.ops.reset_hw(hw);
1320
1321         /*
1322          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1323          * the underlying PF driver has not assigned a MAC address to the VF.
1324          * In this case, assign a random MAC address.
1325          */
1326         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1327                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1328                 return diag;
1329         }
1330
1331         /* negotiate mailbox API version to use with the PF. */
1332         ixgbevf_negotiate_api(hw);
1333
1334         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1335         ixgbevf_get_queues(hw, &tcs, &tc);
1336
1337         /* Allocate memory for storing MAC addresses */
1338         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1339                         hw->mac.num_rar_entries, 0);
1340         if (eth_dev->data->mac_addrs == NULL) {
1341                 PMD_INIT_LOG(ERR,
1342                         "Failed to allocate %u bytes needed to store "
1343                         "MAC addresses",
1344                         ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1345                 return -ENOMEM;
1346         }
1347
1348         /* Generate a random MAC address, if none was assigned by PF. */
1349         if (is_zero_ether_addr(perm_addr)) {
1350                 generate_random_mac_addr(perm_addr);
1351                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1352                 if (diag) {
1353                         rte_free(eth_dev->data->mac_addrs);
1354                         eth_dev->data->mac_addrs = NULL;
1355                         return diag;
1356                 }
1357                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1358                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1359                              "%02x:%02x:%02x:%02x:%02x:%02x",
1360                              perm_addr->addr_bytes[0],
1361                              perm_addr->addr_bytes[1],
1362                              perm_addr->addr_bytes[2],
1363                              perm_addr->addr_bytes[3],
1364                              perm_addr->addr_bytes[4],
1365                              perm_addr->addr_bytes[5]);
1366         }
1367
1368         /* Copy the permanent MAC address */
1369         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1370
1371         /* reset the hardware with the new settings */
1372         diag = hw->mac.ops.start_hw(hw);
1373         switch (diag) {
1374                 case  0:
1375                         break;
1376
1377                 default:
1378                         PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1379                         return -EIO;
1380         }
1381
1382         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1383                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1384                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1385
1386         return 0;
1387 }
1388
1389 /* Virtual Function device uninit */
1390
1391 static int
1392 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1393 {
1394         struct ixgbe_hw *hw;
1395         unsigned i;
1396
1397         PMD_INIT_FUNC_TRACE();
1398
1399         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1400                 return -EPERM;
1401
1402         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1403
1404         if (hw->adapter_stopped == 0)
1405                 ixgbevf_dev_close(eth_dev);
1406
1407         eth_dev->dev_ops = NULL;
1408         eth_dev->rx_pkt_burst = NULL;
1409         eth_dev->tx_pkt_burst = NULL;
1410
1411         /* Disable the interrupts for VF */
1412         ixgbevf_intr_disable(hw);
1413
1414         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1415                 ixgbe_dev_rx_queue_release(eth_dev->data->rx_queues[i]);
1416                 eth_dev->data->rx_queues[i] = NULL;
1417         }
1418         eth_dev->data->nb_rx_queues = 0;
1419
1420         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1421                 ixgbe_dev_tx_queue_release(eth_dev->data->tx_queues[i]);
1422                 eth_dev->data->tx_queues[i] = NULL;
1423         }
1424         eth_dev->data->nb_tx_queues = 0;
1425
1426         rte_free(eth_dev->data->mac_addrs);
1427         eth_dev->data->mac_addrs = NULL;
1428
1429         return 0;
1430 }
1431
1432 static struct eth_driver rte_ixgbe_pmd = {
1433         .pci_drv = {
1434                 .name = "rte_ixgbe_pmd",
1435                 .id_table = pci_id_ixgbe_map,
1436                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
1437                         RTE_PCI_DRV_DETACHABLE,
1438         },
1439         .eth_dev_init = eth_ixgbe_dev_init,
1440         .eth_dev_uninit = eth_ixgbe_dev_uninit,
1441         .dev_private_size = sizeof(struct ixgbe_adapter),
1442 };
1443
1444 /*
1445  * virtual function driver struct
1446  */
1447 static struct eth_driver rte_ixgbevf_pmd = {
1448         .pci_drv = {
1449                 .name = "rte_ixgbevf_pmd",
1450                 .id_table = pci_id_ixgbevf_map,
1451                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
1452         },
1453         .eth_dev_init = eth_ixgbevf_dev_init,
1454         .eth_dev_uninit = eth_ixgbevf_dev_uninit,
1455         .dev_private_size = sizeof(struct ixgbe_adapter),
1456 };
1457
1458 /*
1459  * Driver initialization routine.
1460  * Invoked once at EAL init time.
1461  * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
1462  */
1463 static int
1464 rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
1465 {
1466         PMD_INIT_FUNC_TRACE();
1467
1468         rte_eth_driver_register(&rte_ixgbe_pmd);
1469         return 0;
1470 }
1471
1472 /*
1473  * VF Driver initialization routine.
1474  * Invoked once at EAL init time.
1475  * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
1476  */
1477 static int
1478 rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
1479 {
1480         PMD_INIT_FUNC_TRACE();
1481
1482         rte_eth_driver_register(&rte_ixgbevf_pmd);
1483         return 0;
1484 }
1485
1486 static int
1487 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1488 {
1489         struct ixgbe_hw *hw =
1490                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1491         struct ixgbe_vfta * shadow_vfta =
1492                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1493         uint32_t vfta;
1494         uint32_t vid_idx;
1495         uint32_t vid_bit;
1496
1497         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1498         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1499         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1500         if (on)
1501                 vfta |= vid_bit;
1502         else
1503                 vfta &= ~vid_bit;
1504         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1505
1506         /* update local VFTA copy */
1507         shadow_vfta->vfta[vid_idx] = vfta;
1508
1509         return 0;
1510 }
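
/*
 * Worked example: the VFTA is an array of 128 32-bit registers covering all
 * 4096 VLAN IDs, so the shifts above map e.g. vlan_id 100 to register index
 * (100 >> 5) & 0x7F = 3 and bit mask 1 << (100 & 0x1F) = 1 << 4.
 */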
1511
1512 static void
1513 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1514 {
1515         if (on)
1516                 ixgbe_vlan_hw_strip_enable(dev, queue);
1517         else
1518                 ixgbe_vlan_hw_strip_disable(dev, queue);
1519 }
1520
1521 static int
1522 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1523                     enum rte_vlan_type vlan_type,
1524                     uint16_t tpid)
1525 {
1526         struct ixgbe_hw *hw =
1527                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1528         int ret = 0;
1529
1530         switch (vlan_type) {
1531         case ETH_VLAN_TYPE_INNER:
1532                 /* Only the high 16 bits are valid */
1533                 IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
1534                 break;
1535         default:
1536                 ret = -EINVAL;
1537                 PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
1538                 break;
1539         }
1540
1541         return ret;
1542 }
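
/*
 * Usage sketch (illustrative only, assuming the rte_eth_dev_set_vlan_ether_type()
 * prototype that takes a VLAN type): an application reaches the handler above
 * through the generic ethdev API, e.g. to set the inner TPID to 802.1ad
 * (0x88a8). Only ETH_VLAN_TYPE_INNER is accepted by this driver.
 */
static __rte_unused int
ixgbe_example_set_inner_tpid(uint8_t port_id)
{
	return rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_INNER,
					       0x88a8);
}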
1543
1544 void
1545 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1546 {
1547         struct ixgbe_hw *hw =
1548                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1549         uint32_t vlnctrl;
1550
1551         PMD_INIT_FUNC_TRACE();
1552
1553         /* Filter Table Disable */
1554         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1555         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1556
1557         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1558 }
1559
1560 void
1561 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1562 {
1563         struct ixgbe_hw *hw =
1564                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1565         struct ixgbe_vfta * shadow_vfta =
1566                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1567         uint32_t vlnctrl;
1568         uint16_t i;
1569
1570         PMD_INIT_FUNC_TRACE();
1571
1572         /* Filter Table Enable */
1573         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1574         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1575         vlnctrl |= IXGBE_VLNCTRL_VFE;
1576
1577         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1578
1579         /* write whatever is in local vfta copy */
1580         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1581                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1582 }
1583
1584 static void
1585 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1586 {
1587         struct ixgbe_hwstrip *hwstrip =
1588                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1589
1590         if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
1591                 return;
1592
1593         if (on)
1594                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1595         else
1596                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1597 }
1598
1599 static void
1600 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1601 {
1602         struct ixgbe_hw *hw =
1603                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1604         uint32_t ctrl;
1605
1606         PMD_INIT_FUNC_TRACE();
1607
1608         if (hw->mac.type == ixgbe_mac_82598EB) {
1609                 /* No queue level support */
1610                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
1611                 return;
1612         }
1613         else {
1614                 /* Other 10G NICs: VLAN stripping can be set up per queue in RXDCTL */
1615                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1616                 ctrl &= ~IXGBE_RXDCTL_VME;
1617                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1618         }
1619         /* record this setting for per-queue HW strip */
1620         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1621 }
1622
1623 static void
1624 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1625 {
1626         struct ixgbe_hw *hw =
1627                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1628         uint32_t ctrl;
1629
1630         PMD_INIT_FUNC_TRACE();
1631
1632         if (hw->mac.type == ixgbe_mac_82598EB) {
1633                 /* No queue level supported */
1634                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
1635                 return;
1636         }
1637         else {
1638                 /* Other 10G NICs: VLAN stripping can be set up per queue in RXDCTL */
1639                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1640                 ctrl |= IXGBE_RXDCTL_VME;
1641                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1642         }
1643         /* record this setting for per-queue HW strip */
1644         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1645 }
1646
1647 void
1648 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
1649 {
1650         struct ixgbe_hw *hw =
1651                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1652         uint32_t ctrl;
1653         uint16_t i;
1654
1655         PMD_INIT_FUNC_TRACE();
1656
1657         if (hw->mac.type == ixgbe_mac_82598EB) {
1658                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1659                 ctrl &= ~IXGBE_VLNCTRL_VME;
1660                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1661         }
1662         else {
1663                 /* Other 10G NICs: VLAN stripping can be set up per queue in RXDCTL */
1664                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1665                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1666                         ctrl &= ~IXGBE_RXDCTL_VME;
1667                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1668
1669                         /* record this setting for per-queue HW strip */
1670                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
1671                 }
1672         }
1673 }
1674
1675 void
1676 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
1677 {
1678         struct ixgbe_hw *hw =
1679                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1680         uint32_t ctrl;
1681         uint16_t i;
1682
1683         PMD_INIT_FUNC_TRACE();
1684
1685         if (hw->mac.type == ixgbe_mac_82598EB) {
1686                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1687                 ctrl |= IXGBE_VLNCTRL_VME;
1688                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1689         }
1690         else {
1691                 /* Other 10G NICs: VLAN stripping can be set up per queue in RXDCTL */
1692                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1693                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1694                         ctrl |= IXGBE_RXDCTL_VME;
1695                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1696
1697                         /* record this setting for per-queue HW strip */
1698                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
1699                 }
1700         }
1701 }
1702
1703 static void
1704 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1705 {
1706         struct ixgbe_hw *hw =
1707                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1708         uint32_t ctrl;
1709
1710         PMD_INIT_FUNC_TRACE();
1711
1712         /* DMATXCTRL: Generic Double VLAN Disable */
1713         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1714         ctrl &= ~IXGBE_DMATXCTL_GDV;
1715         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1716
1717         /* CTRL_EXT: Global Double VLAN Disable */
1718         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1719         ctrl &= ~IXGBE_EXTENDED_VLAN;
1720         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1721
1722 }
1723
1724 static void
1725 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1726 {
1727         struct ixgbe_hw *hw =
1728                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1729         uint32_t ctrl;
1730
1731         PMD_INIT_FUNC_TRACE();
1732
1733         /* DMATXCTRL: Generic Double VLAN Enable */
1734         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1735         ctrl |= IXGBE_DMATXCTL_GDV;
1736         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1737
1738         /* CTRL_EXT: Global Double VLAN Enable */
1739         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1740         ctrl |= IXGBE_EXTENDED_VLAN;
1741         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1742
1743         /*
1744          * VET EXT field in the EXVET register = 0x8100 by default, so no
1745          * need to change it. The same applies to the VT field of DMATXCTL.
1746          */
1747 }
1748
1749 static void
1750 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1751 {
1752         if(mask & ETH_VLAN_STRIP_MASK){
1753                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1754                         ixgbe_vlan_hw_strip_enable_all(dev);
1755                 else
1756                         ixgbe_vlan_hw_strip_disable_all(dev);
1757         }
1758
1759         if(mask & ETH_VLAN_FILTER_MASK){
1760                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1761                         ixgbe_vlan_hw_filter_enable(dev);
1762                 else
1763                         ixgbe_vlan_hw_filter_disable(dev);
1764         }
1765
1766         if(mask & ETH_VLAN_EXTEND_MASK){
1767                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1768                         ixgbe_vlan_hw_extend_enable(dev);
1769                 else
1770                         ixgbe_vlan_hw_extend_disable(dev);
1771         }
1772 }
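
/*
 * Usage sketch (illustrative only, generic ethdev API names): this handler
 * only acts on the bits present in 'mask' and reads the desired state from
 * dev_conf.rxmode, which the ethdev layer updates when an application calls
 * e.g.:
 *
 *	int flags = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	flags |= ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
 *	rte_eth_dev_set_vlan_offload(port_id, flags);
 */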
1773
1774 static void
1775 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1776 {
1777         struct ixgbe_hw *hw =
1778                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1779         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
1780         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1781         vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
1782         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
1783 }
1784
1785 static int
1786 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1787 {
1788         switch (nb_rx_q) {
1789         case 1:
1790         case 2:
1791                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
1792                 break;
1793         case 4:
1794                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
1795                 break;
1796         default:
1797                 return -EINVAL;
1798         }
1799
1800         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
1801         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
1802
1803         return 0;
1804 }
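
/*
 * Worked example (illustrative): with max_vfs = 31 and nb_rx_q = 2, the
 * switch above selects ETH_64_POOLS, each pool owns 2 queues, and the PF's
 * default pool starts at queue index 31 * 2 = 62.
 */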
1805
1806 static int
1807 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
1808 {
1809         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1810         uint16_t nb_rx_q = dev->data->nb_rx_queues;
1811         uint16_t nb_tx_q = dev->data->nb_tx_queues;
1812
1813         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1814                 /* check multi-queue mode */
1815                 switch (dev_conf->rxmode.mq_mode) {
1816                 case ETH_MQ_RX_VMDQ_DCB:
1817                 case ETH_MQ_RX_VMDQ_DCB_RSS:
1818                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
1819                         PMD_INIT_LOG(ERR, "SRIOV active,"
1820                                         " unsupported mq_mode rx %d.",
1821                                         dev_conf->rxmode.mq_mode);
1822                         return -EINVAL;
1823                 case ETH_MQ_RX_RSS:
1824                 case ETH_MQ_RX_VMDQ_RSS:
1825                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1826                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1827                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1828                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1829                                                 " invalid queue number"
1830                                                 " for VMDQ RSS, allowed"
1831                                                 " values are 1, 2 or 4.");
1832                                         return -EINVAL;
1833                                 }
1834                         break;
1835                 case ETH_MQ_RX_VMDQ_ONLY:
1836                 case ETH_MQ_RX_NONE:
1837                         /* if no mq mode is configured, use the default scheme */
1838                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
1839                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
1840                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
1841                         break;
1842                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1843                         /* SRIOV only works in VMDq enable mode */
1844                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1845                                         " wrong mq_mode rx %d.",
1846                                         dev_conf->rxmode.mq_mode);
1847                         return -EINVAL;
1848                 }
1849
1850                 switch (dev_conf->txmode.mq_mode) {
1851                 case ETH_MQ_TX_VMDQ_DCB:
1852                         /* DCB VMDQ in SRIOV mode, not implemented yet */
1853                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1854                                         " unsupported VMDQ mq_mode tx %d.",
1855                                         dev_conf->txmode.mq_mode);
1856                         return -EINVAL;
1857                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1858                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
1859                         break;
1860                 }
1861
1862                 /* check valid queue number */
1863                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1864                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1865                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1866                                         " queue number must be less than or equal to %d.",
1867                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1868                         return -EINVAL;
1869                 }
1870         } else {
1871                 /* check configuration for vmdq+dcb mode */
1872                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1873                         const struct rte_eth_vmdq_dcb_conf *conf;
1874
1875                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
1876                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1877                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
1878                                 return -EINVAL;
1879                         }
1880                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1881                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1882                                conf->nb_queue_pools == ETH_32_POOLS)) {
1883                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1884                                                 " nb_queue_pools must be %d or %d.",
1885                                                 ETH_16_POOLS, ETH_32_POOLS);
1886                                 return -EINVAL;
1887                         }
1888                 }
1889                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1890                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
1891
1892                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
1893                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1894                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
1895                                 return -EINVAL;
1896                         }
1897                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1898                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1899                                conf->nb_queue_pools == ETH_32_POOLS)) {
1900                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1901                                                 " nb_queue_pools != %d and"
1902                                                 " nb_queue_pools != %d.",
1903                                                 ETH_16_POOLS, ETH_32_POOLS);
1904                                 return -EINVAL;
1905                         }
1906                 }
1907
1908                 /* For DCB mode check our configuration before we go further */
1909                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1910                         const struct rte_eth_dcb_rx_conf *conf;
1911
1912                         if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
1913                                 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
1914                                                  IXGBE_DCB_NB_QUEUES);
1915                                 return -EINVAL;
1916                         }
1917                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1918                         if (!(conf->nb_tcs == ETH_4_TCS ||
1919                                conf->nb_tcs == ETH_8_TCS)) {
1920                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1921                                                 " and nb_tcs != %d.",
1922                                                 ETH_4_TCS, ETH_8_TCS);
1923                                 return -EINVAL;
1924                         }
1925                 }
1926
1927                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1928                         const struct rte_eth_dcb_tx_conf *conf;
1929
1930                         if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
1931                                 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
1932                                                  IXGBE_DCB_NB_QUEUES);
1933                                 return -EINVAL;
1934                         }
1935                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1936                         if (!(conf->nb_tcs == ETH_4_TCS ||
1937                                conf->nb_tcs == ETH_8_TCS)) {
1938                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1939                                                 " and nb_tcs != %d.",
1940                                                 ETH_4_TCS, ETH_8_TCS);
1941                                 return -EINVAL;
1942                         }
1943                 }
1944         }
1945         return 0;
1946 }
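
/*
 * Configuration sketch (illustrative only, using the generic rte_eth_conf
 * layout): a non-SRIOV VMDQ+DCB setup that passes the checks above needs
 * IXGBE_VMDQ_DCB_NB_QUEUES Rx queues and 16 or 32 pools, e.g.:
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
 *	conf.rx_adv_conf.vmdq_dcb_conf.nb_queue_pools = ETH_16_POOLS;
 *	rte_eth_dev_configure(port_id, IXGBE_VMDQ_DCB_NB_QUEUES,
 *			      IXGBE_VMDQ_DCB_NB_QUEUES, &conf);
 */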
1947
1948 static int
1949 ixgbe_dev_configure(struct rte_eth_dev *dev)
1950 {
1951         struct ixgbe_interrupt *intr =
1952                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1953         struct ixgbe_adapter *adapter =
1954                 (struct ixgbe_adapter *)dev->data->dev_private;
1955         int ret;
1956
1957         PMD_INIT_FUNC_TRACE();
1958         /* multiple queue mode checking */
1959         ret  = ixgbe_check_mq_mode(dev);
1960         if (ret != 0) {
1961                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
1962                             ret);
1963                 return ret;
1964         }
1965
1966         /* set flag to update link status after init */
1967         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1968
1969         /*
1970          * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
1971          * allocation or vector Rx preconditions we will reset it.
1972          */
1973         adapter->rx_bulk_alloc_allowed = true;
1974         adapter->rx_vec_allowed = true;
1975
1976         return 0;
1977 }
1978
1979 /*
1980  * Configure device link speed and setup link.
1981  * It returns 0 on success.
1982  */
1983 static int
1984 ixgbe_dev_start(struct rte_eth_dev *dev)
1985 {
1986         struct ixgbe_hw *hw =
1987                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1988         struct ixgbe_vf_info *vfinfo =
1989                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
1990         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1991         uint32_t intr_vector = 0;
1992         int err, link_up = 0, negotiate = 0;
1993         uint32_t speed = 0;
1994         int mask = 0;
1995         int status;
1996         uint16_t vf, idx;
1997
1998         PMD_INIT_FUNC_TRACE();
1999
2000         /* IXGBE devices don't support half duplex */
2001         if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
2002                         (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
2003                 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
2004                              dev->data->dev_conf.link_duplex,
2005                              dev->data->port_id);
2006                 return -EINVAL;
2007         }
2008
2009         /* disable uio/vfio intr/eventfd mapping */
2010         rte_intr_disable(intr_handle);
2011
2012         /* stop adapter */
2013         hw->adapter_stopped = 0;
2014         ixgbe_stop_adapter(hw);
2015
2016         /* reinitialize adapter
2017          * this calls reset and start */
2018         status = ixgbe_pf_reset_hw(hw);
2019         if (status != 0)
2020                 return -1;
2021         hw->mac.ops.start_hw(hw);
2022         hw->mac.get_link_status = true;
2023
2024         /* configure PF module if SRIOV enabled */
2025         ixgbe_pf_host_configure(dev);
2026
2027         /* check and configure queue intr-vector mapping */
2028         if ((rte_intr_cap_multiple(intr_handle) ||
2029              !RTE_ETH_DEV_SRIOV(dev).active) &&
2030             dev->data->dev_conf.intr_conf.rxq != 0) {
2031                 intr_vector = dev->data->nb_rx_queues;
2032                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2033                         return -1;
2034         }
2035
2036         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2037                 intr_handle->intr_vec =
2038                         rte_zmalloc("intr_vec",
2039                                     dev->data->nb_rx_queues * sizeof(int), 0);
2040                 if (intr_handle->intr_vec == NULL) {
2041                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2042                                      " intr_vec\n", dev->data->nb_rx_queues);
2043                         return -ENOMEM;
2044                 }
2045         }
2046
2047         /* configure MSI-X for sleep until Rx interrupt */
2048         ixgbe_configure_msix(dev);
2049
2050         /* initialize transmission unit */
2051         ixgbe_dev_tx_init(dev);
2052
2053         /* This can fail when allocating mbufs for descriptor rings */
2054         err = ixgbe_dev_rx_init(dev);
2055         if (err) {
2056                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2057                 goto error;
2058         }
2059
2060         err = ixgbe_dev_rxtx_start(dev);
2061         if (err < 0) {
2062                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2063                 goto error;
2064         }
2065
2066         /* Skip link setup if loopback mode is enabled for 82599. */
2067         if (hw->mac.type == ixgbe_mac_82599EB &&
2068                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2069                 goto skip_link_setup;
2070
2071         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2072                 err = hw->mac.ops.setup_sfp(hw);
2073                 if (err)
2074                         goto error;
2075         }
2076
2077         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2078                 /* Turn on the copper */
2079                 ixgbe_set_phy_power(hw, true);
2080         } else {
2081                 /* Turn on the laser */
2082                 ixgbe_enable_tx_laser(hw);
2083         }
2084
2085         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2086         if (err)
2087                 goto error;
2088         dev->data->dev_link.link_status = link_up;
2089
2090         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2091         if (err)
2092                 goto error;
2093
2094         switch(dev->data->dev_conf.link_speed) {
2095         case ETH_LINK_SPEED_AUTONEG:
2096                 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
2097                                 IXGBE_LINK_SPEED_82599_AUTONEG :
2098                                 IXGBE_LINK_SPEED_82598_AUTONEG;
2099                 break;
2100         case ETH_LINK_SPEED_100:
2101                 /*
2102                  * Invalid for 82598 but error will be detected by
2103                  * ixgbe_setup_link()
2104                  */
2105                 speed = IXGBE_LINK_SPEED_100_FULL;
2106                 break;
2107         case ETH_LINK_SPEED_1000:
2108                 speed = IXGBE_LINK_SPEED_1GB_FULL;
2109                 break;
2110         case ETH_LINK_SPEED_10000:
2111                 speed = IXGBE_LINK_SPEED_10GB_FULL;
2112                 break;
2113         default:
2114                 PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
2115                              dev->data->dev_conf.link_speed,
2116                              dev->data->port_id);
2117                 goto error;
2118         }
2119
2120         err = ixgbe_setup_link(hw, speed, link_up);
2121         if (err)
2122                 goto error;
2123
2124 skip_link_setup:
2125
2126         if (rte_intr_allow_others(intr_handle)) {
2127                 /* check if lsc interrupt is enabled */
2128                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2129                         ixgbe_dev_lsc_interrupt_setup(dev);
2130         } else {
2131                 rte_intr_callback_unregister(intr_handle,
2132                                              ixgbe_dev_interrupt_handler,
2133                                              (void *)dev);
2134                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2135                         PMD_INIT_LOG(INFO, "lsc interrupt cannot be enabled:"
2136                                      " no interrupt multiplexing support\n");
2137         }
2138
2139         /* check if rxq interrupt is enabled */
2140         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2141             rte_intr_dp_is_en(intr_handle))
2142                 ixgbe_dev_rxq_interrupt_setup(dev);
2143
2144         /* enable uio/vfio intr/eventfd mapping */
2145         rte_intr_enable(intr_handle);
2146
2147         /* resume enabled intr since hw reset */
2148         ixgbe_enable_intr(dev);
2149
2150         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
2151                 ETH_VLAN_EXTEND_MASK;
2152         ixgbe_vlan_offload_set(dev, mask);
2153
2154         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2155                 /* Enable vlan filtering for VMDq */
2156                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2157         }
2158
2159         /* Configure DCB hw */
2160         ixgbe_configure_dcb(dev);
2161
2162         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2163                 err = ixgbe_fdir_configure(dev);
2164                 if (err)
2165                         goto error;
2166         }
2167
2168         /* Restore vf rate limit */
2169         if (vfinfo != NULL) {
2170                 for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
2171                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2172                                 if (vfinfo[vf].tx_rate[idx] != 0)
2173                                         ixgbe_set_vf_rate_limit(dev, vf,
2174                                                 vfinfo[vf].tx_rate[idx],
2175                                                 1 << idx);
2176         }
2177
2178         ixgbe_restore_statistics_mapping(dev);
2179
2180         return 0;
2181
2182 error:
2183         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2184         ixgbe_dev_clear_queues(dev);
2185         return -EIO;
2186 }
2187
2188 /*
2189  * Stop device: disable rx and tx functions to allow for reconfiguring.
2190  */
2191 static void
2192 ixgbe_dev_stop(struct rte_eth_dev *dev)
2193 {
2194         struct rte_eth_link link;
2195         struct ixgbe_hw *hw =
2196                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2197         struct ixgbe_vf_info *vfinfo =
2198                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2199         struct ixgbe_filter_info *filter_info =
2200                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2201         struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
2202         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
2203         int vf;
2204
2205         PMD_INIT_FUNC_TRACE();
2206
2207         /* disable interrupts */
2208         ixgbe_disable_intr(hw);
2209
2210         /* disable intr eventfd mapping */
2211         rte_intr_disable(intr_handle);
2212
2213         /* reset the NIC */
2214         ixgbe_pf_reset_hw(hw);
2215         hw->adapter_stopped = 0;
2216
2217         /* stop adapter */
2218         ixgbe_stop_adapter(hw);
2219
2220         for (vf = 0; vfinfo != NULL &&
2221                      vf < dev->pci_dev->max_vfs; vf++)
2222                 vfinfo[vf].clear_to_send = false;
2223
2224         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2225                 /* Turn off the copper */
2226                 ixgbe_set_phy_power(hw, false);
2227         } else {
2228                 /* Turn off the laser */
2229                 ixgbe_disable_tx_laser(hw);
2230         }
2231
2232         ixgbe_dev_clear_queues(dev);
2233
2234         /* Clear stored conf */
2235         dev->data->scattered_rx = 0;
2236         dev->data->lro = 0;
2237
2238         /* Clear recorded link status */
2239         memset(&link, 0, sizeof(link));
2240         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2241
2242         /* Remove all ntuple filters of the device */
2243         for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
2244              p_5tuple != NULL; p_5tuple = p_5tuple_next) {
2245                 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
2246                 TAILQ_REMOVE(&filter_info->fivetuple_list,
2247                              p_5tuple, entries);
2248                 rte_free(p_5tuple);
2249         }
2250         memset(filter_info->fivetuple_mask, 0,
2251                 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
2252
2253         if (!rte_intr_allow_others(intr_handle))
2254                 /* resume to the default handler */
2255                 rte_intr_callback_register(intr_handle,
2256                                            ixgbe_dev_interrupt_handler,
2257                                            (void *)dev);
2258
2259         /* Clean datapath event and queue/vec mapping */
2260         rte_intr_efd_disable(intr_handle);
2261         if (intr_handle->intr_vec != NULL) {
2262                 rte_free(intr_handle->intr_vec);
2263                 intr_handle->intr_vec = NULL;
2264         }
2265 }
2266
2267 /*
2268  * Set device link up: enable tx.
2269  */
2270 static int
2271 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2272 {
2273         struct ixgbe_hw *hw =
2274                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2275         if (hw->mac.type == ixgbe_mac_82599EB) {
2276 #ifdef RTE_NIC_BYPASS
2277                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2278                         /* Not supported in bypass mode */
2279                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2280                                      "by device id 0x%x", hw->device_id);
2281                         return -ENOTSUP;
2282                 }
2283 #endif
2284         }
2285
2286         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2287                 /* Turn on the copper */
2288                 ixgbe_set_phy_power(hw, true);
2289         } else {
2290                 /* Turn on the laser */
2291                 ixgbe_enable_tx_laser(hw);
2292         }
2293
2294         return 0;
2295 }
2296
2297 /*
2298  * Set device link down: disable tx.
2299  */
2300 static int
2301 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2302 {
2303         struct ixgbe_hw *hw =
2304                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2305         if (hw->mac.type == ixgbe_mac_82599EB) {
2306 #ifdef RTE_NIC_BYPASS
2307                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2308                         /* Not supported in bypass mode */
2309                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2310                                      "by device id 0x%x", hw->device_id);
2311                         return -ENOTSUP;
2312                 }
2313 #endif
2314         }
2315
2316         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2317                 /* Turn off the copper */
2318                 ixgbe_set_phy_power(hw, false);
2319         } else {
2320                 /* Turn off the laser */
2321                 ixgbe_disable_tx_laser(hw);
2322         }
2323
2324         return 0;
2325 }
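
/*
 * Usage sketch (illustrative, generic ethdev API names): the two handlers
 * above back rte_eth_dev_set_link_up()/rte_eth_dev_set_link_down(), letting
 * an application drop and restore the link without a full stop/start, e.g.:
 *
 *	rte_eth_dev_set_link_down(port_id);
 *	... maintenance window ...
 *	rte_eth_dev_set_link_up(port_id);
 */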
2326
2327 /*
2328  * Reset and stop device.
2329  */
2330 static void
2331 ixgbe_dev_close(struct rte_eth_dev *dev)
2332 {
2333         struct ixgbe_hw *hw =
2334                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2335
2336         PMD_INIT_FUNC_TRACE();
2337
2338         ixgbe_pf_reset_hw(hw);
2339
2340         ixgbe_dev_stop(dev);
2341         hw->adapter_stopped = 1;
2342
2343         ixgbe_dev_free_queues(dev);
2344
2345         ixgbe_disable_pcie_master(hw);
2346
2347         /* reprogram the RAR[0] in case user changed it. */
2348         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2349 }
2350
2351 static void
2352 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2353                            struct ixgbe_hw_stats *hw_stats,
2354                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
2355                            uint64_t *total_qprc, uint64_t *total_qprdc)
2356 {
2357         uint32_t bprc, lxon, lxoff, total;
2358         uint32_t delta_gprc = 0;
2359         unsigned i;
2360         /* Workaround for RX byte count not including CRC bytes when CRC
2361          * strip is enabled. CRC bytes are removed from counters when crc_strip
2362          * is disabled.
2363          */
2364         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
2365                         IXGBE_HLREG0_RXCRCSTRP);
2366
2367         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2368         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2369         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2370         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2371
2372         for (i = 0; i < 8; i++) {
2373                 uint32_t mp;
2374                 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2375                 /* global total per queue */
2376                 hw_stats->mpc[i] += mp;
2377                 /* Running comprehensive total for stats display */
2378                 *total_missed_rx += hw_stats->mpc[i];
2379                 if (hw->mac.type == ixgbe_mac_82598EB) {
2380                         hw_stats->rnbc[i] +=
2381                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2382                         hw_stats->pxonrxc[i] +=
2383                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2384                         hw_stats->pxoffrxc[i] +=
2385                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2386                 } else {
2387                         hw_stats->pxonrxc[i] +=
2388                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2389                         hw_stats->pxoffrxc[i] +=
2390                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2391                         hw_stats->pxon2offc[i] +=
2392                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2393                 }
2394                 hw_stats->pxontxc[i] +=
2395                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2396                 hw_stats->pxofftxc[i] +=
2397                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2398         }
2399         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2400                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2401                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2402                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2403
2404                 delta_gprc += delta_qprc;
2405
2406                 hw_stats->qprc[i] += delta_qprc;
2407                 hw_stats->qptc[i] += delta_qptc;
2408
2409                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2410                 hw_stats->qbrc[i] +=
2411                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2412                 if (crc_strip == 0)
2413                         hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
2414
2415                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2416                 hw_stats->qbtc[i] +=
2417                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2418
2419                 hw_stats->qprdc[i] += delta_qprdc;
2420                 *total_qprdc += hw_stats->qprdc[i];
2421
2422                 *total_qprc += hw_stats->qprc[i];
2423                 *total_qbrc += hw_stats->qbrc[i];
2424         }
2425         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2426         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2427         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2428
2429         /*
2430          * An errata states that gprc actually counts good + missed packets:
2431          * Work around this by setting gprc to the sum of the queue receive counts.
2432          */
2433         hw_stats->gprc = *total_qprc;
2434
2435         if (hw->mac.type != ixgbe_mac_82598EB) {
2436                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
2437                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
2438                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
2439                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
2440                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
2441                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
2442                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
2443                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
2444         } else {
2445                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2446                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2447                 /* 82598 only has a counter in the high register */
2448                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2449                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2450                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2451         }
2452         uint64_t old_tpr = hw_stats->tpr;
2453
2454         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2455         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
2456
2457         if (crc_strip == 0)
2458                 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
2459
2460         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
2461         hw_stats->gptc += delta_gptc;
2462         hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
2463         hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
2464
2465         /*
2466          * Workaround: mprc hardware is incorrectly counting
2467          * broadcasts, so for now we subtract those.
2468          */
2469         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2470         hw_stats->bprc += bprc;
2471         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2472         if (hw->mac.type == ixgbe_mac_82598EB)
2473                 hw_stats->mprc -= bprc;
2474
2475         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2476         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2477         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2478         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2479         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2480         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2481
2482         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2483         hw_stats->lxontxc += lxon;
2484         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2485         hw_stats->lxofftxc += lxoff;
2486         total = lxon + lxoff;
2487
2488         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2489         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2490         hw_stats->gptc -= total;
2491         hw_stats->mptc -= total;
2492         hw_stats->ptc64 -= total;
2493         hw_stats->gotc -= total * ETHER_MIN_LEN;
2494
2495         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2496         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2497         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2498         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2499         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
2500         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
2501         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
2502         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2503         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2504         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2505         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2506         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
2507         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2508         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
2509         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
2510         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
2511         /* Only read FCOE on 82599 */
2512         if (hw->mac.type != ixgbe_mac_82598EB) {
2513                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
2514                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
2515                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
2516                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
2517                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
2518         }
2519
2520         /* Flow Director Stats registers */
2521         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
2522         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
2523 }
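
/*
 * Worked example (illustrative): when CRC stripping is off (crc_strip == 0)
 * and a queue's QPRC delta is 1000 frames, the code above subtracts
 * 1000 * ETHER_CRC_LEN = 4000 bytes from that queue's qbrc counter (and the
 * matching amount from gorc) so software byte counts stay consistent.
 */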
2524
2525 /*
2526  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
2527  */
2528 static void
2529 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2530 {
2531         struct ixgbe_hw *hw =
2532                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2533         struct ixgbe_hw_stats *hw_stats =
2534                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2535         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2536         unsigned i;
2537
2538         total_missed_rx = 0;
2539         total_qbrc = 0;
2540         total_qprc = 0;
2541         total_qprdc = 0;
2542
2543         ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
2544                         &total_qprc, &total_qprdc);
2545
2546         if (stats == NULL)
2547                 return;
2548
2549         /* Fill out the rte_eth_stats statistics structure */
2550         stats->ipackets = total_qprc;
2551         stats->ibytes = total_qbrc;
2552         stats->opackets = hw_stats->gptc;
2553         stats->obytes = hw_stats->gotc;
2554
2555         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2556                 stats->q_ipackets[i] = hw_stats->qprc[i];
2557                 stats->q_opackets[i] = hw_stats->qptc[i];
2558                 stats->q_ibytes[i] = hw_stats->qbrc[i];
2559                 stats->q_obytes[i] = hw_stats->qbtc[i];
2560                 stats->q_errors[i] = hw_stats->qprdc[i];
2561         }
2562
2563         /* Rx Errors */
2564         stats->imissed  = total_missed_rx;
2565         stats->ierrors  = hw_stats->crcerrs +
2566                           hw_stats->mspdc +
2567                           hw_stats->rlec +
2568                           hw_stats->ruc +
2569                           hw_stats->roc +
2570                           total_missed_rx +
2571                           hw_stats->illerrc +
2572                           hw_stats->errbc +
2573                           hw_stats->rfc +
2574                           hw_stats->fccrc +
2575                           hw_stats->fclast;
2576
2577         /* Tx Errors */
2578         stats->oerrors  = 0;
2579 }
2580
2581 static void
2582 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
2583 {
2584         struct ixgbe_hw_stats *stats =
2585                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2586
2587         /* HW registers are cleared on read */
2588         ixgbe_dev_stats_get(dev, NULL);
2589
2590         /* Reset software totals */
2591         memset(stats, 0, sizeof(*stats));
2592 }
2593
2594 /* This function calculates the number of xstats based on the current config */
2595 static unsigned
2596 ixgbe_xstats_calc_num(void) {
2597         return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
2598                 (IXGBE_NB_TXQ_PRIO_STATS * 8);
2599 }
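/*
 * Illustrative note (not part of the upstream logic): each per-priority
 * counter table contributes 8 entries, one per priority 0-7, so the total
 * reported here is
 * IXGBE_NB_HW_STATS + 8 * (IXGBE_NB_RXQ_PRIO_STATS + IXGBE_NB_TXQ_PRIO_STATS).
 */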
2600
2601 static int
2602 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
2603                                          unsigned n)
2604 {
2605         struct ixgbe_hw *hw =
2606                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2607         struct ixgbe_hw_stats *hw_stats =
2608                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2609         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2610         unsigned i, stat, count = 0;
2611
2612         count = ixgbe_xstats_calc_num();
2613
2614         if (n < count)
2615                 return count;
2616
2617         total_missed_rx = 0;
2618         total_qbrc = 0;
2619         total_qprc = 0;
2620         total_qprdc = 0;
2621
2622         ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
2623                                    &total_qprc, &total_qprdc);
2624
2625         /* If this is a reset, xstats is NULL and we have cleared the
2626          * registers by reading them.
2627          */
2628         if (!xstats)
2629                 return 0;
2630
2631         /* Extended stats from ixgbe_hw_stats */
2632         count = 0;
2633         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
2634                 snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
2635                          rte_ixgbe_stats_strings[i].name);
2636                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2637                                 rte_ixgbe_stats_strings[i].offset);
2638                 count++;
2639         }
2640
2641         /* RX Priority Stats */
2642         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
2643                 for (i = 0; i < 8; i++) {
2644                         snprintf(xstats[count].name, sizeof(xstats[count].name),
2645                                  "rx_priority%u_%s", i,
2646                                  rte_ixgbe_rxq_strings[stat].name);
2647                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2648                                         rte_ixgbe_rxq_strings[stat].offset +
2649                                         (sizeof(uint64_t) * i));
2650                         count++;
2651                 }
2652         }
2653
2654         /* TX Priority Stats */
2655         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
2656                 for (i = 0; i < 8; i++) {
2657                         snprintf(xstats[count].name, sizeof(xstats[count].name),
2658                                  "tx_priority%u_%s", i,
2659                                  rte_ixgbe_txq_strings[stat].name);
2660                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2661                                         rte_ixgbe_txq_strings[stat].offset +
2662                                         (sizeof(uint64_t) * i));
2663                         count++;
2664                 }
2665         }
2666
2667         return count;
2668 }
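/*
 * Usage sketch (illustrative, not part of the driver): an application would
 * normally reach this callback through rte_eth_xstats_get(), sizing the
 * array with a probe call first. This assumes the NULL/0 probe is accepted
 * by the ethdev layer in use; the driver itself only returns the required
 * count when n is too small.
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstats *xs = malloc(n * sizeof(*xs));
 *     if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n) {
 *             int i;
 *             for (i = 0; i < n; i++)
 *                     printf("%s: %" PRIu64 "\n", xs[i].name, xs[i].value);
 *     }
 *     free(xs);
 */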
2669
2670 static void
2671 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2672 {
2673         struct ixgbe_hw_stats *stats =
2674                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2675
2676         unsigned count = ixgbe_xstats_calc_num();
2677
2678         /* HW registers are cleared on read */
2679         ixgbe_dev_xstats_get(dev, NULL, count);
2680
2681         /* Reset software totals */
2682         memset(stats, 0, sizeof(*stats));
2683 }
2684
2685 static void
2686 ixgbevf_update_stats(struct rte_eth_dev *dev)
2687 {
2688         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2689         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
2690                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2691
2692         /* Good Rx packet, include VF loopback */
2693         UPDATE_VF_STAT(IXGBE_VFGPRC,
2694             hw_stats->last_vfgprc, hw_stats->vfgprc);
2695
2696         /* Good Rx octets, include VF loopback */
2697         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2698             hw_stats->last_vfgorc, hw_stats->vfgorc);
2699
2700         /* Good Tx packet, include VF loopback */
2701         UPDATE_VF_STAT(IXGBE_VFGPTC,
2702             hw_stats->last_vfgptc, hw_stats->vfgptc);
2703
2704         /* Good Tx octets, include VF loopback */
2705         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2706             hw_stats->last_vfgotc, hw_stats->vfgotc);
2707
2708         /* Rx Multicast Packet */
2709         UPDATE_VF_STAT(IXGBE_VFMPRC,
2710             hw_stats->last_vfmprc, hw_stats->vfmprc);
2711 }
2712
2713 static int
2714 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
2715                        unsigned n)
2716 {
2717         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2718                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2719         unsigned i;
2720
2721         if (n < IXGBEVF_NB_XSTATS)
2722                 return IXGBEVF_NB_XSTATS;
2723
2724         ixgbevf_update_stats(dev);
2725
2726         if (!xstats)
2727                 return 0;
2728
2729         /* Extended stats */
2730         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
2731                 snprintf(xstats[i].name, sizeof(xstats[i].name),
2732                          "%s", rte_ixgbevf_stats_strings[i].name);
2733                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
2734                         rte_ixgbevf_stats_strings[i].offset);
2735         }
2736
2737         return IXGBEVF_NB_XSTATS;
2738 }
2739
2740 static void
2741 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2742 {
2743         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2744                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2745
2746         ixgbevf_update_stats(dev);
2747
2748         if (stats == NULL)
2749                 return;
2750
2751         stats->ipackets = hw_stats->vfgprc;
2752         stats->ibytes = hw_stats->vfgorc;
2753         stats->opackets = hw_stats->vfgptc;
2754         stats->obytes = hw_stats->vfgotc;
2755         stats->imcasts = hw_stats->vfmprc;
2756         /* stats->imcasts should be removed as imcasts is deprecated */
2757 }
2758
2759 static void
2760 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
2761 {
2762         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
2763                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2764
2765         /* Sync HW register to the last stats */
2766         ixgbevf_dev_stats_get(dev, NULL);
2767
2768         /* reset HW current stats */
2769         hw_stats->vfgprc = 0;
2770         hw_stats->vfgorc = 0;
2771         hw_stats->vfgptc = 0;
2772         hw_stats->vfgotc = 0;
2773         hw_stats->vfmprc = 0;
2774
2775 }
2776
2777 static void
2778 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2779 {
2780         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2781
2782         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2783         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2784         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
2785         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
2786         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2787         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2788         dev_info->max_vfs = dev->pci_dev->max_vfs;
2789         if (hw->mac.type == ixgbe_mac_82598EB)
2790                 dev_info->max_vmdq_pools = ETH_16_POOLS;
2791         else
2792                 dev_info->max_vmdq_pools = ETH_64_POOLS;
2793         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2794         dev_info->rx_offload_capa =
2795                 DEV_RX_OFFLOAD_VLAN_STRIP |
2796                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2797                 DEV_RX_OFFLOAD_UDP_CKSUM  |
2798                 DEV_RX_OFFLOAD_TCP_CKSUM;
2799
2800         /*
2801          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
2802          * mode.
2803          */
2804         if ((hw->mac.type == ixgbe_mac_82599EB ||
2805              hw->mac.type == ixgbe_mac_X540) &&
2806             !RTE_ETH_DEV_SRIOV(dev).active)
2807                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
2808
2809         dev_info->tx_offload_capa =
2810                 DEV_TX_OFFLOAD_VLAN_INSERT |
2811                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2812                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2813                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2814                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2815                 DEV_TX_OFFLOAD_TCP_TSO;
2816
2817         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2818                 .rx_thresh = {
2819                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2820                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2821                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2822                 },
2823                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2824                 .rx_drop_en = 0,
2825         };
2826
2827         dev_info->default_txconf = (struct rte_eth_txconf) {
2828                 .tx_thresh = {
2829                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2830                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2831                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2832                 },
2833                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2834                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2835                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2836                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2837         };
2838
2839         dev_info->rx_desc_lim = rx_desc_lim;
2840         dev_info->tx_desc_lim = tx_desc_lim;
2841
2842         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2843         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
2844         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
2845 }
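/*
 * Illustrative application-side check (not part of the driver): query the
 * capabilities advertised above before enabling an offload. Assumes port_id
 * refers to a probed ixgbe port.
 *
 *     struct rte_eth_dev_info info;
 *     rte_eth_dev_info_get(port_id, &info);
 *     if (info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 *             printf("port %u supports LRO\n", port_id);
 */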
2846
2847 static void
2848 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
2849                      struct rte_eth_dev_info *dev_info)
2850 {
2851         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2852
2853         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2854         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2855         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
2856         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
2857         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2858         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2859         dev_info->max_vfs = dev->pci_dev->max_vfs;
2860         if (hw->mac.type == ixgbe_mac_82598EB)
2861                 dev_info->max_vmdq_pools = ETH_16_POOLS;
2862         else
2863                 dev_info->max_vmdq_pools = ETH_64_POOLS;
2864         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
2865                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2866                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
2867                                 DEV_RX_OFFLOAD_TCP_CKSUM;
2868         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
2869                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2870                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2871                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2872                                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2873                                 DEV_TX_OFFLOAD_TCP_TSO;
2874
2875         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2876                 .rx_thresh = {
2877                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2878                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2879                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2880                 },
2881                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2882                 .rx_drop_en = 0,
2883         };
2884
2885         dev_info->default_txconf = (struct rte_eth_txconf) {
2886                 .tx_thresh = {
2887                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2888                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2889                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2890                 },
2891                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2892                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2893                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2894                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2895         };
2896
2897         dev_info->rx_desc_lim = rx_desc_lim;
2898         dev_info->tx_desc_lim = tx_desc_lim;
2899 }
2900
2901 /* return 0 if link status changed, -1 if not changed */
2902 static int
2903 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2904 {
2905         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2906         struct rte_eth_link link, old;
2907         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
2908         int link_up;
2909         int diag;
2910
2911         link.link_status = 0;
2912         link.link_speed = 0;
2913         link.link_duplex = 0;
2914         memset(&old, 0, sizeof(old));
2915         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
2916
2917         hw->mac.get_link_status = true;
2918
2919         /* don't wait to complete when the caller asks not to, or when the LSC interrupt is enabled */
2920         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2921                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
2922         else
2923                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
2924
2925         if (diag != 0) {
2926                 link.link_speed = ETH_LINK_SPEED_100;
2927                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2928                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2929                 if (link.link_status == old.link_status)
2930                         return -1;
2931                 return 0;
2932         }
2933
2934         if (link_up == 0) {
2935                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2936                 if (link.link_status == old.link_status)
2937                         return -1;
2938                 return 0;
2939         }
2940         link.link_status = 1;
2941         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2942
2943         switch (link_speed) {
2944         default:
2945         case IXGBE_LINK_SPEED_UNKNOWN:
2946                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2947                 link.link_speed = ETH_LINK_SPEED_100;
2948                 break;
2949
2950         case IXGBE_LINK_SPEED_100_FULL:
2951                 link.link_speed = ETH_LINK_SPEED_100;
2952                 break;
2953
2954         case IXGBE_LINK_SPEED_1GB_FULL:
2955                 link.link_speed = ETH_LINK_SPEED_1000;
2956                 break;
2957
2958         case IXGBE_LINK_SPEED_10GB_FULL:
2959                 link.link_speed = ETH_LINK_SPEED_10000;
2960                 break;
2961         }
2962         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2963
2964         if (link.link_status == old.link_status)
2965                 return -1;
2966
2967         return 0;
2968 }
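/*
 * Illustrative link query from an application (not part of the driver);
 * rte_eth_link_get_nowait() ends up here with wait_to_complete == 0.
 *
 *     struct rte_eth_link link;
 *     rte_eth_link_get_nowait(port_id, &link);
 *     if (link.link_status)
 *             printf("link up at %u Mbps\n", (unsigned)link.link_speed);
 */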
2969
2970 static void
2971 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2972 {
2973         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2974         uint32_t fctrl;
2975
2976         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2977         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2978         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2979 }
2980
2981 static void
2982 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2983 {
2984         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2985         uint32_t fctrl;
2986
2987         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2988         fctrl &= (~IXGBE_FCTRL_UPE);
2989         if (dev->data->all_multicast == 1)
2990                 fctrl |= IXGBE_FCTRL_MPE;
2991         else
2992                 fctrl &= (~IXGBE_FCTRL_MPE);
2993         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2994 }
2995
2996 static void
2997 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2998 {
2999         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3000         uint32_t fctrl;
3001
3002         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3003         fctrl |= IXGBE_FCTRL_MPE;
3004         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3005 }
3006
3007 static void
3008 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
3009 {
3010         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3011         uint32_t fctrl;
3012
3013         if (dev->data->promiscuous == 1)
3014                 return; /* must remain in all_multicast mode */
3015
3016         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3017         fctrl &= (~IXGBE_FCTRL_MPE);
3018         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3019 }
3020
3021 /**
3022  * It clears the interrupt causes and enables the interrupt.
3023  * It will be called only once during NIC initialization.
3024  *
3025  * @param dev
3026  *  Pointer to struct rte_eth_dev.
3027  *
3028  * @return
3029  *  - On success, zero.
3030  *  - On failure, a negative value.
3031  */
3032 static int
3033 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
3034 {
3035         struct ixgbe_interrupt *intr =
3036                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3037
3038         ixgbe_dev_link_status_print(dev);
3039         intr->mask |= IXGBE_EICR_LSC;
3040
3041         return 0;
3042 }
3043
3044 /**
3045  * It clears the interrupt causes and enables the interrupt.
3046  * It will be called only once during NIC initialization.
3047  *
3048  * @param dev
3049  *  Pointer to struct rte_eth_dev.
3050  *
3051  * @return
3052  *  - On success, zero.
3053  *  - On failure, a negative value.
3054  */
3055 static int
3056 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
3057 {
3058         struct ixgbe_interrupt *intr =
3059                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3060
3061         intr->mask |= IXGBE_EICR_RTX_QUEUE;
3062
3063         return 0;
3064 }
3065
3066 /*
3067  * It reads EICR and sets a flag (IXGBE_EICR_LSC) for link_update.
3068  *
3069  * @param dev
3070  *  Pointer to struct rte_eth_dev.
3071  *
3072  * @return
3073  *  - On success, zero.
3074  *  - On failure, a negative value.
3075  */
3076 static int
3077 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
3078 {
3079         uint32_t eicr;
3080         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3081         struct ixgbe_interrupt *intr =
3082                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3083
3084         /* clear all cause mask */
3085         ixgbe_disable_intr(hw);
3086
3087         /* read-on-clear nic registers here */
3088         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3089         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
3090
3091         intr->flags = 0;
3092
3093         /* set flag for async link update */
3094         if (eicr & IXGBE_EICR_LSC)
3095                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3096
3097         if (eicr & IXGBE_EICR_MAILBOX)
3098                 intr->flags |= IXGBE_FLAG_MAILBOX;
3099
3100         return 0;
3101 }
3102
3103 /**
3104  * It gets and then prints the link status.
3105  *
3106  * @param dev
3107  *  Pointer to struct rte_eth_dev.
3108  *
3109  * @return
3110  *  void
3112  */
3113 static void
3114 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
3115 {
3116         struct rte_eth_link link;
3117
3118         memset(&link, 0, sizeof(link));
3119         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
3120         if (link.link_status) {
3121                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
3122                                         (int)(dev->data->port_id),
3123                                         (unsigned)link.link_speed,
3124                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
3125                                         "full-duplex" : "half-duplex");
3126         } else {
3127                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
3128                                 (int)(dev->data->port_id));
3129         }
3130         PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
3131                                 dev->pci_dev->addr.domain,
3132                                 dev->pci_dev->addr.bus,
3133                                 dev->pci_dev->addr.devid,
3134                                 dev->pci_dev->addr.function);
3135 }
3136
3137 /*
3138  * It executes link_update after knowing an interrupt occurred.
3139  *
3140  * @param dev
3141  *  Pointer to struct rte_eth_dev.
3142  *
3143  * @return
3144  *  - On success, zero.
3145  *  - On failure, a negative value.
3146  */
3147 static int
3148 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
3149 {
3150         struct ixgbe_interrupt *intr =
3151                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3152         int64_t timeout;
3153         struct rte_eth_link link;
3154         int intr_enable_delay = false;
3155
3156         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
3157
3158         if (intr->flags & IXGBE_FLAG_MAILBOX) {
3159                 ixgbe_pf_mbx_process(dev);
3160                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
3161         }
3162
3163         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3164                 /* get the link status before the link update, for use in the prediction below */
3165                 memset(&link, 0, sizeof(link));
3166                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
3167
3168                 ixgbe_dev_link_update(dev, 0);
3169
3170                 /* link is likely to come up */
3171                 if (!link.link_status)
3172                         /* handle it 1 sec later, to wait for it to become stable */
3173                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
3174                 /* link is likely to go down */
3175                 else
3176                         /* handle it 4 sec later, to wait for it to become stable */
3177                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
3178
3179                 ixgbe_dev_link_status_print(dev);
3180
3181                 intr_enable_delay = true;
3182         }
3183
3184         if (intr_enable_delay) {
3185                 if (rte_eal_alarm_set(timeout * 1000,
3186                                       ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
3187                         PMD_DRV_LOG(ERR, "Error setting alarm");
3188         } else {
3189                 PMD_DRV_LOG(DEBUG, "enable intr immediately");
3190                 ixgbe_enable_intr(dev);
3191                 rte_intr_enable(&(dev->pci_dev->intr_handle));
3192         }
3193
3194
3195         return 0;
3196 }
3197
3198 /**
3199  * Interrupt handler which shall be registered as the alarm callback for
3200  * delayed handling of a specific interrupt, waiting for the NIC state to
3201  * become stable. As the ixgbe interrupt state is not stable right after the
3202  * link goes down, it needs to wait 4 seconds to get a stable status.
3203  *
3204  * @param handle
3205  *  Pointer to interrupt handle.
3206  * @param param
3207  *  The address of parameter (struct rte_eth_dev *) registered before.
3208  *
3209  * @return
3210  *  void
3211  */
3212 static void
3213 ixgbe_dev_interrupt_delayed_handler(void *param)
3214 {
3215         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3216         struct ixgbe_interrupt *intr =
3217                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3218         struct ixgbe_hw *hw =
3219                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3220         uint32_t eicr;
3221
3222         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3223         if (eicr & IXGBE_EICR_MAILBOX)
3224                 ixgbe_pf_mbx_process(dev);
3225
3226         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3227                 ixgbe_dev_link_update(dev, 0);
3228                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3229                 ixgbe_dev_link_status_print(dev);
3230                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3231         }
3232
3233         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3234         ixgbe_enable_intr(dev);
3235         rte_intr_enable(&(dev->pci_dev->intr_handle));
3236 }
3237
3238 /**
3239  * Interrupt handler triggered by the NIC for handling a
3240  * specific interrupt.
3241  *
3242  * @param handle
3243  *  Pointer to interrupt handle.
3244  * @param param
3245  *  The address of parameter (struct rte_eth_dev *) registered before.
3246  *
3247  * @return
3248  *  void
3249  */
3250 static void
3251 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3252                             void *param)
3253 {
3254         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3255
3256         ixgbe_dev_interrupt_get_status(dev);
3257         ixgbe_dev_interrupt_action(dev);
3258 }
3259
3260 static int
3261 ixgbe_dev_led_on(struct rte_eth_dev *dev)
3262 {
3263         struct ixgbe_hw *hw;
3264
3265         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3266         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
3267 }
3268
3269 static int
3270 ixgbe_dev_led_off(struct rte_eth_dev *dev)
3271 {
3272         struct ixgbe_hw *hw;
3273
3274         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3275         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
3276 }
3277
3278 static int
3279 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3280 {
3281         struct ixgbe_hw *hw;
3282         uint32_t mflcn_reg;
3283         uint32_t fccfg_reg;
3284         int rx_pause;
3285         int tx_pause;
3286
3287         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3288
3289         fc_conf->pause_time = hw->fc.pause_time;
3290         fc_conf->high_water = hw->fc.high_water[0];
3291         fc_conf->low_water = hw->fc.low_water[0];
3292         fc_conf->send_xon = hw->fc.send_xon;
3293         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3294
3295         /*
3296          * Return rx_pause status according to actual setting of
3297          * MFLCN register.
3298          */
3299         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3300         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
3301                 rx_pause = 1;
3302         else
3303                 rx_pause = 0;
3304
3305         /*
3306          * Return tx_pause status according to actual setting of
3307          * FCCFG register.
3308          */
3309         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3310         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
3311                 tx_pause = 1;
3312         else
3313                 tx_pause = 0;
3314
3315         if (rx_pause && tx_pause)
3316                 fc_conf->mode = RTE_FC_FULL;
3317         else if (rx_pause)
3318                 fc_conf->mode = RTE_FC_RX_PAUSE;
3319         else if (tx_pause)
3320                 fc_conf->mode = RTE_FC_TX_PAUSE;
3321         else
3322                 fc_conf->mode = RTE_FC_NONE;
3323
3324         return 0;
3325 }
3326
3327 static int
3328 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3329 {
3330         struct ixgbe_hw *hw;
3331         int err;
3332         uint32_t rx_buf_size;
3333         uint32_t max_high_water;
3334         uint32_t mflcn;
3335         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
3336                 ixgbe_fc_none,
3337                 ixgbe_fc_rx_pause,
3338                 ixgbe_fc_tx_pause,
3339                 ixgbe_fc_full
3340         };
3341
3342         PMD_INIT_FUNC_TRACE();
3343
3344         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3345         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
3346         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3347
3348         /*
3349          * Reserve at least one Ethernet frame for the watermarks;
3350          * high_water/low_water are in kilobytes for ixgbe
3351          */
3352         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
3353         if ((fc_conf->high_water > max_high_water) ||
3354                 (fc_conf->high_water < fc_conf->low_water)) {
3355                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3356                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3357                 return -EINVAL;
3358         }
3359
3360         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
3361         hw->fc.pause_time     = fc_conf->pause_time;
3362         hw->fc.high_water[0]  = fc_conf->high_water;
3363         hw->fc.low_water[0]   = fc_conf->low_water;
3364         hw->fc.send_xon       = fc_conf->send_xon;
3365         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3366
3367         err = ixgbe_fc_enable(hw);
3368
3369         /* Not negotiated is not an error case */
3370         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
3371
3372                 /* check if we want to forward MAC frames - driver doesn't have native
3373                  * capability to do that, so we'll write the registers ourselves */
3374
3375                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3376
3377                 /* set or clear MFLCN.PMCF bit depending on configuration */
3378                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3379                         mflcn |= IXGBE_MFLCN_PMCF;
3380                 else
3381                         mflcn &= ~IXGBE_MFLCN_PMCF;
3382
3383                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
3384                 IXGBE_WRITE_FLUSH(hw);
3385
3386                 return 0;
3387         }
3388
3389         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
3390         return -EIO;
3391 }
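/*
 * Illustrative configuration sketch (not part of the driver): request full
 * 802.3x flow control from an application. Water marks are in KB, as
 * validated above; the values shown are assumptions for the example only.
 *
 *     struct rte_eth_fc_conf fc = {
 *             .mode = RTE_FC_FULL,
 *             .high_water = 0x60,
 *             .low_water = 0x40,
 *             .pause_time = 0x680,
 *             .send_xon = 1,
 *             .autoneg = 1,
 *     };
 *     if (rte_eth_dev_flow_ctrl_set(port_id, &fc) != 0)
 *             printf("flow control setup failed\n");
 */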
3392
3393 /**
3394  *  ixgbe_pfc_enable_generic - Enable flow control
3395  *  @hw: pointer to hardware structure
3396  *  @tc_num: traffic class number
3397  *  Enable flow control according to the current settings.
3398  */
3399 static int
3400 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
3401 {
3402         int ret_val = 0;
3403         uint32_t mflcn_reg, fccfg_reg;
3404         uint32_t reg;
3405         uint32_t fcrtl, fcrth;
3406         uint8_t i;
3407         uint8_t nb_rx_en;
3408
3409         /* Validate the water mark configuration */
3410         if (!hw->fc.pause_time) {
3411                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3412                 goto out;
3413         }
3414
3415         /* Low water mark of zero causes XOFF floods */
3416         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
3417                 /* High/Low water cannot be 0 */
3418                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
3419                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3420                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3421                         goto out;
3422                 }
3423
3424                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
3425                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3426                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3427                         goto out;
3428                 }
3429         }
3430         /* Negotiate the fc mode to use */
3431         ixgbe_fc_autoneg(hw);
3432
3433         /* Disable any previous flow control settings */
3434         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3435         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
3436
3437         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3438         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
3439
3440         switch (hw->fc.current_mode) {
3441         case ixgbe_fc_none:
3442                 /*
3443                  * If more than one RX Priority Flow Control is enabled,
3444                  * the TX pause cannot be disabled
3445                  */
3446                 nb_rx_en = 0;
3447                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3448                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3449                         if (reg & IXGBE_FCRTH_FCEN)
3450                                 nb_rx_en++;
3451                 }
3452                 if (nb_rx_en > 1)
3453                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3454                 break;
3455         case ixgbe_fc_rx_pause:
3456                 /*
3457                  * Rx Flow control is enabled and Tx Flow control is
3458                  * disabled by software override. Since there really
3459                  * isn't a way to advertise that we are capable of RX
3460                  * Pause ONLY, we will advertise that we support both
3461                  * symmetric and asymmetric Rx PAUSE.  Later, we will
3462                  * disable the adapter's ability to send PAUSE frames.
3463                  */
3464                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
3465                 /*
3466                  * If more than one RX Priority Flow Control is enabled,
3467                  * the TX pause cannot be disabled
3468                  */
3469                 nb_rx_en = 0;
3470                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3471                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3472                         if (reg & IXGBE_FCRTH_FCEN)
3473                                 nb_rx_en++;
3474                 }
3475                 if (nb_rx_en > 1)
3476                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3477                 break;
3478         case ixgbe_fc_tx_pause:
3479                 /*
3480                  * Tx Flow control is enabled, and Rx Flow control is
3481                  * disabled by software override.
3482                  */
3483                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3484                 break;
3485         case ixgbe_fc_full:
3486                 /* Flow control (both Rx and Tx) is enabled by SW override. */
3487                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
3488                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3489                 break;
3490         default:
3491                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
3492                 ret_val = IXGBE_ERR_CONFIG;
3493                 goto out;
3494                 break;
3495         }
3496
3497         /* Set 802.3x based flow control settings. */
3498         mflcn_reg |= IXGBE_MFLCN_DPF;
3499         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
3500         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
3501
3502         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
3503         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
3504                 hw->fc.high_water[tc_num]) {
3505                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
3506                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
3507                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
3508         } else {
3509                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
3510                 /*
3511                  * In order to prevent Tx hangs when the internal Tx
3512                  * switch is enabled we must set the high water mark
3513                  * to the maximum FCRTH value.  This allows the Tx
3514                  * switch to function even under heavy Rx workloads.
3515                  */
3516                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
3517         }
3518         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
3519
3520         /* Configure pause time (2 TCs per register) */
3521         reg = hw->fc.pause_time * 0x00010001;
3522         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
3523                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
3524
3525         /* Configure flow control refresh threshold value */
3526         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
3527
3528 out:
3529         return ret_val;
3530 }
3531
3532 static int
3533 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
3534 {
3535         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3536         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
3537
3538         if (hw->mac.type != ixgbe_mac_82598EB) {
3539                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
3540         }
3541         return ret_val;
3542 }
3543
3544 static int
3545 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
3546 {
3547         int err;
3548         uint32_t rx_buf_size;
3549         uint32_t max_high_water;
3550         uint8_t tc_num;
3551         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
3552         struct ixgbe_hw *hw =
3553                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3554         struct ixgbe_dcb_config *dcb_config =
3555                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3556
3557         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
3558                 ixgbe_fc_none,
3559                 ixgbe_fc_rx_pause,
3560                 ixgbe_fc_tx_pause,
3561                 ixgbe_fc_full
3562         };
3563
3564         PMD_INIT_FUNC_TRACE();
3565
3566         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3567         tc_num = map[pfc_conf->priority];
3568         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
3569         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3570         /*
3571          * Reserve at least one Ethernet frame for the watermarks;
3572          * high_water/low_water are in kilobytes for ixgbe
3573          */
3574         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
3575         if ((pfc_conf->fc.high_water > max_high_water) ||
3576             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
3577                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3578                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3579                 return -EINVAL;
3580         }
3581
3582         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
3583         hw->fc.pause_time = pfc_conf->fc.pause_time;
3584         hw->fc.send_xon = pfc_conf->fc.send_xon;
3585         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
3586         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3587
3588         err = ixgbe_dcb_pfc_enable(dev, tc_num);
3589
3590         /* Not negotiated is not an error case */
3591         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
3592                 return 0;
3593
3594         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
3595         return -EIO;
3596 }
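/*
 * Illustrative PFC request from an application (not part of the driver):
 * enable priority flow control for user priority 3. The values are
 * assumptions for the example only and are validated against the Rx packet
 * buffer size above.
 *
 *     struct rte_eth_pfc_conf pfc = {
 *             .fc = {
 *                     .mode = RTE_FC_FULL,
 *                     .high_water = 0x60,
 *                     .low_water = 0x40,
 *                     .pause_time = 0x680,
 *                     .send_xon = 1,
 *             },
 *             .priority = 3,
 *     };
 *     rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc);
 */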
3597
3598 static int
3599 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3600                           struct rte_eth_rss_reta_entry64 *reta_conf,
3601                           uint16_t reta_size)
3602 {
3603         uint8_t i, j, mask;
3604         uint32_t reta, r;
3605         uint16_t idx, shift;
3606         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3607         uint16_t sp_reta_size;
3608         uint32_t reta_reg;
3609
3610         PMD_INIT_FUNC_TRACE();
3611
3612         if (!ixgbe_rss_update_sp(hw->mac.type)) {
3613                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3614                         "NIC.");
3615                 return -ENOTSUP;
3616         }
3617
3618         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3619         if (reta_size != sp_reta_size) {
3620                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3621                         "(%d) doesn't match the number supported by hardware "
3622                         "(%d)\n", reta_size, sp_reta_size);
3623                 return -EINVAL;
3624         }
3625
3626         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
3627                 idx = i / RTE_RETA_GROUP_SIZE;
3628                 shift = i % RTE_RETA_GROUP_SIZE;
3629                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3630                                                 IXGBE_4_BIT_MASK);
3631                 if (!mask)
3632                         continue;
3633                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3634                 if (mask == IXGBE_4_BIT_MASK)
3635                         r = 0;
3636                 else
3637                         r = IXGBE_READ_REG(hw, reta_reg);
3638                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
3639                         if (mask & (0x1 << j))
3640                                 reta |= reta_conf[idx].reta[shift + j] <<
3641                                                         (CHAR_BIT * j);
3642                         else
3643                                 reta |= r & (IXGBE_8_BIT_MASK <<
3644                                                 (CHAR_BIT * j));
3645                 }
3646                 IXGBE_WRITE_REG(hw, reta_reg, reta);
3647         }
3648
3649         return 0;
3650 }
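/*
 * Illustrative RETA update from an application (not part of the driver):
 * spread the redirection entries over the first two Rx queues. Assumes
 * dev_info.reta_size was obtained from rte_eth_dev_info_get() and does not
 * exceed 8 * RTE_RETA_GROUP_SIZE entries.
 *
 *     struct rte_eth_rss_reta_entry64 reta[8];
 *     uint16_t i;
 *     memset(reta, 0, sizeof(reta));
 *     for (i = 0; i < dev_info.reta_size; i++) {
 *             reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % 2;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta, dev_info.reta_size);
 */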
3651
3652 static int
3653 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3654                          struct rte_eth_rss_reta_entry64 *reta_conf,
3655                          uint16_t reta_size)
3656 {
3657         uint8_t i, j, mask;
3658         uint32_t reta;
3659         uint16_t idx, shift;
3660         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3661         uint16_t sp_reta_size;
3662         uint32_t reta_reg;
3663
3664         PMD_INIT_FUNC_TRACE();
3665         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3666         if (reta_size != sp_reta_size) {
3667                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3668                         "(%d) doesn't match the number supported by hardware "
3669                         "(%d)\n", reta_size, sp_reta_size);
3670                 return -EINVAL;
3671         }
3672
3673         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
3674                 idx = i / RTE_RETA_GROUP_SIZE;
3675                 shift = i % RTE_RETA_GROUP_SIZE;
3676                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3677                                                 IXGBE_4_BIT_MASK);
3678                 if (!mask)
3679                         continue;
3680
3681                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3682                 reta = IXGBE_READ_REG(hw, reta_reg);
3683                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
3684                         if (mask & (0x1 << j))
3685                                 reta_conf[idx].reta[shift + j] =
3686                                         ((reta >> (CHAR_BIT * j)) &
3687                                                 IXGBE_8_BIT_MASK);
3688                 }
3689         }
3690
3691         return 0;
3692 }
3693
3694 static void
3695 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
3696                                 uint32_t index, uint32_t pool)
3697 {
3698         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3699         uint32_t enable_addr = 1;
3700
3701         ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
3702 }
3703
3704 static void
3705 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3706 {
3707         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3708
3709         ixgbe_clear_rar(hw, index);
3710 }
3711
3712 static void
3713 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
3714 {
3715         ixgbe_remove_rar(dev, 0);
3716
3717         ixgbe_add_rar(dev, addr, 0, 0);
3718 }
3719
3720 static int
3721 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3722 {
3723         uint32_t hlreg0;
3724         uint32_t maxfrs;
3725         struct ixgbe_hw *hw;
3726         struct rte_eth_dev_info dev_info;
3727         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3728
3729         ixgbe_dev_info_get(dev, &dev_info);
3730
3731         /* check that mtu is within the allowed range */
3732         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
3733                 return -EINVAL;
3734
3735         /* Refuse an MTU that requires scattered-packet support when that
3736          * feature has not been enabled beforehand. */
3737         if (!dev->data->scattered_rx &&
3738             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
3739              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
3740                 return -EINVAL;
3741
3742         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3743         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3744
3745         /* switch to jumbo mode if needed */
3746         if (frame_size > ETHER_MAX_LEN) {
3747                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3748                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3749         } else {
3750                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3751                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
3752         }
3753         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3754
3755         /* update max frame size */
3756         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3757
3758         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
3759         maxfrs &= 0x0000FFFF;
3760         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
3761         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
3762
3763         return 0;
3764 }
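/*
 * Illustrative MTU change from an application (not part of the driver);
 * a jumbo MTU such as 9000 assumes scattered Rx or sufficiently large
 * mbufs, per the check above.
 *
 *     if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *             printf("could not set jumbo MTU\n");
 */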
3765
3766 /*
3767  * Virtual Function operations
3768  */
3769 static void
3770 ixgbevf_intr_disable(struct ixgbe_hw *hw)
3771 {
3772         PMD_INIT_FUNC_TRACE();
3773
3774         /* Clear the interrupt mask to stop interrupts from being generated */
3775         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
3776
3777         IXGBE_WRITE_FLUSH(hw);
3778 }
3779
3780 static void
3781 ixgbevf_intr_enable(struct ixgbe_hw *hw)
3782 {
3783         PMD_INIT_FUNC_TRACE();
3784
3785         /* VF enable interrupt autoclean */
3786         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
3787         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
3788         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
3789
3790         IXGBE_WRITE_FLUSH(hw);
3791 }
3792
3793 static int
3794 ixgbevf_dev_configure(struct rte_eth_dev *dev)
3795 {
3796         struct rte_eth_conf* conf = &dev->data->dev_conf;
3797         struct ixgbe_adapter *adapter =
3798                         (struct ixgbe_adapter *)dev->data->dev_private;
3799
3800         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3801                      dev->data->port_id);
3802
3803         /*
3804          * The VF has no ability to enable/disable HW CRC stripping;
3805          * keep the behavior consistent with the host PF
3806          */
3807 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
3808         if (!conf->rxmode.hw_strip_crc) {
3809                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
3810                 conf->rxmode.hw_strip_crc = 1;
3811         }
3812 #else
3813         if (conf->rxmode.hw_strip_crc) {
3814                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
3815                 conf->rxmode.hw_strip_crc = 0;
3816         }
3817 #endif
3818
3819         /*
3820          * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
3821          * allocation or vector Rx preconditions, the corresponding flag will be reset.
3822          */
3823         adapter->rx_bulk_alloc_allowed = true;
3824         adapter->rx_vec_allowed = true;
3825
3826         return 0;
3827 }
3828
3829 static int
3830 ixgbevf_dev_start(struct rte_eth_dev *dev)
3831 {
3832         struct ixgbe_hw *hw =
3833                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3834         uint32_t intr_vector = 0;
3835         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3836
3837         int err, mask = 0;
3838
3839         PMD_INIT_FUNC_TRACE();
3840
3841         hw->mac.ops.reset_hw(hw);
3842         hw->mac.get_link_status = true;
3843
3844         /* negotiate mailbox API version to use with the PF. */
3845         ixgbevf_negotiate_api(hw);
3846
3847         ixgbevf_dev_tx_init(dev);
3848
3849         /* This can fail when allocating mbufs for descriptor rings */
3850         err = ixgbevf_dev_rx_init(dev);
3851         if (err) {
3852                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
3853                 ixgbe_dev_clear_queues(dev);
3854                 return err;
3855         }
3856
3857         /* Set vfta */
3858         ixgbevf_set_vfta_all(dev,1);
3859
3860         /* Set HW strip */
3861         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
3862                 ETH_VLAN_EXTEND_MASK;
3863         ixgbevf_vlan_offload_set(dev, mask);
3864
3865         ixgbevf_dev_rxtx_start(dev);
3866
3867         /* check and configure queue intr-vector mapping */
3868         if (dev->data->dev_conf.intr_conf.rxq != 0) {
3869                 intr_vector = dev->data->nb_rx_queues;
3870                 if (rte_intr_efd_enable(intr_handle, intr_vector))
3871                         return -1;
3872         }
3873
3874         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3875                 intr_handle->intr_vec =
3876                         rte_zmalloc("intr_vec",
3877                                     dev->data->nb_rx_queues * sizeof(int), 0);
3878                 if (intr_handle->intr_vec == NULL) {
3879                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
3880                                      " intr_vec\n", dev->data->nb_rx_queues);
3881                         return -ENOMEM;
3882                 }
3883         }
3884         ixgbevf_configure_msix(dev);
3885
3886         rte_intr_enable(intr_handle);
3887
3888         /* Re-enable interrupt for VF */
3889         ixgbevf_intr_enable(hw);
3890
3891         return 0;
3892 }
3893
3894 static void
3895 ixgbevf_dev_stop(struct rte_eth_dev *dev)
3896 {
3897         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3898         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3899
3900         PMD_INIT_FUNC_TRACE();
3901
3902         hw->adapter_stopped = 1;
3903         ixgbe_stop_adapter(hw);
3904
3905         /*
3906           * Clear what we set, but keep shadow_vfta so it can be
3907           * restored after the device starts
3908           */
3909         ixgbevf_set_vfta_all(dev,0);
3910
3911         /* Clear stored conf */
3912         dev->data->scattered_rx = 0;
3913
3914         ixgbe_dev_clear_queues(dev);
3915
3916         /* disable intr eventfd mapping */
3917         rte_intr_disable(intr_handle);
3918
3919         /* Clean datapath event and queue/vec mapping */
3920         rte_intr_efd_disable(intr_handle);
3921         if (intr_handle->intr_vec != NULL) {
3922                 rte_free(intr_handle->intr_vec);
3923                 intr_handle->intr_vec = NULL;
3924         }
3925 }
3926
3927 static void
3928 ixgbevf_dev_close(struct rte_eth_dev *dev)
3929 {
3930         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3931
3932         PMD_INIT_FUNC_TRACE();
3933
3934         ixgbe_reset_hw(hw);
3935
3936         ixgbevf_dev_stop(dev);
3937
3938         ixgbe_dev_free_queues(dev);
3939
3940         /* reprogram the RAR[0] in case user changed it. */
3941         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
3942 }
3943
3944 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3945 {
3946         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3947         struct ixgbe_vfta * shadow_vfta =
3948                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3949         int i = 0, j = 0, vfta = 0, mask = 1;
3950
3951         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3952                 vfta = shadow_vfta->vfta[i];
3953                 if (vfta) {
3954                         mask = 1;
3955                         for (j = 0; j < 32; j++) {
3956                                 if (vfta & mask)
3957                                         ixgbe_set_vfta(hw, (i << 5) + j, 0, on);
3958                                 mask <<= 1;
3959                         }
3960                 }
3961         }
3962
3963 }
3964
3965 static int
3966 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3967 {
3968         struct ixgbe_hw *hw =
3969                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3970         struct ixgbe_vfta * shadow_vfta =
3971                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3972         uint32_t vid_idx = 0;
3973         uint32_t vid_bit = 0;
3974         int ret = 0;
3975
3976         PMD_INIT_FUNC_TRACE();
3977
3978         /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
3979         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
3980         if (ret) {
3981                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3982                 return ret;
3983         }
3984         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3985         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3986
3987         /* Save what we set and restore it after device reset */
3988         if (on)
3989                 shadow_vfta->vfta[vid_idx] |= vid_bit;
3990         else
3991                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
3992
3993         return 0;
3994 }
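/*
 * Illustrative VLAN filter request from an application (not part of the
 * driver): rte_eth_dev_vlan_filter() ends up here for a VF port, and the
 * ixgbe_set_vfta() call above is expected to relay the request to the PF.
 * Assumes hw_vlan_filter was enabled in the Rx mode configuration.
 *
 *     if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
 *             printf("VLAN filter request for VLAN 100 failed\n");
 */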
3995
3996 static void
3997 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
3998 {
3999         struct ixgbe_hw *hw =
4000                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4001         uint32_t ctrl;
4002
4003         PMD_INIT_FUNC_TRACE();
4004
4005         if (queue >= hw->mac.max_rx_queues)
4006                 return;
4007
4008         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
4009         if (on)
4010                 ctrl |= IXGBE_RXDCTL_VME;
4011         else
4012                 ctrl &= ~IXGBE_RXDCTL_VME;
4013         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
4014
4015         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
4016 }
4017
4018 static void
4019 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4020 {
4021         struct ixgbe_hw *hw =
4022                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4023         uint16_t i;
4024         int on = 0;
4025
4026         /* The VF only supports the HW VLAN strip feature; other offloads are not supported */
4027         if (mask & ETH_VLAN_STRIP_MASK) {
4028                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
4029
4030                 for (i = 0; i < hw->mac.max_rx_queues; i++)
4031                         ixgbevf_vlan_strip_queue_set(dev, i, on);
4032         }
4033 }
4034
4035 static int
4036 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
4037 {
4038         uint32_t reg_val;
4039
4040         /* we only need to do this if VMDq is enabled */
4041         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4042         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
4043                 PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
4044                 return -1;
4045         }
4046
4047         return 0;
4048 }
4049
4050 static uint32_t
4051 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
4052 {
4053         uint32_t vector = 0;
4054         switch (hw->mac.mc_filter_type) {
4055         case 0:   /* use bits [47:36] of the address */
4056                 vector = ((uc_addr->addr_bytes[4] >> 4) |
4057                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
4058                 break;
4059         case 1:   /* use bits [46:35] of the address */
4060                 vector = ((uc_addr->addr_bytes[4] >> 3) |
4061                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
4062                 break;
4063         case 2:   /* use bits [45:34] of the address */
4064                 vector = ((uc_addr->addr_bytes[4] >> 2) |
4065                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
4066                 break;
4067         case 3:   /* use bits [43:32] of the address */
4068                 vector = ((uc_addr->addr_bytes[4]) |
4069                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
4070                 break;
4071         default:  /* Invalid mc_filter_type */
4072                 break;
4073         }
4074
4075         /* vector can only be 12-bits or boundary will be exceeded */
4076         vector &= 0xFFF;
4077         return vector;
4078 }
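
/*
 * Editor's note: a worked example, not driver code.  With mc_filter_type == 0
 * (case 0 above) the 12-bit vector is built from MAC address bytes 4 and 5:
 *
 *     // MAC 00:11:22:33:44:55 -> addr_bytes[4] = 0x44, addr_bytes[5] = 0x55
 *     vector    = (0x44 >> 4) | (0x55 << 4);   // = 0x554
 *     uta_idx   = (0x554 >> 5) & 0x7F;         // = 42
 *     uta_shift = 0x554 & 0x1F;                // = 20
 *
 * i.e. ixgbe_uc_hash_table_set() below would toggle bit 20 of UTA[42] for
 * that address.
 */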
4079
4080 static int
4081 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
4082                                uint8_t on)
4083 {
4084         uint32_t vector;
4085         uint32_t uta_idx;
4086         uint32_t reg_val;
4087         uint32_t uta_shift;
4088         uint32_t rc;
4089         const uint32_t ixgbe_uta_idx_mask = 0x7F;
4090         const uint32_t ixgbe_uta_bit_shift = 5;
4091         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
4092         const uint32_t bit1 = 0x1;
4093
4094         struct ixgbe_hw *hw =
4095                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4096         struct ixgbe_uta_info *uta_info =
4097                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
4098
4099         /* The UTA table only exists on 82599 hardware and newer */
4100         if (hw->mac.type < ixgbe_mac_82599EB)
4101                 return -ENOTSUP;
4102
4103         vector = ixgbe_uta_vector(hw,mac_addr);
4104         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
4105         uta_shift = vector & ixgbe_uta_bit_mask;
4106
4107         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
4108         if(rc == on)
4109                 return 0;
4110
4111         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
4112         if (on) {
4113                 uta_info->uta_in_use++;
4114                 reg_val |= (bit1 << uta_shift);
4115                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
4116         } else {
4117                 uta_info->uta_in_use--;
4118                 reg_val &= ~(bit1 << uta_shift);
4119                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
4120         }
4121
4122         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
4123
4124         if (uta_info->uta_in_use > 0)
4125                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
4126                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
4127         else
4128                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
4129
4130         return 0;
4131 }
4132
4133 static int
4134 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
4135 {
4136         int i;
4137         struct ixgbe_hw *hw =
4138                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4139         struct ixgbe_uta_info *uta_info =
4140                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
4141
4142         /* The UTA table only exists on 82599 hardware and newer */
4143         if (hw->mac.type < ixgbe_mac_82599EB)
4144                 return -ENOTSUP;
4145
4146         if(on) {
4147                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
4148                         uta_info->uta_shadow[i] = ~0;
4149                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
4150                 }
4151         } else {
4152                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
4153                         uta_info->uta_shadow[i] = 0;
4154                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
4155                 }
4156         }
4157         return 0;
4158
4159 }
4160
4161 uint32_t
4162 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
4163 {
4164         uint32_t new_val = orig_val;
4165
4166         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
4167                 new_val |= IXGBE_VMOLR_AUPE;
4168         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
4169                 new_val |= IXGBE_VMOLR_ROMPE;
4170         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
4171                 new_val |= IXGBE_VMOLR_ROPE;
4172         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
4173                 new_val |= IXGBE_VMOLR_BAM;
4174         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
4175                 new_val |= IXGBE_VMOLR_MPE;
4176
4177         return new_val;
4178 }
4179
4180 static int
4181 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
4182                                uint16_t rx_mask, uint8_t on)
4183 {
4184         int val = 0;
4185
4186         struct ixgbe_hw *hw =
4187                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4188         uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4189
4190         if (hw->mac.type == ixgbe_mac_82598EB) {
4191                 PMD_INIT_LOG(ERR, "setting VF receive mode should be done"
4192                              " on 82599 hardware and newer");
4193                 return -ENOTSUP;
4194         }
4195         if (ixgbe_vmdq_mode_check(hw) < 0)
4196                 return -ENOTSUP;
4197
4198         val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
4199
4200         if (on)
4201                 vmolr |= val;
4202         else
4203                 vmolr &= ~val;
4204
4205         IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4206
4207         return 0;
4208 }
4209
4210 static int
4211 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4212 {
4213         uint32_t reg,addr;
4214         uint32_t val;
4215         const uint8_t bit1 = 0x1;
4216
4217         struct ixgbe_hw *hw =
4218                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4219
4220         if (ixgbe_vmdq_mode_check(hw) < 0)
4221                 return -ENOTSUP;
4222
4223         addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
4224         reg = IXGBE_READ_REG(hw, addr);
4225         val = bit1 << pool;
4226
4227         if (on)
4228                 reg |= val;
4229         else
4230                 reg &= ~val;
4231
4232         IXGBE_WRITE_REG(hw, addr,reg);
4233
4234         return 0;
4235 }
4236
4237 static int
4238 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4239 {
4240         uint32_t reg,addr;
4241         uint32_t val;
4242         const uint8_t bit1 = 0x1;
4243
4244         struct ixgbe_hw *hw =
4245                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4246
4247         if (ixgbe_vmdq_mode_check(hw) < 0)
4248                 return -ENOTSUP;
4249
4250         addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
4251         reg = IXGBE_READ_REG(hw, addr);
4252         val = bit1 << pool;
4253
4254         if (on)
4255                 reg |= val;
4256         else
4257                 reg &= ~val;
4258
4259         IXGBE_WRITE_REG(hw, addr,reg);
4260
4261         return 0;
4262 }
4263
4264 static int
4265 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
4266                         uint64_t pool_mask, uint8_t vlan_on)
4267 {
4268         int ret = 0;
4269         uint16_t pool_idx;
4270         struct ixgbe_hw *hw =
4271                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4272
4273         if (ixgbe_vmdq_mode_check(hw) < 0)
4274                 return -ENOTSUP;
4275         for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
4276                 if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
4277                         ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
4278                         if (ret < 0)
4279                                 return ret;
                     }
4280         }
4281
4282         return ret;
4283 }
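
/*
 * Editor's note: an illustrative sketch, not driver code.  pool_mask is a
 * bitmap of VMDq pools; each set bit triggers one set_vfta() call for that
 * pool index.  For example, adding VLAN 100 to pools 3 and 5 only:
 *
 *     uint64_t pool_mask = (1ULL << 3) | (1ULL << 5);
 *     ret = ixgbe_set_pool_vlan_filter(dev, 100, pool_mask, 1);
 *
 * Passing vlan_on = 0 with the same mask removes the filter from those pools.
 */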
4284
4285 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
4286 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
4287 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
4288 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
4289 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
4290         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
4291         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
4292
4293 static int
4294 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
4295                         struct rte_eth_mirror_conf *mirror_conf,
4296                         uint8_t rule_id, uint8_t on)
4297 {
4298         uint32_t mr_ctl,vlvf;
4299         uint32_t mp_lsb = 0;
4300         uint32_t mv_msb = 0;
4301         uint32_t mv_lsb = 0;
4302         uint32_t mp_msb = 0;
4303         uint8_t i = 0;
4304         int reg_index = 0;
4305         uint64_t vlan_mask = 0;
4306
4307         const uint8_t pool_mask_offset = 32;
4308         const uint8_t vlan_mask_offset = 32;
4309         const uint8_t dst_pool_offset = 8;
4310         const uint8_t rule_mr_offset  = 4;
4311         const uint8_t mirror_rule_mask= 0x0F;
4312
4313         struct ixgbe_mirror_info *mr_info =
4314                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
4315         struct ixgbe_hw *hw =
4316                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4317         uint8_t mirror_type = 0;
4318
4319         if (ixgbe_vmdq_mode_check(hw) < 0)
4320                 return -ENOTSUP;
4321
4322         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
4323                 return -EINVAL;
4324
4325         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
4326                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
4327                         mirror_conf->rule_type);
4328                 return -EINVAL;
4329         }
4330
4331         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
4332                 mirror_type |= IXGBE_MRCTL_VLME;
4333                 /* Check if the vlan id is valid and find the corresponding VLAN ID index in the VLVF */
4334                 for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
4335                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
4336                                 /* search the pool vlan filter index related to this vlan id */
4337                                 reg_index = ixgbe_find_vlvf_slot(hw,
4338                                                 mirror_conf->vlan.vlan_id[i]);
4339                                 if(reg_index < 0)
4340                                         return -EINVAL;
4341                                 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
4342                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
4343                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
4344                                       mirror_conf->vlan.vlan_id[i]))
4345                                         vlan_mask |= (1ULL << reg_index);
4346                                 else
4347                                         return -EINVAL;
4348                         }
4349                 }
4350
4351                 if (on) {
4352                         mv_lsb = vlan_mask & 0xFFFFFFFF;
4353                         mv_msb = vlan_mask >> vlan_mask_offset;
4354
4355                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
4356                                                 mirror_conf->vlan.vlan_mask;
4357                         for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
4358                                 if(mirror_conf->vlan.vlan_mask & (1ULL << i))
4359                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
4360                                                 mirror_conf->vlan.vlan_id[i];
4361                         }
4362                 } else {
4363                         mv_lsb = 0;
4364                         mv_msb = 0;
4365                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
4366                         for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
4367                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
4368                 }
4369         }
4370
4371         /*
4372          * If pool mirroring is enabled, write the related pool mask registers;
4373          * if it is disabled, clear the PFMRVM registers.
4374          */
4375         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
4376                 mirror_type |= IXGBE_MRCTL_VPME;
4377                 if (on) {
4378                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
4379                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
4380                         mr_info->mr_conf[rule_id].pool_mask =
4381                                         mirror_conf->pool_mask;
4382
4383                 } else {
4384                         mp_lsb = 0;
4385                         mp_msb = 0;
4386                         mr_info->mr_conf[rule_id].pool_mask = 0;
4387                 }
4388         }
4389         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
4390                 mirror_type |= IXGBE_MRCTL_UPME;
4391         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
4392                 mirror_type |= IXGBE_MRCTL_DPME;
4393
4394         /* read the mirror control register and recalculate it */
4395         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
4396
4397         if (on) {
4398                 mr_ctl |= mirror_type;
4399                 mr_ctl &= mirror_rule_mask;
4400                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
4401         } else
4402                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
4403
4404         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
4405         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
4406
4407         /* write the mirror control register */
4408         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
4409
4410         /* write the pool mirror control register */
4411         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
4412                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
4413                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
4414                                 mp_msb);
4415         }
4416         /* write the VLAN mirror control register */
4417         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
4418                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
4419                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
4420                                 mv_msb);
4421         }
4422
4423         return 0;
4424 }
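
/*
 * Editor's note: an illustrative sketch, not driver code.  A pool-mirroring
 * rule as consumed by this function; the field names follow the mirror_conf
 * accesses above, and rte_eth_mirror_rule_set() is assumed to be the generic
 * ethdev entry point that reaches this function through dev_ops.
 *
 *     struct rte_eth_mirror_conf conf = {
 *             .rule_type = ETH_MIRROR_VIRTUAL_POOL_UP,
 *             .dst_pool  = 7,                           // mirror into pool 7
 *             .pool_mask = (1ULL << 0) | (1ULL << 1),   // mirror pools 0 and 1
 *     };
 *     rte_eth_mirror_rule_set(port_id, &conf, 0, 1);    // rule_id 0, on
 */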
4425
4426 static int
4427 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
4428 {
4429         int mr_ctl = 0;
4430         uint32_t lsb_val = 0;
4431         uint32_t msb_val = 0;
4432         const uint8_t rule_mr_offset = 4;
4433
4434         struct ixgbe_hw *hw =
4435                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4436         struct ixgbe_mirror_info *mr_info =
4437                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
4438
4439         if (ixgbe_vmdq_mode_check(hw) < 0)
4440                 return -ENOTSUP;
4441
4442         memset(&mr_info->mr_conf[rule_id], 0,
4443                 sizeof(struct rte_eth_mirror_conf));
4444
4445         /* clear PFVMCTL register */
4446         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
4447
4448         /* clear pool mask register */
4449         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
4450         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
4451
4452         /* clear vlan mask register */
4453         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
4454         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
4455
4456         return 0;
4457 }
4458
4459 static int
4460 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4461 {
4462         uint32_t mask;
4463         struct ixgbe_hw *hw =
4464                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4465
4466         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
4467         mask |= (1 << IXGBE_MISC_VEC_ID);
4468         RTE_SET_USED(queue_id);
4469         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
4470
4471         rte_intr_enable(&dev->pci_dev->intr_handle);
4472
4473         return 0;
4474 }
4475
4476 static int
4477 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4478 {
4479         uint32_t mask;
4480         struct ixgbe_hw *hw =
4481                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4482
4483         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
4484         mask &= ~(1 << IXGBE_MISC_VEC_ID);
4485         RTE_SET_USED(queue_id);
4486         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
4487
4488         return 0;
4489 }
4490
4491 static int
4492 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4493 {
4494         uint32_t mask;
4495         struct ixgbe_hw *hw =
4496                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4497         struct ixgbe_interrupt *intr =
4498                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4499
4500         if (queue_id < 16) {
4501                 ixgbe_disable_intr(hw);
4502                 intr->mask |= (1 << queue_id);
4503                 ixgbe_enable_intr(dev);
4504         } else if (queue_id < 32) {
4505                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
4506                 mask |= (1 << queue_id);
4507                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4508         } else if (queue_id < 64) {
4509                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
4510                 mask |= (1 << (queue_id - 32));
4511                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4512         }
4513         rte_intr_enable(&dev->pci_dev->intr_handle);
4514
4515         return 0;
4516 }
4517
4518 static int
4519 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4520 {
4521         uint32_t mask;
4522         struct ixgbe_hw *hw =
4523                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4524         struct ixgbe_interrupt *intr =
4525                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4526
4527         if (queue_id < 16) {
4528                 ixgbe_disable_intr(hw);
4529                 intr->mask &= ~(1 << queue_id);
4530                 ixgbe_enable_intr(dev);
4531         } else if (queue_id < 32) {
4532                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
4533                 mask &= ~(1 << queue_id);
4534                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4535         } else if (queue_id < 64) {
4536                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
4537                 mask &= ~(1 << (queue_id - 32));
4538                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4539         }
4540
4541         return 0;
4542 }
4543
4544 static void
4545 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
4546                      uint8_t queue, uint8_t msix_vector)
4547 {
4548         uint32_t tmp, idx;
4549
4550         if (direction == -1) {
4551                 /* other causes */
4552                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4553                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
4554                 tmp &= ~0xFF;
4555                 tmp |= msix_vector;
4556                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
4557         } else {
4558                 /* rx or tx cause */
4559                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4560                 idx = ((16 * (queue & 1)) + (8 * direction));
4561                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
4562                 tmp &= ~(0xFF << idx);
4563                 tmp |= (msix_vector << idx);
4564                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
4565         }
4566 }
4567
4568 /**
4569  * set the IVAR registers, mapping interrupt causes to vectors
4570  * @param hw
4571  *  pointer to ixgbe_hw struct
4572  * @direction
4573  *  0 for Rx, 1 for Tx, -1 for other causes
4574  * @queue
4575  *  queue to map the corresponding interrupt to
4576  * @msix_vector
4577  *  the vector to map to the corresponding queue
4578  */
4579 static void
4580 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
4581                    uint8_t queue, uint8_t msix_vector)
4582 {
4583         uint32_t tmp, idx;
4584
4585         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4586         if (hw->mac.type == ixgbe_mac_82598EB) {
4587                 if (direction == -1)
4588                         direction = 0;
4589                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
4590                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
4591                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
4592                 tmp |= (msix_vector << (8 * (queue & 0x3)));
4593                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
4594         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
4595                         (hw->mac.type == ixgbe_mac_X540)) {
4596                 if (direction == -1) {
4597                         /* other causes */
4598                         idx = ((queue & 1) * 8);
4599                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4600                         tmp &= ~(0xFF << idx);
4601                         tmp |= (msix_vector << idx);
4602                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
4603                 } else {
4604                         /* rx or tx causes */
4605                         idx = ((16 * (queue & 1)) + (8 * direction));
4606                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
4607                         tmp &= ~(0xFF << idx);
4608                         tmp |= (msix_vector << idx);
4609                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
4610                 }
4611         }
4612 }
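
/*
 * Editor's note: a worked example, not driver code.  On 82599/X540 each IVAR
 * register holds four 8-bit entries covering the Rx and Tx causes of two
 * queues.  For queue 5:
 *
 *     // Rx (direction = 0): register IVAR(5 >> 1) = IVAR(2),
 *     //   idx = 16 * (5 & 1) + 8 * 0 = 16  -> vector lands in bits [23:16]
 *     // Tx (direction = 1): same register,
 *     //   idx = 16 * (5 & 1) + 8 * 1 = 24  -> vector lands in bits [31:24]
 *
 * Queue 4 uses bits [7:0] (Rx) and [15:8] (Tx) of the same IVAR(2) register.
 */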
4613
4614 static void
4615 ixgbevf_configure_msix(struct rte_eth_dev *dev)
4616 {
4617         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4618         struct ixgbe_hw *hw =
4619                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4620         uint32_t q_idx;
4621         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
4622
4623         /* Don't configure the MSI-X register if no mapping has been done
4624          * between interrupt vectors and event fds.
4625          */
4626         if (!rte_intr_dp_is_en(intr_handle))
4627                 return;
4628
4629         /* Configure all RX queues of VF */
4630         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
4631                 /* Force all queues to use vector 0,
4632                  * as IXGBE_VF_MAXMSIVECOTR = 1
4633                  */
4634                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
4635                 intr_handle->intr_vec[q_idx] = vector_idx;
4636         }
4637
4638         /* Configure VF other cause ivar */
4639         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
4640 }
4641
4642 /**
4643  * Sets up the hardware to properly generate MSI-X interrupts
4644  * @dev
4645  *  pointer to the rte_eth_dev structure
4646  */
4647 static void
4648 ixgbe_configure_msix(struct rte_eth_dev *dev)
4649 {
4650         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4651         struct ixgbe_hw *hw =
4652                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4653         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
4654         uint32_t vec = IXGBE_MISC_VEC_ID;
4655         uint32_t mask;
4656         uint32_t gpie;
4657
4658         /* Don't configure the MSI-X register if no mapping has been done
4659          * between interrupt vectors and event fds
4660          */
4661         if (!rte_intr_dp_is_en(intr_handle))
4662                 return;
4663
4664         if (rte_intr_allow_others(intr_handle))
4665                 vec = base = IXGBE_RX_VEC_START;
4666
4667         /* setup GPIE for MSI-x mode */
4668         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4669         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4670                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
4671         /* auto clearing and auto setting corresponding bits in EIMS
4672          * when MSI-X interrupt is triggered
4673          */
4674         if (hw->mac.type == ixgbe_mac_82598EB) {
4675                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4676         } else {
4677                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4678                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4679         }
4680         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4681
4682         /* Populate the IVAR table and set the ITR values to the
4683          * corresponding register.
4684          */
4685         for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
4686              queue_id++) {
4687                 /* by default, 1:1 mapping */
4688                 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
4689                 intr_handle->intr_vec[queue_id] = vec;
4690                 if (vec < base + intr_handle->nb_efd - 1)
4691                         vec++;
4692         }
4693
4694         switch (hw->mac.type) {
4695         case ixgbe_mac_82598EB:
4696                 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
4697                                    IXGBE_MISC_VEC_ID);
4698                 break;
4699         case ixgbe_mac_82599EB:
4700         case ixgbe_mac_X540:
4701                 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
4702                 break;
4703         default:
4704                 break;
4705         }
4706         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
4707                         IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
4708
4709         /* set up to autoclear timer, and the vectors */
4710         mask = IXGBE_EIMS_ENABLE_MASK;
4711         mask &= ~(IXGBE_EIMS_OTHER |
4712                   IXGBE_EIMS_MAILBOX |
4713                   IXGBE_EIMS_LSC);
4714
4715         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4716 }
4717
4718 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
4719         uint16_t queue_idx, uint16_t tx_rate)
4720 {
4721         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4722         uint32_t rf_dec, rf_int;
4723         uint32_t bcnrc_val;
4724         uint16_t link_speed = dev->data->dev_link.link_speed;
4725
4726         if (queue_idx >= hw->mac.max_tx_queues)
4727                 return -EINVAL;
4728
4729         if (tx_rate != 0) {
4730                 /* Calculate the rate factor values to set */
4731                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
4732                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
4733                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
4734
4735                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
4736                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
4737                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
4738                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
4739         } else {
4740                 bcnrc_val = 0;
4741         }
4742
4743         /*
4744          * Set the global transmit compensation time to MMW_SIZE in the RTTBCNRM
4745          * register. MMW_SIZE = 0x014 if 9728-byte jumbo frames are supported,
4746          * otherwise set it to 0x4.
4747          */
4748         if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
4749                 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
4750                                 IXGBE_MAX_JUMBO_FRAME_SIZE))
4751                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
4752                         IXGBE_MMW_SIZE_JUMBO_FRAME);
4753         else
4754                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
4755                         IXGBE_MMW_SIZE_DEFAULT);
4756
4757         /* Set RTTBCNRC of queue X */
4758         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
4759         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
4760         IXGBE_WRITE_FLUSH(hw);
4761
4762         return 0;
4763 }
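
/*
 * Editor's note: a worked example, not driver code.  The rate factor written
 * to RTTBCNRC is link_speed / tx_rate split into an integer and a fractional
 * part (assuming IXGBE_RTTBCNRC_RF_INT_SHIFT is 14, i.e. a Q14 fraction).
 * For a 10G link (link_speed = 10000 Mb/s) limited to tx_rate = 300 Mb/s:
 *
 *     rf_int = 10000 / 300;           // = 33
 *     rf_dec = 10000 % 300;           // = 100
 *     rf_dec = (100 << 14) / 300;     // = 5461
 *
 * giving a factor of 33 + 5461/16384, roughly 33.33 = 10000/300.
 */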
4764
4765 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
4766         uint16_t tx_rate, uint64_t q_msk)
4767 {
4768         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4769         struct ixgbe_vf_info *vfinfo =
4770                 *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
4771         uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
4772         uint32_t queue_stride =
4773                 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
4774         uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
4775         uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
4776         uint16_t total_rate = 0;
4777
4778         if (queue_end >= hw->mac.max_tx_queues)
4779                 return -EINVAL;
4780
4781         if (vfinfo != NULL) {
4782                 for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
4783                         if (vf_idx == vf)
4784                                 continue;
4785                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
4786                                 idx++)
4787                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
4788                 }
4789         } else
4790                 return -EINVAL;
4791
4792         /* Store tx_rate for this vf. */
4793         for (idx = 0; idx < nb_q_per_pool; idx++) {
4794                 if (((uint64_t)0x1 << idx) & q_msk) {
4795                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
4796                                 vfinfo[vf].tx_rate[idx] = tx_rate;
4797                         total_rate += tx_rate;
4798                 }
4799         }
4800
4801         if (total_rate > dev->data->dev_link.link_speed) {
4802                 /*
4803                  * Reset the stored TX rate of the VF if it would cause the
4804                  * link speed to be exceeded.
4805                  */
4806                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
4807                 return -EINVAL;
4808         }
4809
4810         /* Set RTTBCNRC of each queue/pool for vf X  */
4811         for (; queue_idx <= queue_end; queue_idx++) {
4812                 if (0x1 & q_msk)
4813                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
4814                 q_msk = q_msk >> 1;
4815         }
4816
4817         return 0;
4818 }
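
/*
 * Editor's note: a worked example, not driver code.  Queues are assigned to
 * VFs in contiguous strides.  With 64 active pools the queue_stride above is
 * 128 / 64 = 2, so VF 2 owns queues 4 and 5; a q_msk of 0x3 then applies the
 * tx_rate limit to both of that VF's queues:
 *
 *     ixgbe_set_vf_rate_limit(dev, 2, 300, 0x3);   // 300 Mb/s on queues 4-5
 *
 * The accumulated per-queue rates across all VFs must stay within the link
 * speed, otherwise the stored rates for this VF are reset and -EINVAL is
 * returned.
 */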
4819
4820 static void
4821 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4822                      __attribute__((unused)) uint32_t index,
4823                      __attribute__((unused)) uint32_t pool)
4824 {
4825         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4826         int diag;
4827
4828         /*
4829          * On an 82599 VF, adding the same MAC address again is not an idempotent
4830          * operation. Trap this case to avoid exhausting the [very limited]
4831          * set of PF resources used to store VF MAC addresses.
4832          */
4833         if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
4834                 return;
4835         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
4836         if (diag == 0)
4837                 return;
4838         PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
4839 }
4840
4841 static void
4842 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
4843 {
4844         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4845         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
4846         struct ether_addr *mac_addr;
4847         uint32_t i;
4848         int diag;
4849
4850         /*
4851          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
4852          * not support the deletion of a given MAC address.
4853          * Instead, it requires deleting all MAC addresses and then adding back
4854          * all MAC addresses, with the exception of the one to be deleted.
4855          */
4856         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
4857
4858         /*
4859          * Add again all MAC addresses, with the exception of the deleted one
4860          * and of the permanent MAC address.
4861          */
4862         for (i = 0, mac_addr = dev->data->mac_addrs;
4863              i < hw->mac.num_rar_entries; i++, mac_addr++) {
4864                 /* Skip the deleted MAC address */
4865                 if (i == index)
4866                         continue;
4867                 /* Skip NULL MAC addresses */
4868                 if (is_zero_ether_addr(mac_addr))
4869                         continue;
4870                 /* Skip the permanent MAC address */
4871                 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
4872                         continue;
4873                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
4874                 if (diag != 0)
4875                         PMD_DRV_LOG(ERR,
4876                                     "Adding again MAC address "
4877                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
4878                                     "diag=%d",
4879                                     mac_addr->addr_bytes[0],
4880                                     mac_addr->addr_bytes[1],
4881                                     mac_addr->addr_bytes[2],
4882                                     mac_addr->addr_bytes[3],
4883                                     mac_addr->addr_bytes[4],
4884                                     mac_addr->addr_bytes[5],
4885                                     diag);
4886         }
4887 }
4888
4889 static void
4890 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4891 {
4892         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4893
4894         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
4895 }
4896
4897 #define MAC_TYPE_FILTER_SUP(type)    do {\
4898         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
4899                 (type) != ixgbe_mac_X550)\
4900                 return -ENOTSUP;\
4901 } while (0)
4902
4903 static int
4904 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
4905                         struct rte_eth_syn_filter *filter,
4906                         bool add)
4907 {
4908         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4909         uint32_t synqf;
4910
4911         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
4912                 return -EINVAL;
4913
4914         synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
4915
4916         if (add) {
4917                 if (synqf & IXGBE_SYN_FILTER_ENABLE)
4918                         return -EINVAL;
4919                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
4920                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
4921
4922                 if (filter->hig_pri)
4923                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
4924                 else
4925                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
4926         } else {
4927                 if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
4928                         return -ENOENT;
4929                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
4930         }
4931         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
4932         IXGBE_WRITE_FLUSH(hw);
4933         return 0;
4934 }
4935
4936 static int
4937 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
4938                         struct rte_eth_syn_filter *filter)
4939 {
4940         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4941         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
4942
4943         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
4944                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
4945                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
4946                 return 0;
4947         }
4948         return -ENOENT;
4949 }
4950
4951 static int
4952 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
4953                         enum rte_filter_op filter_op,
4954                         void *arg)
4955 {
4956         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4957         int ret;
4958
4959         MAC_TYPE_FILTER_SUP(hw->mac.type);
4960
4961         if (filter_op == RTE_ETH_FILTER_NOP)
4962                 return 0;
4963
4964         if (arg == NULL) {
4965                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
4966                             filter_op);
4967                 return -EINVAL;
4968         }
4969
4970         switch (filter_op) {
4971         case RTE_ETH_FILTER_ADD:
4972                 ret = ixgbe_syn_filter_set(dev,
4973                                 (struct rte_eth_syn_filter *)arg,
4974                                 TRUE);
4975                 break;
4976         case RTE_ETH_FILTER_DELETE:
4977                 ret = ixgbe_syn_filter_set(dev,
4978                                 (struct rte_eth_syn_filter *)arg,
4979                                 FALSE);
4980                 break;
4981         case RTE_ETH_FILTER_GET:
4982                 ret = ixgbe_syn_filter_get(dev,
4983                                 (struct rte_eth_syn_filter *)arg);
4984                 break;
4985         default:
4986                 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
4987                 ret = -EINVAL;
4988                 break;
4989         }
4990
4991         return ret;
4992 }
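
/*
 * Editor's note: an illustrative sketch, not driver code.  The SYN filter is
 * reached through the generic filter-control API (assumed here to be
 * rte_eth_dev_filter_ctrl()); steering all TCP SYN packets to Rx queue 3
 * could look like:
 *
 *     struct rte_eth_syn_filter syn = {
 *             .hig_pri = 1,    // give the SYN filter high priority
 *             .queue   = 3,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
 *                             RTE_ETH_FILTER_ADD, &syn);
 *
 * Only one SYN filter exists per port, so a second ADD fails with -EINVAL
 * until the existing one is deleted.
 */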
4993
4994
4995 static inline enum ixgbe_5tuple_protocol
4996 convert_protocol_type(uint8_t protocol_value)
4997 {
4998         if (protocol_value == IPPROTO_TCP)
4999                 return IXGBE_FILTER_PROTOCOL_TCP;
5000         else if (protocol_value == IPPROTO_UDP)
5001                 return IXGBE_FILTER_PROTOCOL_UDP;
5002         else if (protocol_value == IPPROTO_SCTP)
5003                 return IXGBE_FILTER_PROTOCOL_SCTP;
5004         else
5005                 return IXGBE_FILTER_PROTOCOL_NONE;
5006 }
5007
5008 /*
5009  * add a 5tuple filter
5010  *
5011  * @param
5012  * dev: pointer to struct rte_eth_dev.
5013  * filter: pointer to the filter that will be added; this function finds a
5014  *         free filter index and stores it in filter->index.
5016  *
5017  * @return
5018  *    - On success, zero.
5019  *    - On failure, a negative value.
5020  */
5021 static int
5022 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
5023                         struct ixgbe_5tuple_filter *filter)
5024 {
5025         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5026         struct ixgbe_filter_info *filter_info =
5027                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5028         int i, idx, shift;
5029         uint32_t ftqf, sdpqf;
5030         uint32_t l34timir = 0;
5031         uint8_t mask = 0xff;
5032
5033         /*
5034          * look for an unused 5tuple filter index,
5035          * and insert the filter into the list.
5036          */
5037         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
5038                 idx = i / (sizeof(uint32_t) * NBBY);
5039                 shift = i % (sizeof(uint32_t) * NBBY);
5040                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
5041                         filter_info->fivetuple_mask[idx] |= 1 << shift;
5042                         filter->index = i;
5043                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
5044                                           filter,
5045                                           entries);
5046                         break;
5047                 }
5048         }
5049         if (i >= IXGBE_MAX_FTQF_FILTERS) {
5050                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
5051                 return -ENOSYS;
5052         }
5053
5054         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
5055                                 IXGBE_SDPQF_DSTPORT_SHIFT);
5056         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
5057
5058         ftqf = (uint32_t)(filter->filter_info.proto &
5059                 IXGBE_FTQF_PROTOCOL_MASK);
5060         ftqf |= (uint32_t)((filter->filter_info.priority &
5061                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
5062         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
5063                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
5064         if (filter->filter_info.dst_ip_mask == 0)
5065                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
5066         if (filter->filter_info.src_port_mask == 0)
5067                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
5068         if (filter->filter_info.dst_port_mask == 0)
5069                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
5070         if (filter->filter_info.proto_mask == 0)
5071                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
5072         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
5073         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
5074         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
5075
5076         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
5077         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
5078         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
5079         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
5080
5081         l34timir |= IXGBE_L34T_IMIR_RESERVE;
5082         l34timir |= (uint32_t)(filter->queue <<
5083                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
5084         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
5085         return 0;
5086 }
5087
5088 /*
5089  * remove a 5tuple filter
5090  *
5091  * @param
5092  * dev: Pointer to struct rte_eth_dev.
5093  * filter: pointer to the filter that will be removed.
5094  */
5095 static void
5096 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
5097                         struct ixgbe_5tuple_filter *filter)
5098 {
5099         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5100         struct ixgbe_filter_info *filter_info =
5101                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5102         uint16_t index = filter->index;
5103
5104         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
5105                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
5106         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
5107         rte_free(filter);
5108
5109         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
5110         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
5111         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
5112         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
5113         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
5114 }
5115
5116 static int
5117 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
5118 {
5119         struct ixgbe_hw *hw;
5120         uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
5121
5122         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5123
5124         if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
5125                 return -EINVAL;
5126
5127         /* Refuse an MTU that would require scattered packet support when this
5128          * feature has not been enabled before. */
5129         if (!dev->data->scattered_rx &&
5130             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
5131              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
5132                 return -EINVAL;
5133
5134         /*
5135          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
5136          * request of the version 2.0 of the mailbox API.
5137          * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
5138          * of the mailbox API.
5139          * This call to the IXGBE_VF_SET_LPE action won't work with ixgbe PF drivers
5140          * prior to 3.11.33, which contains the following change:
5141          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
5142          */
5143         ixgbevf_rlpml_set_vf(hw, max_frame);
5144
5145         /* update max frame size */
5146         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
5147         return 0;
5148 }
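
/*
 * Editor's note: a worked example, not driver code.  For mtu = 9000 the frame
 * length passed to the PF is 9000 + 14 (Ethernet header) + 4 (CRC) = 9018
 * bytes, which is accepted because it stays below ETHER_MAX_JUMBO_FRAME_LEN
 * (9728).  Without scattered Rx the request is additionally rejected unless
 * max_frame + 8 (two VLAN tags) still fits in a single mbuf data buffer.
 */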
5149
5150 #define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
5151         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
5152                 return -ENOTSUP;\
5153 } while (0)
5154
5155 static inline struct ixgbe_5tuple_filter *
5156 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
5157                         struct ixgbe_5tuple_filter_info *key)
5158 {
5159         struct ixgbe_5tuple_filter *it;
5160
5161         TAILQ_FOREACH(it, filter_list, entries) {
5162                 if (memcmp(key, &it->filter_info,
5163                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
5164                         return it;
5165                 }
5166         }
5167         return NULL;
5168 }
5169
5170 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
5171 static inline int
5172 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
5173                         struct ixgbe_5tuple_filter_info *filter_info)
5174 {
5175         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
5176                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
5177                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
5178                 return -EINVAL;
5179
5180         switch (filter->dst_ip_mask) {
5181         case UINT32_MAX:
5182                 filter_info->dst_ip_mask = 0;
5183                 filter_info->dst_ip = filter->dst_ip;
5184                 break;
5185         case 0:
5186                 filter_info->dst_ip_mask = 1;
5187                 break;
5188         default:
5189                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
5190                 return -EINVAL;
5191         }
5192
5193         switch (filter->src_ip_mask) {
5194         case UINT32_MAX:
5195                 filter_info->src_ip_mask = 0;
5196                 filter_info->src_ip = filter->src_ip;
5197                 break;
5198         case 0:
5199                 filter_info->src_ip_mask = 1;
5200                 break;
5201         default:
5202                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
5203                 return -EINVAL;
5204         }
5205
5206         switch (filter->dst_port_mask) {
5207         case UINT16_MAX:
5208                 filter_info->dst_port_mask = 0;
5209                 filter_info->dst_port = filter->dst_port;
5210                 break;
5211         case 0:
5212                 filter_info->dst_port_mask = 1;
5213                 break;
5214         default:
5215                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
5216                 return -EINVAL;
5217         }
5218
5219         switch (filter->src_port_mask) {
5220         case UINT16_MAX:
5221                 filter_info->src_port_mask = 0;
5222                 filter_info->src_port = filter->src_port;
5223                 break;
5224         case 0:
5225                 filter_info->src_port_mask = 1;
5226                 break;
5227         default:
5228                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
5229                 return -EINVAL;
5230         }
5231
5232         switch (filter->proto_mask) {
5233         case UINT8_MAX:
5234                 filter_info->proto_mask = 0;
5235                 filter_info->proto =
5236                         convert_protocol_type(filter->proto);
5237                 break;
5238         case 0:
5239                 filter_info->proto_mask = 1;
5240                 break;
5241         default:
5242                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
5243                 return -EINVAL;
5244         }
5245
5246         filter_info->priority = (uint8_t)filter->priority;
5247         return 0;
5248 }
5249
5250 /*
5251  * add or delete a ntuple filter
5252  *
5253  * @param
5254  * dev: Pointer to struct rte_eth_dev.
5255  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
5256  * add: if true, add filter, if false, remove filter
5257  *
5258  * @return
5259  *    - On success, zero.
5260  *    - On failure, a negative value.
5261  */
5262 static int
5263 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
5264                         struct rte_eth_ntuple_filter *ntuple_filter,
5265                         bool add)
5266 {
5267         struct ixgbe_filter_info *filter_info =
5268                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5269         struct ixgbe_5tuple_filter_info filter_5tuple;
5270         struct ixgbe_5tuple_filter *filter;
5271         int ret;
5272
5273         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
5274                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
5275                 return -EINVAL;
5276         }
5277
5278         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
5279         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
5280         if (ret < 0)
5281                 return ret;
5282
5283         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
5284                                          &filter_5tuple);
5285         if (filter != NULL && add) {
5286                 PMD_DRV_LOG(ERR, "filter exists.");
5287                 return -EEXIST;
5288         }
5289         if (filter == NULL && !add) {
5290                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
5291                 return -ENOENT;
5292         }
5293
5294         if (add) {
5295                 filter = rte_zmalloc("ixgbe_5tuple_filter",
5296                                 sizeof(struct ixgbe_5tuple_filter), 0);
5297                 if (filter == NULL)
5298                         return -ENOMEM;
5299                 (void)rte_memcpy(&filter->filter_info,
5300                                  &filter_5tuple,
5301                                  sizeof(struct ixgbe_5tuple_filter_info));
5302                 filter->queue = ntuple_filter->queue;
5303                 ret = ixgbe_add_5tuple_filter(dev, filter);
5304                 if (ret < 0) {
5305                         rte_free(filter);
5306                         return ret;
5307                 }
5308         } else
5309                 ixgbe_remove_5tuple_filter(dev, filter);
5310
5311         return 0;
5312 }
5313
5314 /*
5315  * get a ntuple filter
5316  *
5317  * @param
5318  * dev: Pointer to struct rte_eth_dev.
5319  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
5320  *
5321  * @return
5322  *    - On success, zero.
5323  *    - On failure, a negative value.
5324  */
5325 static int
5326 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
5327                         struct rte_eth_ntuple_filter *ntuple_filter)
5328 {
5329         struct ixgbe_filter_info *filter_info =
5330                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5331         struct ixgbe_5tuple_filter_info filter_5tuple;
5332         struct ixgbe_5tuple_filter *filter;
5333         int ret;
5334
5335         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
5336                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
5337                 return -EINVAL;
5338         }
5339
5340         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
5341         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
5342         if (ret < 0)
5343                 return ret;
5344
5345         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
5346                                          &filter_5tuple);
5347         if (filter == NULL) {
5348                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
5349                 return -ENOENT;
5350         }
5351         ntuple_filter->queue = filter->queue;
5352         return 0;
5353 }
5354
5355 /*
5356  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
5357  * @dev: pointer to rte_eth_dev structure
5358  * @filter_op: operation to be taken.
5359  * @arg: a pointer to specific structure corresponding to the filter_op
5360  *
5361  * @return
5362  *    - On success, zero.
5363  *    - On failure, a negative value.
5364  */
5365 static int
5366 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
5367                                 enum rte_filter_op filter_op,
5368                                 void *arg)
5369 {
5370         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5371         int ret;
5372
5373         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
5374
5375         if (filter_op == RTE_ETH_FILTER_NOP)
5376                 return 0;
5377
5378         if (arg == NULL) {
5379                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
5380                             filter_op);
5381                 return -EINVAL;
5382         }
5383
5384         switch (filter_op) {
5385         case RTE_ETH_FILTER_ADD:
5386                 ret = ixgbe_add_del_ntuple_filter(dev,
5387                         (struct rte_eth_ntuple_filter *)arg,
5388                         TRUE);
5389                 break;
5390         case RTE_ETH_FILTER_DELETE:
5391                 ret = ixgbe_add_del_ntuple_filter(dev,
5392                         (struct rte_eth_ntuple_filter *)arg,
5393                         FALSE);
5394                 break;
5395         case RTE_ETH_FILTER_GET:
5396                 ret = ixgbe_get_ntuple_filter(dev,
5397                         (struct rte_eth_ntuple_filter *)arg);
5398                 break;
5399         default:
5400                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
5401                 ret = -EINVAL;
5402                 break;
5403         }
5404         return ret;
5405 }
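
/*
 * Editor's note: an illustrative sketch, not driver code.  A 5-tuple filter
 * uses UINT*_MAX masks for fields that must match exactly and 0 for
 * wildcards (see ntuple_filter_to_5tuple() above).  Steering TCP traffic to
 * destination port 80 from any source into Rx queue 5 could look like the
 * snippet below; rte_eth_dev_filter_ctrl() as the generic entry point and
 * the byte order of the port field are assumptions of this sketch.
 *
 *     struct rte_eth_ntuple_filter nt = {
 *             .flags         = RTE_5TUPLE_FLAGS,
 *             .dst_ip_mask   = 0,              // any destination address
 *             .src_ip_mask   = 0,              // any source address
 *             .dst_port      = 80,
 *             .dst_port_mask = UINT16_MAX,     // exact match
 *             .src_port_mask = 0,
 *             .proto         = IPPROTO_TCP,
 *             .proto_mask    = UINT8_MAX,
 *             .priority      = 1,
 *             .queue         = 5,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                             RTE_ETH_FILTER_ADD, &nt);
 */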
5406
5407 static inline int
5408 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
5409                         uint16_t ethertype)
5410 {
5411         int i;
5412
5413         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
5414                 if (filter_info->ethertype_filters[i] == ethertype &&
5415                     (filter_info->ethertype_mask & (1 << i)))
5416                         return i;
5417         }
5418         return -1;
5419 }
5420
5421 static inline int
5422 ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
5423                         uint16_t ethertype)
5424 {
5425         int i;
5426
5427         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
5428                 if (!(filter_info->ethertype_mask & (1 << i))) {
5429                         filter_info->ethertype_mask |= 1 << i;
5430                         filter_info->ethertype_filters[i] = ethertype;
5431                         return i;
5432                 }
5433         }
5434         return -1;
5435 }
5436
5437 static inline int
5438 ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
5439                         uint8_t idx)
5440 {
5441         if (idx >= IXGBE_MAX_ETQF_FILTERS)
5442                 return -1;
5443         filter_info->ethertype_mask &= ~(1 << idx);
5444         filter_info->ethertype_filters[idx] = 0;
5445         return idx;
5446 }
5447
5448 static int
5449 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
5450                         struct rte_eth_ethertype_filter *filter,
5451                         bool add)
5452 {
5453         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5454         struct ixgbe_filter_info *filter_info =
5455                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5456         uint32_t etqf = 0;
5457         uint32_t etqs = 0;
5458         int ret;
5459
5460         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
5461                 return -EINVAL;
5462
5463         if (filter->ether_type == ETHER_TYPE_IPv4 ||
5464                 filter->ether_type == ETHER_TYPE_IPv6) {
5465                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
5466                         " ethertype filter.", filter->ether_type);
5467                 return -EINVAL;
5468         }
5469
5470         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
5471                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
5472                 return -EINVAL;
5473         }
5474         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
5475                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
5476                 return -EINVAL;
5477         }
5478
5479         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
5480         if (ret >= 0 && add) {
5481                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
5482                             filter->ether_type);
5483                 return -EEXIST;
5484         }
5485         if (ret < 0 && !add) {
5486                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
5487                             filter->ether_type);
5488                 return -ENOENT;
5489         }
5490
5491         if (add) {
5492                 ret = ixgbe_ethertype_filter_insert(filter_info,
5493                         filter->ether_type);
5494                 if (ret < 0) {
5495                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
5496                         return -ENOSYS;
5497                 }
5498                 etqf = IXGBE_ETQF_FILTER_EN;
5499                 etqf |= (uint32_t)filter->ether_type;
5500                 etqs |= (uint32_t)((filter->queue <<
5501                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
5502                                     IXGBE_ETQS_RX_QUEUE);
5503                 etqs |= IXGBE_ETQS_QUEUE_EN;
5504         } else {
5505                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
5506                 if (ret < 0)
5507                         return -ENOSYS;
5508         }
5509         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
5510         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
5511         IXGBE_WRITE_FLUSH(hw);
5512
5513         return 0;
5514 }
5515
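/*
 * Read back an ethertype filter previously programmed for
 * @filter->ether_type; return -ENOENT if no such filter is active.
 */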
5516 static int
5517 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
5518                         struct rte_eth_ethertype_filter *filter)
5519 {
5520         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5521         struct ixgbe_filter_info *filter_info =
5522                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5523         uint32_t etqf, etqs;
5524         int ret;
5525
5526         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
5527         if (ret < 0) {
5528                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
5529                             filter->ether_type);
5530                 return -ENOENT;
5531         }
5532
5533         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
5534         if (etqf & IXGBE_ETQF_FILTER_EN) {
5535                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
5536                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
5537                 filter->flags = 0;
5538                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
5539                                IXGBE_ETQS_RX_QUEUE_SHIFT;
5540                 return 0;
5541         }
5542         return -ENOENT;
5543 }
5544
5545 /*
5546  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
5547  * @dev: pointer to rte_eth_dev structure
5548  * @filter_op: the operation to perform.
5549  * @arg: a pointer to specific structure corresponding to the filter_op
5550  */
5551 static int
5552 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
5553                                 enum rte_filter_op filter_op,
5554                                 void *arg)
5555 {
5556         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5557         int ret;
5558
5559         MAC_TYPE_FILTER_SUP(hw->mac.type);
5560
5561         if (filter_op == RTE_ETH_FILTER_NOP)
5562                 return 0;
5563
5564         if (arg == NULL) {
5565                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
5566                             filter_op);
5567                 return -EINVAL;
5568         }
5569
5570         switch (filter_op) {
5571         case RTE_ETH_FILTER_ADD:
5572                 ret = ixgbe_add_del_ethertype_filter(dev,
5573                         (struct rte_eth_ethertype_filter *)arg,
5574                         TRUE);
5575                 break;
5576         case RTE_ETH_FILTER_DELETE:
5577                 ret = ixgbe_add_del_ethertype_filter(dev,
5578                         (struct rte_eth_ethertype_filter *)arg,
5579                         FALSE);
5580                 break;
5581         case RTE_ETH_FILTER_GET:
5582                 ret = ixgbe_get_ethertype_filter(dev,
5583                         (struct rte_eth_ethertype_filter *)arg);
5584                 break;
5585         default:
5586                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
5587                 ret = -EINVAL;
5588                 break;
5589         }
5590         return ret;
5591 }
5592
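/*
 * ixgbe_dev_filter_ctrl - dispatch generic filter-control requests to the
 * matching handler (ntuple, ethertype, SYN or flow director).
 *
 * Illustrative sketch only, not part of the driver: an application normally
 * reaches these handlers through the generic ethdev API, for example adding
 * an ethertype filter (0x8847 is MPLS unicast; queue 1 is hypothetical):
 *
 *	struct rte_eth_ethertype_filter filter = {
 *		.ether_type = 0x8847,
 *		.flags = 0,
 *		.queue = 1,
 *	};
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *				RTE_ETH_FILTER_ADD, &filter);
 */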
5593 static int
5594 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
5595                      enum rte_filter_type filter_type,
5596                      enum rte_filter_op filter_op,
5597                      void *arg)
5598 {
5599         int ret = -EINVAL;
5600
5601         switch (filter_type) {
5602         case RTE_ETH_FILTER_NTUPLE:
5603                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
5604                 break;
5605         case RTE_ETH_FILTER_ETHERTYPE:
5606                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
5607                 break;
5608         case RTE_ETH_FILTER_SYN:
5609                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
5610                 break;
5611         case RTE_ETH_FILTER_FDIR:
5612                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
5613                 break;
5614         default:
5615                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5616                                                         filter_type);
5617                 break;
5618         }
5619
5620         return ret;
5621 }
5622
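/*
 * Iterator handed to the base-code multicast update routine: return the
 * current address and advance the caller's cursor by one ether_addr.
 */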
5623 static u8 *
5624 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
5625                         u8 **mc_addr_ptr, u32 *vmdq)
5626 {
5627         u8 *mc_addr;
5628
5629         *vmdq = 0;
5630         mc_addr = *mc_addr_ptr;
5631         *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
5632         return mc_addr;
5633 }
5634
5635 static int
5636 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
5637                           struct ether_addr *mc_addr_set,
5638                           uint32_t nb_mc_addr)
5639 {
5640         struct ixgbe_hw *hw;
5641         u8 *mc_addr_list;
5642
5643         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5644         mc_addr_list = (u8 *)mc_addr_set;
5645         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
5646                                          ixgbe_dev_addr_list_itr, TRUE);
5647 }
5648
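/*
 * Read the raw SYSTIM counter. On X550 the two registers hold nanoseconds
 * and seconds; on other MACs they form a single 64-bit cycle count.
 */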
5649 static uint64_t
5650 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
5651 {
5652         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5653         uint64_t systime_cycles;
5654
5655         switch (hw->mac.type) {
5656         case ixgbe_mac_X550:
5657                 /* SYSTIML stores ns and SYSTIMH stores seconds. */
5658                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
5659                 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
5660                                 * NSEC_PER_SEC;
5661                 break;
5662         default:
5663                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
5664                 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
5665                                 << 32;
5666         }
5667
5668         return systime_cycles;
5669 }
5670
5671 static uint64_t
5672 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
5673 {
5674         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5675         uint64_t rx_tstamp_cycles;
5676
5677         switch (hw->mac.type) {
5678         case ixgbe_mac_X550:
5679                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
5680                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
5681                 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
5682                                 * NSEC_PER_SEC;
5683                 break;
5684         default:
5685                 /* RXSTMPL/RXSTMPH hold the low/high 32 bits of the cycle count. */
5686                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
5687                 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
5688                                 << 32;
5689         }
5690
5691         return rx_tstamp_cycles;
5692 }
5693
5694 static uint64_t
5695 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
5696 {
5697         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5698         uint64_t tx_tstamp_cycles;
5699
5700         switch (hw->mac.type) {
5701         case ixgbe_mac_X550:
5702                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
5703                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
5704                 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
5705                                 * NSEC_PER_SEC;
5706                 break;
5707         default:
5708                 /* TXSTMPL/TXSTMPH hold the low/high 32 bits of the cycle count. */
5709                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
5710                 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
5711                                 << 32;
5712         }
5713
5714         return tx_tstamp_cycles;
5715 }
5716
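/*
 * Program the SYSTIM increment (TIMINCA) for the current link speed and
 * reset the software timecounters that extend the hardware timestamps to
 * 64-bit nanoseconds.
 */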
5717 static void
5718 ixgbe_start_timecounters(struct rte_eth_dev *dev)
5719 {
5720         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5721         struct ixgbe_adapter *adapter =
5722                 (struct ixgbe_adapter *)dev->data->dev_private;
5723         struct rte_eth_link link;
5724         uint32_t incval = 0;
5725         uint32_t shift = 0;
5726
5727         /* Get current link speed. */
5728         memset(&link, 0, sizeof(link));
5729         ixgbe_dev_link_update(dev, 1);
5730         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
5731
5732         switch (link.link_speed) {
5733         case ETH_LINK_SPEED_100:
5734                 incval = IXGBE_INCVAL_100;
5735                 shift = IXGBE_INCVAL_SHIFT_100;
5736                 break;
5737         case ETH_LINK_SPEED_1000:
5738                 incval = IXGBE_INCVAL_1GB;
5739                 shift = IXGBE_INCVAL_SHIFT_1GB;
5740                 break;
5741         case ETH_LINK_SPEED_10000:
5742         default:
5743                 incval = IXGBE_INCVAL_10GB;
5744                 shift = IXGBE_INCVAL_SHIFT_10GB;
5745                 break;
5746         }
5747
5748         switch (hw->mac.type) {
5749         case ixgbe_mac_X550:
5750                 /* Independent of link speed. */
5751                 incval = 1;
5752                 /* Cycles read will be interpreted as ns. */
5753                 shift = 0;
5754                 /* Fall-through */
5755         case ixgbe_mac_X540:
5756                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
5757                 break;
5758         case ixgbe_mac_82599EB:
5759                 incval >>= IXGBE_INCVAL_SHIFT_82599;
5760                 shift -= IXGBE_INCVAL_SHIFT_82599;
5761                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
5762                                 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
5763                 break;
5764         default:
5765                 /* Not supported. */
5766                 return;
5767         }
5768
5769         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
5770         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5771         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5772
5773         adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
5774         adapter->systime_tc.cc_shift = shift;
5775         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
5776
5777         adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
5778         adapter->rx_tstamp_tc.cc_shift = shift;
5779         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
5780
5781         adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
5782         adapter->tx_tstamp_tc.cc_shift = shift;
5783         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
5784 }
5785
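/*
 * Apply the requested offset to the software timecounters only; the
 * hardware SYSTIM registers are left untouched.
 */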
5786 static int
5787 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
5788 {
5789         struct ixgbe_adapter *adapter =
5790                         (struct ixgbe_adapter *)dev->data->dev_private;
5791
5792         adapter->systime_tc.nsec += delta;
5793         adapter->rx_tstamp_tc.nsec += delta;
5794         adapter->tx_tstamp_tc.nsec += delta;
5795
5796         return 0;
5797 }
5798
5799 static int
5800 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
5801 {
5802         uint64_t ns;
5803         struct ixgbe_adapter *adapter =
5804                         (struct ixgbe_adapter *)dev->data->dev_private;
5805
5806         ns = rte_timespec_to_ns(ts);
5807         /* Set the timecounters to a new value. */
5808         adapter->systime_tc.nsec = ns;
5809         adapter->rx_tstamp_tc.nsec = ns;
5810         adapter->tx_tstamp_tc.nsec = ns;
5811
5812         return 0;
5813 }
5814
5815 static int
5816 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
5817 {
5818         uint64_t ns, systime_cycles;
5819         struct ixgbe_adapter *adapter =
5820                         (struct ixgbe_adapter *)dev->data->dev_private;
5821
5822         systime_cycles = ixgbe_read_systime_cyclecounter(dev);
5823         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
5824         *ts = rte_ns_to_timespec(ns);
5825
5826         return 0;
5827 }
5828
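/*
 * Enable IEEE 1588 support: reset SYSTIM, program the timecounters, set up
 * the 1588 ethertype filter and turn on RX/TX PTP timestamping.
 */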
5829 static int
5830 ixgbe_timesync_enable(struct rte_eth_dev *dev)
5831 {
5832         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5833         uint32_t tsync_ctl;
5834         uint32_t tsauxc;
5835
5836         /* Stop the timesync system time. */
5837         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
5838         /* Reset the timesync system time value. */
5839         IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
5840         IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
5841
5842         /* Enable system time for platforms where it isn't on by default. */
5843         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
5844         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
5845         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
5846
5847         ixgbe_start_timecounters(dev);
5848
5849         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5850         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
5851                         (ETHER_TYPE_1588 |
5852                          IXGBE_ETQF_FILTER_EN |
5853                          IXGBE_ETQF_1588));
5854
5855         /* Enable timestamping of received PTP packets. */
5856         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5857         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
5858         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
5859
5860         /* Enable timestamping of transmitted PTP packets. */
5861         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5862         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
5863         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
5864
5865         IXGBE_WRITE_FLUSH(hw);
5866
5867         return 0;
5868 }
5869
5870 static int
5871 ixgbe_timesync_disable(struct rte_eth_dev *dev)
5872 {
5873         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5874         uint32_t tsync_ctl;
5875
5876         /* Disable timestamping of transmitted PTP packets. */
5877         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5878         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
5879         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
5880
5881         /* Disable timestamping of received PTP packets. */
5882         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5883         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
5884         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
5885
5886         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5887         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
5888
5889         /* Stop incrementing the System Time registers. */
5890         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
5891
5892         return 0;
5893 }
5894
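/*
 * Return the latched RX PTP timestamp, or -EINVAL if the TSYNCRXCTL valid
 * bit indicates that no timestamp is pending.
 */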
5895 static int
5896 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
5897                                  struct timespec *timestamp,
5898                                  uint32_t flags __rte_unused)
5899 {
5900         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5901         struct ixgbe_adapter *adapter =
5902                 (struct ixgbe_adapter *)dev->data->dev_private;
5903         uint32_t tsync_rxctl;
5904         uint64_t rx_tstamp_cycles;
5905         uint64_t ns;
5906
5907         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5908         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
5909                 return -EINVAL;
5910
5911         rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
5912         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
5913         *timestamp = rte_ns_to_timespec(ns);
5914
5915         return 0;
5916 }
5917
5918 static int
5919 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
5920                                  struct timespec *timestamp)
5921 {
5922         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5923         struct ixgbe_adapter *adapter =
5924                 (struct ixgbe_adapter *)dev->data->dev_private;
5925         uint32_t tsync_txctl;
5926         uint64_t tx_tstamp_cycles;
5927         uint64_t ns;
5928
5929         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5930         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
5931                 return -EINVAL;
5932
5933         tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
5934         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
5935         *timestamp = rte_ns_to_timespec(ns);
5936
5937         return 0;
5938 }
5939
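/*
 * Count the registers included in a full PF register dump; the set depends
 * on whether the MAC is an 82598 or a later device.
 */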
5940 static int
5941 ixgbe_get_reg_length(struct rte_eth_dev *dev)
5942 {
5943         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5944         int count = 0;
5945         int g_ind = 0;
5946         const struct reg_info *reg_group;
5947         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
5948                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
5949
5950         while ((reg_group = reg_set[g_ind++]))
5951                 count += ixgbe_regs_group_count(reg_group);
5952
5953         return count;
5954 }
5955
5956 static int
5957 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
5958 {
5959         int count = 0;
5960         int g_ind = 0;
5961         const struct reg_info *reg_group;
5962
5963         while ((reg_group = ixgbevf_regs[g_ind++]))
5964                 count += ixgbe_regs_group_count(reg_group);
5965
5966         return count;
5967 }
5968
5969 static int
5970 ixgbe_get_regs(struct rte_eth_dev *dev,
5971               struct rte_dev_reg_info *regs)
5972 {
5973         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5974         uint32_t *data = regs->data;
5975         int g_ind = 0;
5976         int count = 0;
5977         const struct reg_info *reg_group;
5978         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
5979                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
5980
5981         /* Support only full register dump */
5982         if ((regs->length == 0) ||
5983             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
5984                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
5985                         hw->device_id;
5986                 while ((reg_group = reg_set[g_ind++]))
5987                         count += ixgbe_read_regs_group(dev, &data[count],
5988                                 reg_group);
5989                 return 0;
5990         }
5991
5992         return -ENOTSUP;
5993 }
5994
5995 static int
5996 ixgbevf_get_regs(struct rte_eth_dev *dev,
5997                 struct rte_dev_reg_info *regs)
5998 {
5999         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6000         uint32_t *data = regs->data;
6001         int g_ind = 0;
6002         int count = 0;
6003         const struct reg_info *reg_group;
6004
6005         /* Support only full register dump */
6006         if ((regs->length == 0) ||
6007             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
6008                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
6009                         hw->device_id;
6010                 while ((reg_group = ixgbevf_regs[g_ind++]))
6011                         count += ixgbe_read_regs_group(dev, &data[count],
6012                                                       reg_group);
6013                 return 0;
6014         }
6015
6016         return -ENOTSUP;
6017 }
6018
6019 static int
6020 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
6021 {
6022         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6023
6024         /* The EEPROM is addressed in 16-bit words; report its size in bytes. */
6025         return hw->eeprom.word_size * 2;
6026 }
6027
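/*
 * EEPROM access helpers: offsets and lengths are given in bytes and are
 * converted to the 16-bit words used by the base-code buffer routines.
 */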
6028 static int
6029 ixgbe_get_eeprom(struct rte_eth_dev *dev,
6030                 struct rte_dev_eeprom_info *in_eeprom)
6031 {
6032         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6033         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
6034         uint16_t *data = in_eeprom->data;
6035         int first, length;
6036
6037         first = in_eeprom->offset >> 1;
6038         length = in_eeprom->length >> 1;
6039         if ((first > hw->eeprom.word_size) ||
6040             ((first + length) > hw->eeprom.word_size))
6041                 return -EINVAL;
6042
6043         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
6044
6045         return eeprom->ops.read_buffer(hw, first, length, data);
6046 }
6047
6048 static int
6049 ixgbe_set_eeprom(struct rte_eth_dev *dev,
6050                 struct rte_dev_eeprom_info *in_eeprom)
6051 {
6052         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6053         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
6054         uint16_t *data = in_eeprom->data;
6055         int first, length;
6056
6057         first = in_eeprom->offset >> 1;
6058         length = in_eeprom->length >> 1;
6059         if ((first > hw->eeprom.word_size) ||
6060             ((first + length) > hw->eeprom.word_size))
6061                 return -EINVAL;
6062
6063         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
6064
6065         return eeprom->ops.write_buffer(hw, first, length, data);
6066 }
6067
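/*
 * RSS register layout helpers: the redirection table size and register
 * addresses differ between MAC types (X550 exposes 512 RETA entries, and
 * VFs use the VF-specific register set).
 */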
6068 uint16_t
6069 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
6070         switch (mac_type) {
6071         case ixgbe_mac_X550:
6072         case ixgbe_mac_X550EM_x:
6073                 return ETH_RSS_RETA_SIZE_512;
6074         case ixgbe_mac_X550_vf:
6075         case ixgbe_mac_X550EM_x_vf:
6076                 return ETH_RSS_RETA_SIZE_64;
6077         default:
6078                 return ETH_RSS_RETA_SIZE_128;
6079         }
6080 }
6081
6082 uint32_t
6083 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
6084         switch (mac_type) {
6085         case ixgbe_mac_X550:
6086         case ixgbe_mac_X550EM_x:
6087                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
6088                         return IXGBE_RETA(reta_idx >> 2);
6089                 else
6090                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
6091         case ixgbe_mac_X550_vf:
6092         case ixgbe_mac_X550EM_x_vf:
6093                 return IXGBE_VFRETA(reta_idx >> 2);
6094         default:
6095                 return IXGBE_RETA(reta_idx >> 2);
6096         }
6097 }
6098
6099 uint32_t
6100 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
6101         switch (mac_type) {
6102         case ixgbe_mac_X550_vf:
6103         case ixgbe_mac_X550EM_x_vf:
6104                 return IXGBE_VFMRQC;
6105         default:
6106                 return IXGBE_MRQC;
6107         }
6108 }
6109
6110 uint32_t
6111 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
6112         switch (mac_type) {
6113         case ixgbe_mac_X550_vf:
6114         case ixgbe_mac_X550EM_x_vf:
6115                 return IXGBE_VFRSSRK(i);
6116         default:
6117                 return IXGBE_RSSRK(i);
6118         }
6119 }
6120
6121 bool
6122 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
6123         switch (mac_type) {
6124         case ixgbe_mac_82599_vf:
6125         case ixgbe_mac_X540_vf:
6126                 return 0;
6127         default:
6128                 return 1;
6129         }
6130 }
6131
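/*
 * Fill the generic DCB info from the stored DCB configuration: number of
 * TCs, priority-to-TC mapping, per-TC RX/TX queue ranges and the per-TC
 * bandwidth shares.
 */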
6132 static int
6133 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
6134                         struct rte_eth_dcb_info *dcb_info)
6135 {
6136         struct ixgbe_dcb_config *dcb_config =
6137                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
6138         struct ixgbe_dcb_tc_config *tc;
6139         uint8_t i, j;
6140
6141         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
6142                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
6143         else
6144                 dcb_info->nb_tcs = 1;
6145
6146         if (dcb_config->vt_mode) { /* vt is enabled */
6147                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
6148                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
6149                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
6150                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
6151                 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
6152                         for (j = 0; j < dcb_info->nb_tcs; j++) {
6153                                 dcb_info->tc_queue.tc_rxq[i][j].base =
6154                                                 i * dcb_info->nb_tcs + j;
6155                                 dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
6156                                 dcb_info->tc_queue.tc_txq[i][j].base =
6157                                                 i * dcb_info->nb_tcs + j;
6158                                 dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
6159                         }
6160                 }
6161         } else { /* vt is disabled */
6162                 struct rte_eth_dcb_rx_conf *rx_conf =
6163                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
6164                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
6165                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
6166                 if (dcb_info->nb_tcs == ETH_4_TCS) {
6167                         for (i = 0; i < dcb_info->nb_tcs; i++) {
6168                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
6169                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
6170                         }
6171                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
6172                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
6173                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
6174                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
6175                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
6176                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
6177                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
6178                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
6179                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
6180                         for (i = 0; i < dcb_info->nb_tcs; i++) {
6181                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
6182                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
6183                         }
6184                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
6185                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
6186                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
6187                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
6188                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
6189                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
6190                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
6191                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
6192                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
6193                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
6194                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
6195                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
6196                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
6197                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
6198                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
6199                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
6200                 }
6201         }
6202         for (i = 0; i < dcb_info->nb_tcs; i++) {
6203                 tc = &dcb_config->tc_config[i];
6204                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
6205         }
6206         return 0;
6207 }
6208
6209 static struct rte_driver rte_ixgbe_driver = {
6210         .type = PMD_PDEV,
6211         .init = rte_ixgbe_pmd_init,
6212 };
6213
6214 static struct rte_driver rte_ixgbevf_driver = {
6215         .type = PMD_PDEV,
6216         .init = rte_ixgbevf_pmd_init,
6217 };
6218
6219 PMD_REGISTER_DRIVER(rte_ixgbe_driver);
6220 PMD_REGISTER_DRIVER(rte_ixgbevf_driver);