ixgbe: fix Rx errors statistics for UDP checksum
[dpdk.git] / drivers / net / ixgbe / ixgbe_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40
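
/*
 * Since both thresholds are expressed in units of 1024 bytes, the defaults
 * above amount to asserting XOFF once roughly 0x80 * 1024 = 128 KB of the
 * Rx packet buffer is in use, and XON again once usage falls back to
 * 0x40 * 1024 = 64 KB.
 */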

/* Default minimum inter-interrupt interval for EITR configuration */
#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
#define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */

/*
 * Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

#define IXGBE_HKEY_MAX_INDEX 10

/* Additional timesync values. */
#define IXGBE_TIMINCA_16NS_SHIFT 24
#define IXGBE_TIMINCA_INCVALUE   16000000
#define IXGBE_TIMINCA_INIT       ((0x02 << IXGBE_TIMINCA_16NS_SHIFT) \
                                  | IXGBE_TIMINCA_INCVALUE)
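
/*
 * For reference, 16000000 = 0xF42400, so IXGBE_TIMINCA_INIT expands to
 * (0x02 << 24) | 0xF42400 = 0x02F42400: an increment period of 2 in
 * bits 31:24 and the increment value in the low 24 bits.
 */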

static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
static int  ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
                                struct rte_eth_xstats *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
                                  struct rte_eth_xstats *xstats, unsigned n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);
static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
                int on);
static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
                void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
                                           struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                            uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                             uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                                 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
                uint16_t rx_mask, uint8_t on);
static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
                uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                struct rte_eth_mirror_conf *mirror_conf,
                uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                           uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
                uint16_t queue_idx, uint16_t tx_rate);
static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
                uint16_t tx_rate, uint64_t q_msk);

static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
                                 struct ether_addr *mac_addr,
                                 uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
                                             struct ether_addr *mac_addr);
static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter,
                        bool add);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *filter,
                        bool add);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *filter);
static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter,
                        bool add);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                      struct ether_addr *mc_addr_set,
                                      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
                                   struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
                            struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
                                struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp,
                                            uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp);

/*
 * Define VF stats macros for registers that are not "cleared on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
        uint32_t latest = IXGBE_READ_REG(hw, reg);              \
        cur += (latest - last) & UINT_MAX;                      \
        last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
        u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
        u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
        u64 latest = ((new_msb << 32) | new_lsb);                \
        cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
        last = latest;                                           \
}
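
/*
 * Both macros accumulate deltas so that counter wrap-around is handled
 * transparently. A worked example for the 36-bit case: with
 * last = 0xFFFFFFFF0 and latest = 0x000000010 (the register wrapped),
 * 0x1000000000 + latest - last = 0x20, masked to 36 bits, so cur grows
 * by the 0x20 packets actually counted. The 32-bit case relies on the
 * same modular arithmetic, since unsigned subtraction wraps modulo 2^32.
 */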

#define IXGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
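
/*
 * Each queue maps to one bit of the hwstrip bitmap. For example, with
 * 32-bit bitmap words (NBBY bits per byte, i.e. 8), queue 37 lands in
 * word idx = 37 / 32 = 1 at bit = 37 % 32 = 5.
 */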

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {

#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{ .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_TXD_ALIGN,
};

static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
        .dev_stop             = ixgbe_dev_stop,
        .dev_set_link_up      = ixgbe_dev_set_link_up,
        .dev_set_link_down    = ixgbe_dev_set_link_down,
        .dev_close            = ixgbe_dev_close,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
        .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
        .allmulticast_disable = ixgbe_dev_allmulticast_disable,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbe_dev_stats_get,
        .xstats_get           = ixgbe_dev_xstats_get,
        .stats_reset          = ixgbe_dev_stats_reset,
        .xstats_reset         = ixgbe_dev_xstats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
        .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
        .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
        .rx_queue_start       = ixgbe_dev_rx_queue_start,
        .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
        .tx_queue_start       = ixgbe_dev_tx_queue_start,
        .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
        .flow_ctrl_get        = ixgbe_flow_ctrl_get,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
        .mac_addr_set         = ixgbe_set_default_mac_addr,
        .uc_hash_table_set    = ixgbe_uc_hash_table_set,
        .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
        .mirror_rule_set      = ixgbe_mirror_rule_set,
        .mirror_rule_reset    = ixgbe_mirror_rule_reset,
        .set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
        .set_vf_rx            = ixgbe_set_pool_rx,
        .set_vf_tx            = ixgbe_set_pool_tx,
        .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
        .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
        .set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
#ifdef RTE_NIC_BYPASS
        .bypass_init          = ixgbe_bypass_init,
        .bypass_state_set     = ixgbe_bypass_state_store,
        .bypass_state_show    = ixgbe_bypass_state_show,
        .bypass_event_set     = ixgbe_bypass_event_store,
        .bypass_event_show    = ixgbe_bypass_event_show,
        .bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
        .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
        .bypass_ver_show      = ixgbe_bypass_ver_show,
        .bypass_wd_reset      = ixgbe_bypass_wd_reset,
#endif /* RTE_NIC_BYPASS */
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
        .filter_ctrl          = ixgbe_dev_filter_ctrl,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .timesync_enable      = ixgbe_timesync_enable,
        .timesync_disable     = ixgbe_timesync_disable,
        .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
        .get_reg_length       = ixgbe_get_reg_length,
        .get_reg              = ixgbe_get_regs,
        .get_eeprom_length    = ixgbe_get_eeprom_length,
        .get_eeprom           = ixgbe_get_eeprom,
        .set_eeprom           = ixgbe_set_eeprom,
        .get_dcb_info         = ixgbe_dev_get_dcb_info,
};

/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_configure        = ixgbevf_dev_configure,
        .dev_start            = ixgbevf_dev_start,
        .dev_stop             = ixgbevf_dev_stop,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
        .xstats_get           = ixgbevf_dev_xstats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .xstats_reset         = ixgbevf_dev_stats_reset,
        .dev_close            = ixgbevf_dev_close,
        .dev_infos_get        = ixgbevf_dev_info_get,
        .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
        .mac_addr_add         = ixgbevf_add_mac_addr,
        .mac_addr_remove      = ixgbevf_remove_mac_addr,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .mac_addr_set         = ixgbevf_set_default_mac_addr,
        .get_reg_length       = ixgbevf_get_reg_length,
        .get_reg              = ixgbevf_get_regs,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_ixgbe_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
        {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
        {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
        {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
        {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
        {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
        {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
        {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
        {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
        {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
        {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
        {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
        {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
        {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
        {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
        {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                prc1023)},
        {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                prc1522)},
        {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
        {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
        {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
        {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
        {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
        {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
        {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
        {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
        {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
        {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
        {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
        {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
        {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
        {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
        {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
        {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
        {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                ptc1023)},
        {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                ptc1522)},
        {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
        {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
        {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
        {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

        {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_add)},
        {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_remove)},
        {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fadd)},
        {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fremove)},
        {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
                fdirmatch)},
        {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
                fdirmiss)},

        {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
        {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
        {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
                fclast)},
        {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
        {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
        {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
        {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
        {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
                fcoe_noddp)},
        {"rx_fcoe_no_direct_data_placement_ext_buff",
                offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

        {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxontxc)},
        {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxonrxc)},
        {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxofftxc)},
        {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxoffrxc)},
        {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
                           sizeof(rte_ixgbe_stats_strings[0]))

/* Per-queue statistics */
#define IXGBE_NB_8_PER_Q_STATS (8 * 7)
#define IXGBE_NB_16_PER_Q_STATS (16 * 5)
#define IXGBE_NB_Q_STATS (IXGBE_NB_8_PER_Q_STATS + IXGBE_NB_16_PER_Q_STATS)
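/* 8 * 7 + 16 * 5 = 56 + 80 = 136 per-queue xstats entries in total. */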

#define IXGBE_NB_XSTATS (IXGBE_NB_HW_STATS + IXGBE_NB_Q_STATS)

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
        {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
                sizeof(rte_ixgbevf_stats_strings[0]))

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer where the link status is saved.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);
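        /*
         * rte_eth_link fits in a single 64-bit word here, so the copy is
         * done with one atomic 64-bit compare-and-set rather than a plain
         * struct assignment, which avoids observing a torn link status.
         */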

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_avago:
        case ixgbe_phy_sfp_ftl:
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
        case ixgbe_phy_sfp_passive_tyco:
        case ixgbe_phy_sfp_passive_unknown:
                return 1;
        default:
                return 0;
        }
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = ixgbe_reset_hw(hw);

        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
        IXGBE_WRITE_FLUSH(hw);

        return status;
}

static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        if (hw->mac.type == ixgbe_mac_82598EB) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
        }
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
        uint32_t i;

        for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
}

static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_id,
                                  uint8_t stat_idx,
                                  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
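/*
 * Each QSM register packs four 8-bit queue-to-stat-index fields. For
 * example, queue_id 10 selects register n = 10 / 4 = 2 and field
 * offset 10 % 4 = 2, i.e. bits 23:16 of RQSMR[2] (or TQSM[2] for Tx).
 */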

        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
        uint32_t qsmr_mask = 0;
        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
        uint32_t q_map;
        uint8_t n, offset;

        if ((hw->mac.type != ixgbe_mac_82599EB) &&
                (hw->mac.type != ixgbe_mac_X540) &&
                (hw->mac.type != ixgbe_mac_X550) &&
                (hw->mac.type != ixgbe_mac_X550EM_x))
                return -ENOSYS;

        PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);

        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] &= ~clearing_mask;
        else
                stat_mappings->rqsmr[n] &= ~clearing_mask;

        q_map = (uint32_t)stat_idx;
        q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
        qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] |= qsmr_mask;
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;

        PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
        PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

        /* Now write the mapping in the appropriate register */
        if (is_rx) {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        } else {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
        return 0;
}

static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i;

        /* write whatever was in stat mapping table to the NIC */
        for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
                /* rx */
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

                /* tx */
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
        }
}

static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
        uint8_t i;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

        dcb_config->num_tcs.pg_tcs = dcb_max_tc;
        dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
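        /*
         * Split bandwidth roughly evenly across TCs: with the 82599
         * maximum of 8 traffic classes this gives 100 / 8 = 12 percent
         * per TC plus one extra point for odd-numbered TCs, so
         * 4 * 12 + 4 * 13 = 100.
         */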
        for (i = 0; i < dcb_max_tc; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->pfc = ixgbe_dcb_pfc_disabled;
        }

        /* Initialize default user to priority mapping, UPx->TC0 */
        tc = &dcb_config->tc_config[0];
        tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
        tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
        for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
                dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
                dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
        }
        dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
        dcb_config->pfc_mode_enable = false;
        dcb_config->vt_mode = true;
        dcb_config->round_robin_enable = false;
        /* support all DCB capabilities in 82599 */
        dcb_config->support.capabilities = 0xFF;

        /* we only support 4 TCs for X540, X550 */
        if (hw->mac.type == ixgbe_mac_X540 ||
                hw->mac.type == ixgbe_mac_X550 ||
                hw->mac.type == ixgbe_mac_X550EM_x) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
        uint16_t mask;

        /*
         * The PHY lock should not fail at this early stage. If it does, it
         * is due to an improper exit of the application.
         * So force the release of the faulty lock. Release of the common
         * lock is done automatically by the swfw_sync function.
         */
        mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
        }
        ixgbe_release_swfw_semaphore(hw, mask);

        /*
         * These ones are trickier since they are common to all ports; but
         * swfw_sync retries for long enough (1 s) to be almost sure that if
         * the lock cannot be taken, it is due to an improper lock of the
         * semaphore.
         */
        mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");
        }
        ixgbe_release_swfw_semaphore(hw, mask);
}

/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
        struct ixgbe_dcb_config *dcb_config =
                IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
        uint32_t ctrl_ext;
        uint16_t csum;
        int diag, i;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ixgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ixgbe_tx_queue *txq;
                /*
                 * The TX function used in the primary process was set by the
                 * last queue initialized; note the Tx queues may not have
                 * been initialized by the primary process yet.
                 */
                if (eth_dev->data->tx_queues) {
                        txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
                        ixgbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default TX function if we get here */
                        PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
                                             "Using default TX function.");
                }

                ixgbe_set_rx_function(eth_dev);

                return 0;
        }
        pci_dev = eth_dev->pci_dev;

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->allow_unsupported_sfp = 1;

        /* Initialize the shared code (base driver) */
#ifdef RTE_NIC_BYPASS
        diag = ixgbe_bypass_init_shared_code(hw);
#else
        diag = ixgbe_init_shared_code(hw);
#endif /* RTE_NIC_BYPASS */

        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
                return -EIO;
        }

        /* pick up the PCI bus settings for reporting later */
        ixgbe_get_bus_info(hw);

        /* Unlock any pending hardware semaphore */
        ixgbe_swfw_lock_reset(hw);

        /* Initialize DCB configuration */
        memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
        ixgbe_dcb_init(hw, dcb_config);
        /* Get Hardware Flow Control setting */
        hw->fc.requested_mode = ixgbe_fc_full;
        hw->fc.current_mode = ixgbe_fc_full;
        hw->fc.pause_time = IXGBE_FC_PAUSE;
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                hw->fc.low_water[i] = IXGBE_FC_LO;
                hw->fc.high_water[i] = IXGBE_FC_HI;
        }
        hw->fc.send_xon = 1;

        /* Make sure we have a good EEPROM before we read from it */
        diag = ixgbe_validate_eeprom_checksum(hw, &csum);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
                return -EIO;
        }

#ifdef RTE_NIC_BYPASS
        diag = ixgbe_bypass_init_hw(hw);
#else
        diag = ixgbe_init_hw(hw);
#endif /* RTE_NIC_BYPASS */

        /*
         * Devices with copper phys will fail to initialise if ixgbe_init_hw()
         * is called too soon after the kernel driver unbinding/binding occurs.
         * The failure occurs in ixgbe_identify_phy_generic() for all devices,
         * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
         * also called. See ixgbe_identify_phy_82599(). The reason for the
         * failure is not known, and it only occurs when virtualisation
         * features are disabled in the BIOS. A delay of 100ms was found to be
         * enough by trial-and-error, and is doubled to be safe.
         */
        if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
                rte_delay_ms(200);
                diag = ixgbe_init_hw(hw);
        }

        if (diag == IXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                    "LOM. Please be aware there may be issues associated "
                    "with your hardware.");
                PMD_INIT_LOG(ERR, "If you are experiencing problems "
                    "please contact your Intel or hardware representative "
                    "who provided you with this hardware.");
        } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
                PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        if (diag) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
                return -EIO;
        }

        /* Reset the hw statistics */
        ixgbe_dev_stats_reset(eth_dev);

        /* disable interrupt */
        ixgbe_disable_intr(hw);

        /* reset mappings for queue statistics hw counters */
        ixgbe_reset_qstat_mappings(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
                        hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %u bytes needed to store "
                        "MAC addresses",
                        ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
                        IXGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC addresses",
                        ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
                return -ENOMEM;
        }

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* initialize the hw strip bitmap */
        memset(hwstrip, 0, sizeof(*hwstrip));

        /* initialize PF if max_vfs not zero */
        ixgbe_pf_host_init(eth_dev);

        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        /* let hardware know driver is loaded */
        ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
        IXGBE_WRITE_FLUSH(hw);

        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
                             (int)hw->mac.type, (int)hw->phy.type,
                             (int)hw->phy.sfp_type);
        else
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                             (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);

        rte_intr_callback_register(&pci_dev->intr_handle,
                                   ixgbe_dev_interrupt_handler,
                                   (void *)eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(&pci_dev->intr_handle);

        /* enable support intr */
        ixgbe_enable_intr(eth_dev);

        /* initialize 5tuple filter list */
        TAILQ_INIT(&filter_info->fivetuple_list);
        memset(filter_info->fivetuple_mask, 0,
                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);

        return 0;
}

static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        pci_dev = eth_dev->pci_dev;

        if (hw->adapter_stopped == 0)
                ixgbe_dev_close(eth_dev);

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        /* Unlock any pending hardware semaphore */
        ixgbe_swfw_lock_reset(hw);

        /* disable uio intr before callback unregister */
        rte_intr_disable(&(pci_dev->intr_handle));
        rte_intr_callback_unregister(&(pci_dev->intr_handle),
                ixgbe_dev_interrupt_handler, (void *)eth_dev);

        /* uninitialize PF if max_vfs not zero */
        ixgbe_pf_host_uninit(eth_dev);

        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;

        rte_free(eth_dev->data->hash_mac_addrs);
        eth_dev->data->hash_mac_addrs = NULL;

        return 0;
}

/*
 * Negotiate mailbox API version with the PF.
 * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
 * Then we try to negotiate starting with the most recent one.
 * If all negotiation attempts fail, then we will proceed with
 * the default one (ixgbe_mbox_api_10).
 */
static void
ixgbevf_negotiate_api(struct ixgbe_hw *hw)
{
        int32_t i;

        /* start with highest supported, proceed down */
        static const enum ixgbe_pfvf_api_rev sup_ver[] = {
                ixgbe_mbox_api_11,
                ixgbe_mbox_api_10,
        };

        for (i = 0;
                        i != RTE_DIM(sup_ver) &&
                        ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
                        i++)
                ;
}
1208
1209 static void
1210 generate_random_mac_addr(struct ether_addr *mac_addr)
1211 {
1212         uint64_t random;
1213
1214         /* Set Organizationally Unique Identifier (OUI) prefix. */
1215         mac_addr->addr_bytes[0] = 0x00;
1216         mac_addr->addr_bytes[1] = 0x09;
1217         mac_addr->addr_bytes[2] = 0xC0;
1218         /* Force indication of locally assigned MAC address. */
1219         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1220         /* Generate the last 3 bytes of the MAC address with a random number. */
1221         random = rte_rand();
1222         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1223 }
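/*
 * Worked example (illustrative): with the 00:09:C0 OUI prefix and the
 * locally-administered bit (ETHER_LOCAL_ADMIN_ADDR, 0x02) OR'ed into
 * byte 0, every generated address has the form 02:09:c0:xx:xx:xx, the
 * last three bytes coming from rte_rand(). A sanity check could be:
 *
 *     struct ether_addr mac;
 *     generate_random_mac_addr(&mac);
 *     RTE_VERIFY(mac.addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR);
 *     RTE_VERIFY((mac.addr_bytes[0] & 0x01) == 0);   unicast bit clear
 */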
1224
1225 /*
1226  * Virtual Function device init
1227  */
1228 static int
1229 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1230 {
1231         int diag;
1232         uint32_t tc, tcs;
1233         struct rte_pci_device *pci_dev;
1234         struct ixgbe_hw *hw =
1235                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1236         struct ixgbe_vfta * shadow_vfta =
1237                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1238         struct ixgbe_hwstrip *hwstrip =
1239                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1240         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1241
1242         PMD_INIT_FUNC_TRACE();
1243
1244         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1245         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1246         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1247
1248         /* for secondary processes, we don't initialise any further as primary
1249          * has already done this work. Only check whether we need a different
1250          * RX function */
1251         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1252                 if (eth_dev->data->scattered_rx)
1253                         eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
1254                 return 0;
1255         }
1256
1257         pci_dev = eth_dev->pci_dev;
1258
1259         rte_eth_copy_pci_info(eth_dev, pci_dev);
1260
1261         hw->device_id = pci_dev->id.device_id;
1262         hw->vendor_id = pci_dev->id.vendor_id;
1263         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1264
1265         /* initialize the vfta */
1266         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1267
1268         /* initialize the hw strip bitmap*/
1269         memset(hwstrip, 0, sizeof(*hwstrip));
1270
1271         /* Initialize the shared code (base driver) */
1272         diag = ixgbe_init_shared_code(hw);
1273         if (diag != IXGBE_SUCCESS) {
1274                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1275                 return -EIO;
1276         }
1277
1278         /* init_mailbox_params */
1279         hw->mbx.ops.init_params(hw);
1280
1281         /* Reset the hw statistics */
1282         ixgbevf_dev_stats_reset(eth_dev);
1283
1284         /* Disable the interrupts for VF */
1285         ixgbevf_intr_disable(hw);
1286
1287         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1288         diag = hw->mac.ops.reset_hw(hw);
1289
1290         /*
1291          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1292          * the underlying PF driver has not assigned a MAC address to the VF.
1293          * In this case, assign a random MAC address.
1294          */
1295         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1296                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1297                 return diag;
1298         }
1299
1300         /* negotiate mailbox API version to use with the PF. */
1301         ixgbevf_negotiate_api(hw);
1302
1303         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1304         ixgbevf_get_queues(hw, &tcs, &tc);
1305
1306         /* Allocate memory for storing MAC addresses */
1307         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1308                         hw->mac.num_rar_entries, 0);
1309         if (eth_dev->data->mac_addrs == NULL) {
1310                 PMD_INIT_LOG(ERR,
1311                         "Failed to allocate %u bytes needed to store "
1312                         "MAC addresses",
1313                         ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1314                 return -ENOMEM;
1315         }
1316
1317         /* Generate a random MAC address, if none was assigned by PF. */
1318         if (is_zero_ether_addr(perm_addr)) {
1319                 generate_random_mac_addr(perm_addr);
1320                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1321                 if (diag) {
1322                         rte_free(eth_dev->data->mac_addrs);
1323                         eth_dev->data->mac_addrs = NULL;
1324                         return diag;
1325                 }
1326                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1327                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1328                              "%02x:%02x:%02x:%02x:%02x:%02x",
1329                              perm_addr->addr_bytes[0],
1330                              perm_addr->addr_bytes[1],
1331                              perm_addr->addr_bytes[2],
1332                              perm_addr->addr_bytes[3],
1333                              perm_addr->addr_bytes[4],
1334                              perm_addr->addr_bytes[5]);
1335         }
1336
1337         /* Copy the permanent MAC address */
1338         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1339
1340         /* reset the hardware with the new settings */
1341         diag = hw->mac.ops.start_hw(hw);
1342         switch (diag) {
1343         case 0:
1344                 break;
1345
1346         default:
1347                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1348                 return -EIO;
1349         }
1350
1351         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1352                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1353                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1354
1355         return 0;
1356 }
1357
1358 /* Virtual Function device uninit */
1359
1360 static int
1361 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1362 {
1363         struct ixgbe_hw *hw;
1364         unsigned i;
1365
1366         PMD_INIT_FUNC_TRACE();
1367
1368         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1369                 return -EPERM;
1370
1371         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1372
1373         if (hw->adapter_stopped == 0)
1374                 ixgbevf_dev_close(eth_dev);
1375
1376         eth_dev->dev_ops = NULL;
1377         eth_dev->rx_pkt_burst = NULL;
1378         eth_dev->tx_pkt_burst = NULL;
1379
1380         /* Disable the interrupts for VF */
1381         ixgbevf_intr_disable(hw);
1382
1383         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1384                 ixgbe_dev_rx_queue_release(eth_dev->data->rx_queues[i]);
1385                 eth_dev->data->rx_queues[i] = NULL;
1386         }
1387         eth_dev->data->nb_rx_queues = 0;
1388
1389         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1390                 ixgbe_dev_tx_queue_release(eth_dev->data->tx_queues[i]);
1391                 eth_dev->data->tx_queues[i] = NULL;
1392         }
1393         eth_dev->data->nb_tx_queues = 0;
1394
1395         rte_free(eth_dev->data->mac_addrs);
1396         eth_dev->data->mac_addrs = NULL;
1397
1398         return 0;
1399 }
1400
1401 static struct eth_driver rte_ixgbe_pmd = {
1402         .pci_drv = {
1403                 .name = "rte_ixgbe_pmd",
1404                 .id_table = pci_id_ixgbe_map,
1405                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
1406                         RTE_PCI_DRV_DETACHABLE,
1407         },
1408         .eth_dev_init = eth_ixgbe_dev_init,
1409         .eth_dev_uninit = eth_ixgbe_dev_uninit,
1410         .dev_private_size = sizeof(struct ixgbe_adapter),
1411 };
1412
1413 /*
1414  * virtual function driver struct
1415  */
1416 static struct eth_driver rte_ixgbevf_pmd = {
1417         .pci_drv = {
1418                 .name = "rte_ixgbevf_pmd",
1419                 .id_table = pci_id_ixgbevf_map,
1420                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
1421         },
1422         .eth_dev_init = eth_ixgbevf_dev_init,
1423         .eth_dev_uninit = eth_ixgbevf_dev_uninit,
1424         .dev_private_size = sizeof(struct ixgbe_adapter),
1425 };
1426
1427 /*
1428  * Driver initialization routine.
1429  * Invoked once at EAL init time.
1430  * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
1431  */
1432 static int
1433 rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
1434 {
1435         PMD_INIT_FUNC_TRACE();
1436
1437         rte_eth_driver_register(&rte_ixgbe_pmd);
1438         return 0;
1439 }
1440
1441 /*
1442  * VF Driver initialization routine.
1443  * Invoked once at EAL init time.
1444  * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
1445  */
1446 static int
1447 rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
1448 {
1449         PMD_INIT_FUNC_TRACE();
1450
1451         rte_eth_driver_register(&rte_ixgbevf_pmd);
1452         return 0;
1453 }
1454
1455 static int
1456 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1457 {
1458         struct ixgbe_hw *hw =
1459                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1460         struct ixgbe_vfta * shadow_vfta =
1461                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1462         uint32_t vfta;
1463         uint32_t vid_idx;
1464         uint32_t vid_bit;
1465
1466         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1467         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1468         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1469         if (on)
1470                 vfta |= vid_bit;
1471         else
1472                 vfta &= ~vid_bit;
1473         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1474
1475         /* update local VFTA copy */
1476         shadow_vfta->vfta[vid_idx] = vfta;
1477
1478         return 0;
1479 }
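/*
 * Worked example (illustrative): the VFTA is 128 32-bit registers, one
 * bit per possible VLAN ID (128 * 32 = 4096). For vlan_id = 100:
 *
 *     vid_idx = (100 >> 5) & 0x7F = 3          VFTA register 3
 *     vid_bit = 1 << (100 & 0x1F) = 1 << 4     bit 4 within it
 *
 * so enabling VLAN 100 sets bit 4 of VFTA[3]. The shadow copy mirrors
 * the register so ixgbe_vlan_hw_filter_enable() below can replay the
 * whole table after the VLNCTRL filter is re-enabled.
 */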
1480
1481 static void
1482 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1483 {
1484         if (on)
1485                 ixgbe_vlan_hw_strip_enable(dev, queue);
1486         else
1487                 ixgbe_vlan_hw_strip_disable(dev, queue);
1488 }
1489
1490 static void
1491 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
1492 {
1493         struct ixgbe_hw *hw =
1494                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1495
1496         /* Only the high 16 bits are valid */
1497         IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
1498 }
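/*
 * Illustrative note: only the upper 16 bits of EXVET carry the extended
 * VLAN Ethertype, hence the << 16 above. Selecting the 802.1ad TPID:
 *
 *     ixgbe_vlan_tpid_set(dev, 0x88A8);        EXVET becomes 0x88A80000
 */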
1499
1500 void
1501 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1502 {
1503         struct ixgbe_hw *hw =
1504                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1505         uint32_t vlnctrl;
1506
1507         PMD_INIT_FUNC_TRACE();
1508
1509         /* Filter Table Disable */
1510         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1511         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1512
1513         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1514 }
1515
1516 void
1517 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1518 {
1519         struct ixgbe_hw *hw =
1520                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1521         struct ixgbe_vfta * shadow_vfta =
1522                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1523         uint32_t vlnctrl;
1524         uint16_t i;
1525
1526         PMD_INIT_FUNC_TRACE();
1527
1528         /* Filter Table Enable */
1529         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1530         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1531         vlnctrl |= IXGBE_VLNCTRL_VFE;
1532
1533         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1534
1535         /* write whatever is in local vfta copy */
1536         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1537                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1538 }
1539
1540 static void
1541 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1542 {
1543         struct ixgbe_hwstrip *hwstrip =
1544                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1545
1546         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1547                 return;
1548
1549         if (on)
1550                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1551         else
1552                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1553 }
1554
1555 static void
1556 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1557 {
1558         struct ixgbe_hw *hw =
1559                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1560         uint32_t ctrl;
1561
1562         PMD_INIT_FUNC_TRACE();
1563
1564         if (hw->mac.type == ixgbe_mac_82598EB) {
1565                 /* No queue level support */
1566                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
1567                 return;
1568         }
1569         else {
1570                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1571                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1572                 ctrl &= ~IXGBE_RXDCTL_VME;
1573                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1574         }
1575         /* record the per-queue HW strip setting */
1576         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1577 }
1578
1579 static void
1580 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1581 {
1582         struct ixgbe_hw *hw =
1583                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1584         uint32_t ctrl;
1585
1586         PMD_INIT_FUNC_TRACE();
1587
1588         if (hw->mac.type == ixgbe_mac_82598EB) {
1589                 /* No queue level supported */
1590                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
1591                 return;
1592         }
1593         else {
1594                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1595                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1596                 ctrl |= IXGBE_RXDCTL_VME;
1597                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1598         }
1599         /* record the per-queue HW strip setting */
1600         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1601 }
1602
1603 void
1604 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
1605 {
1606         struct ixgbe_hw *hw =
1607                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1608         uint32_t ctrl;
1609         uint16_t i;
1610
1611         PMD_INIT_FUNC_TRACE();
1612
1613         if (hw->mac.type == ixgbe_mac_82598EB) {
1614                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1615                 ctrl &= ~IXGBE_VLNCTRL_VME;
1616                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1617         }
1618         else {
1619                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1620                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1621                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1622                         ctrl &= ~IXGBE_RXDCTL_VME;
1623                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1624
1625                         /* record the per-queue HW strip setting */
1626                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
1627                 }
1628         }
1629 }
1630
1631 void
1632 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
1633 {
1634         struct ixgbe_hw *hw =
1635                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1636         uint32_t ctrl;
1637         uint16_t i;
1638
1639         PMD_INIT_FUNC_TRACE();
1640
1641         if (hw->mac.type == ixgbe_mac_82598EB) {
1642                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1643                 ctrl |= IXGBE_VLNCTRL_VME;
1644                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1645         }
1646         else {
1647                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1648                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1649                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1650                         ctrl |= IXGBE_RXDCTL_VME;
1651                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1652
1653                         /* record the per-queue HW strip setting */
1654                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
1655                 }
1656         }
1657 }
1658
1659 static void
1660 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1661 {
1662         struct ixgbe_hw *hw =
1663                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1664         uint32_t ctrl;
1665
1666         PMD_INIT_FUNC_TRACE();
1667
1668         /* DMATXCTRL: Generic Double VLAN Disable */
1669         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1670         ctrl &= ~IXGBE_DMATXCTL_GDV;
1671         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1672
1673         /* CTRL_EXT: Global Double VLAN Disable */
1674         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1675         ctrl &= ~IXGBE_EXTENDED_VLAN;
1676         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1677
1678 }
1679
1680 static void
1681 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1682 {
1683         struct ixgbe_hw *hw =
1684                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1685         uint32_t ctrl;
1686
1687         PMD_INIT_FUNC_TRACE();
1688
1689         /* DMATXCTRL: Generic Double VLAN Enable */
1690         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1691         ctrl |= IXGBE_DMATXCTL_GDV;
1692         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1693
1694         /* CTRL_EXT: Global Double VLAN Enable */
1695         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1696         ctrl |= IXGBE_EXTENDED_VLAN;
1697         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1698
1699         /*
1700          * VET EXT field in the EXVET register = 0x8100 by default
1701          * So no need to change. Same to VT field of DMATXCTL register
1702          */
1703 }
1704
1705 static void
1706 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1707 {
1708         if (mask & ETH_VLAN_STRIP_MASK) {
1709                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1710                         ixgbe_vlan_hw_strip_enable_all(dev);
1711                 else
1712                         ixgbe_vlan_hw_strip_disable_all(dev);
1713         }
1714
1715         if (mask & ETH_VLAN_FILTER_MASK) {
1716                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1717                         ixgbe_vlan_hw_filter_enable(dev);
1718                 else
1719                         ixgbe_vlan_hw_filter_disable(dev);
1720         }
1721
1722         if (mask & ETH_VLAN_EXTEND_MASK) {
1723                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1724                         ixgbe_vlan_hw_extend_enable(dev);
1725                 else
1726                         ixgbe_vlan_hw_extend_disable(dev);
1727         }
1728 }
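/*
 * Usage sketch (illustrative): each mask bit is applied independently
 * from dev_conf.rxmode, so stripping and filtering alone can be
 * refreshed after flipping the corresponding flags:
 *
 *     dev->data->dev_conf.rxmode.hw_vlan_strip = 1;
 *     dev->data->dev_conf.rxmode.hw_vlan_filter = 0;
 *     ixgbe_vlan_offload_set(dev,
 *             ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
 */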
1729
1730 static void
1731 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1732 {
1733         struct ixgbe_hw *hw =
1734                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1735         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
1736         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1737         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
1738         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
1739 }
1740
1741 static int
1742 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1743 {
1744         switch (nb_rx_q) {
1745         case 1:
1746         case 2:
1747                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
1748                 break;
1749         case 4:
1750                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
1751                 break;
1752         default:
1753                 return -EINVAL;
1754         }
1755
1756         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
1757         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
1758
1759         return 0;
1760 }
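/*
 * Worked example (illustrative): with max_vfs = 31 and nb_rx_q = 4 the
 * device is carved into 32 pools of 4 queues each:
 *
 *     active         = ETH_32_POOLS
 *     nb_q_per_pool  = 4
 *     def_pool_q_idx = 31 * 4 = 124
 *
 * i.e. queues 0-123 serve the 31 VFs and queues 124-127 are left for
 * the PF's own (default) pool.
 */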
1761
1762 static int
1763 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
1764 {
1765         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1766         uint16_t nb_rx_q = dev->data->nb_rx_queues;
1767         uint16_t nb_tx_q = dev->data->nb_tx_queues;
1768
1769         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1770                 /* check multi-queue mode */
1771                 switch (dev_conf->rxmode.mq_mode) {
1772                 case ETH_MQ_RX_VMDQ_DCB:
1773                 case ETH_MQ_RX_VMDQ_DCB_RSS:
1774                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
1775                         PMD_INIT_LOG(ERR, "SRIOV active,"
1776                                         " unsupported mq_mode rx %d.",
1777                                         dev_conf->rxmode.mq_mode);
1778                         return -EINVAL;
1779                 case ETH_MQ_RX_RSS:
1780                 case ETH_MQ_RX_VMDQ_RSS:
1781                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1782                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1783                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1784                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1785                                                 " invalid queue number"
1786                                                 " for VMDQ RSS, allowed"
1787                                                 " value are 1, 2 or 4.");
1788                                         return -EINVAL;
1789                                 }
1790                         break;
1791                 case ETH_MQ_RX_VMDQ_ONLY:
1792                 case ETH_MQ_RX_NONE:
1793                         /* if no mq mode is configured, use the default scheme */
1794                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
1795                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
1796                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
1797                         break;
1798                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1799                         /* SRIOV only works in VMDq enable mode */
1800                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1801                                         " wrong mq_mode rx %d.",
1802                                         dev_conf->rxmode.mq_mode);
1803                         return -EINVAL;
1804                 }
1805
1806                 switch (dev_conf->txmode.mq_mode) {
1807                 case ETH_MQ_TX_VMDQ_DCB:
1808                         /* DCB VMDQ in SRIOV mode, not implemented yet */
1809                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1810                                         " unsupported VMDQ mq_mode tx %d.",
1811                                         dev_conf->txmode.mq_mode);
1812                         return -EINVAL;
1813                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1814                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
1815                         break;
1816                 }
1817
1818                 /* check valid queue number */
1819                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1820                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1821                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1822                                         " queue number must less equal to %d.",
1823                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1824                         return -EINVAL;
1825                 }
1826         } else {
1827                 /* check configuration for vmdq+dcb mode */
1828                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1829                         const struct rte_eth_vmdq_dcb_conf *conf;
1830
1831                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
1832                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1833                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
1834                                 return -EINVAL;
1835                         }
1836                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1837                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1838                                conf->nb_queue_pools == ETH_32_POOLS)) {
1839                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1840                                                 " nb_queue_pools must be %d or %d.",
1841                                                 ETH_16_POOLS, ETH_32_POOLS);
1842                                 return -EINVAL;
1843                         }
1844                 }
1845                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1846                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
1847
1848                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
1849                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1850                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
1851                                 return -EINVAL;
1852                         }
1853                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1854                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1855                                conf->nb_queue_pools == ETH_32_POOLS)) {
1856                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1857                                                 " nb_queue_pools != %d and"
1858                                                 " nb_queue_pools != %d.",
1859                                                 ETH_16_POOLS, ETH_32_POOLS);
1860                                 return -EINVAL;
1861                         }
1862                 }
1863
1864                 /* For DCB mode check our configuration before we go further */
1865                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1866                         const struct rte_eth_dcb_rx_conf *conf;
1867
1868                         if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
1869                                 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
1870                                                  IXGBE_DCB_NB_QUEUES);
1871                                 return -EINVAL;
1872                         }
1873                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1874                         if (!(conf->nb_tcs == ETH_4_TCS ||
1875                                conf->nb_tcs == ETH_8_TCS)) {
1876                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1877                                                 " and nb_tcs != %d.",
1878                                                 ETH_4_TCS, ETH_8_TCS);
1879                                 return -EINVAL;
1880                         }
1881                 }
1882
1883                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1884                         const struct rte_eth_dcb_tx_conf *conf;
1885
1886                         if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
1887                                 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
1888                                                  IXGBE_DCB_NB_QUEUES);
1889                                 return -EINVAL;
1890                         }
1891                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1892                         if (!(conf->nb_tcs == ETH_4_TCS ||
1893                                conf->nb_tcs == ETH_8_TCS)) {
1894                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1895                                                 " and nb_tcs != %d.",
1896                                                 ETH_4_TCS, ETH_8_TCS);
1897                                 return -EINVAL;
1898                         }
1899                 }
1900         }
1901         return 0;
1902 }
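/*
 * Configuration sketch (illustrative): a setup that satisfies the
 * SR-IOV branch above, assuming the port was probed with max_vfs > 0:
 *
 *     struct rte_eth_conf conf = {
 *             .rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_RSS },
 *             .txmode = { .mq_mode = ETH_MQ_TX_NONE },
 *     };
 *     ret = rte_eth_dev_configure(port_id, 2, 2, &conf);
 *
 * Two Rx queues per pool is one of the values accepted by
 * ixgbe_check_vf_rss_rxq_num() (1, 2 or 4).
 */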
1903
1904 static int
1905 ixgbe_dev_configure(struct rte_eth_dev *dev)
1906 {
1907         struct ixgbe_interrupt *intr =
1908                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1909         struct ixgbe_adapter *adapter =
1910                 (struct ixgbe_adapter *)dev->data->dev_private;
1911         int ret;
1912
1913         PMD_INIT_FUNC_TRACE();
1914         /* multiple queue mode checking */
1915         ret  = ixgbe_check_mq_mode(dev);
1916         if (ret != 0) {
1917                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
1918                             ret);
1919                 return ret;
1920         }
1921
1922         /* set flag to update link status after init */
1923         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1924
1925         /*
1926          * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
1927          * allocation or vector Rx preconditions we will reset it.
1928          */
1929         adapter->rx_bulk_alloc_allowed = true;
1930         adapter->rx_vec_allowed = true;
1931
1932         return 0;
1933 }
1934
1935 /*
1936  * Configure device link speed and setup link.
1937  * It returns 0 on success.
1938  */
1939 static int
1940 ixgbe_dev_start(struct rte_eth_dev *dev)
1941 {
1942         struct ixgbe_hw *hw =
1943                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1944         struct ixgbe_vf_info *vfinfo =
1945                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
1946         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1947         uint32_t intr_vector = 0;
1948         int err, link_up = 0, negotiate = 0;
1949         uint32_t speed = 0;
1950         int mask = 0;
1951         int status;
1952         uint16_t vf, idx;
1953
1954         PMD_INIT_FUNC_TRACE();
1955
1956         /* IXGBE devices don't support half duplex */
1957         if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
1958                         (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
1959                 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
1960                              dev->data->dev_conf.link_duplex,
1961                              dev->data->port_id);
1962                 return -EINVAL;
1963         }
1964
1965         /* stop adapter */
1966         hw->adapter_stopped = 0;
1967         ixgbe_stop_adapter(hw);
1968
1969         /* reinitialize adapter
1970          * this calls reset and start */
1971         status = ixgbe_pf_reset_hw(hw);
1972         if (status != 0)
1973                 return -1;
1974         hw->mac.ops.start_hw(hw);
1975         hw->mac.get_link_status = true;
1976
1977         /* configure PF module if SRIOV enabled */
1978         ixgbe_pf_host_configure(dev);
1979
1980         /* check and configure queue intr-vector mapping */
1981         if ((rte_intr_cap_multiple(intr_handle) ||
1982              !RTE_ETH_DEV_SRIOV(dev).active) &&
1983             dev->data->dev_conf.intr_conf.rxq != 0) {
1984                 intr_vector = dev->data->nb_rx_queues;
1985                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1986                         return -1;
1987         }
1988
1989         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1990                 intr_handle->intr_vec =
1991                         rte_zmalloc("intr_vec",
1992                                     dev->data->nb_rx_queues * sizeof(int), 0);
1993                 if (intr_handle->intr_vec == NULL) {
1994                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1995                                      " intr_vec\n", dev->data->nb_rx_queues);
1996                         return -ENOMEM;
1997                 }
1998         }
1999
2000         /* configure msix for sleep until rx interrupt */
2001         ixgbe_configure_msix(dev);
2002
2003         /* initialize transmission unit */
2004         ixgbe_dev_tx_init(dev);
2005
2006         /* This can fail when allocating mbufs for descriptor rings */
2007         err = ixgbe_dev_rx_init(dev);
2008         if (err) {
2009                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2010                 goto error;
2011         }
2012
2013         err = ixgbe_dev_rxtx_start(dev);
2014         if (err < 0) {
2015                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2016                 goto error;
2017         }
2018
2019         /* Skip link setup if loopback mode is enabled for 82599. */
2020         if (hw->mac.type == ixgbe_mac_82599EB &&
2021                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2022                 goto skip_link_setup;
2023
2024         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2025                 err = hw->mac.ops.setup_sfp(hw);
2026                 if (err)
2027                         goto error;
2028         }
2029
2030         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2031                 /* Turn on the copper */
2032                 ixgbe_set_phy_power(hw, true);
2033         } else {
2034                 /* Turn on the laser */
2035                 ixgbe_enable_tx_laser(hw);
2036         }
2037
2038         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2039         if (err)
2040                 goto error;
2041         dev->data->dev_link.link_status = link_up;
2042
2043         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2044         if (err)
2045                 goto error;
2046
2047         switch (dev->data->dev_conf.link_speed) {
2048         case ETH_LINK_SPEED_AUTONEG:
2049                 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
2050                                 IXGBE_LINK_SPEED_82599_AUTONEG :
2051                                 IXGBE_LINK_SPEED_82598_AUTONEG;
2052                 break;
2053         case ETH_LINK_SPEED_100:
2054                 /*
2055                  * Invalid for 82598 but error will be detected by
2056                  * ixgbe_setup_link()
2057                  */
2058                 speed = IXGBE_LINK_SPEED_100_FULL;
2059                 break;
2060         case ETH_LINK_SPEED_1000:
2061                 speed = IXGBE_LINK_SPEED_1GB_FULL;
2062                 break;
2063         case ETH_LINK_SPEED_10000:
2064                 speed = IXGBE_LINK_SPEED_10GB_FULL;
2065                 break;
2066         default:
2067                 PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
2068                              dev->data->dev_conf.link_speed,
2069                              dev->data->port_id);
2070                 goto error;
2071         }
2072
2073         err = ixgbe_setup_link(hw, speed, link_up);
2074         if (err)
2075                 goto error;
2076
2077 skip_link_setup:
2078
2079         if (rte_intr_allow_others(intr_handle)) {
2080                 /* check if lsc interrupt is enabled */
2081                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2082                         ixgbe_dev_lsc_interrupt_setup(dev);
2083         } else {
2084                 rte_intr_callback_unregister(intr_handle,
2085                                              ixgbe_dev_interrupt_handler,
2086                                              (void *)dev);
2087                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2088                         PMD_INIT_LOG(INFO, "lsc won't enable because of"
2089                                      " no intr multiplex\n");
2090         }
2091
2092         /* check if rxq interrupt is enabled */
2093         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2094             rte_intr_dp_is_en(intr_handle))
2095                 ixgbe_dev_rxq_interrupt_setup(dev);
2096
2097         /* enable uio/vfio intr/eventfd mapping */
2098         rte_intr_enable(intr_handle);
2099
2100         /* resume enabled intr since hw reset */
2101         ixgbe_enable_intr(dev);
2102
2103         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2104                 ETH_VLAN_EXTEND_MASK;
2105         ixgbe_vlan_offload_set(dev, mask);
2106
2107         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2108                 /* Enable vlan filtering for VMDq */
2109                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2110         }
2111
2112         /* Configure DCB hw */
2113         ixgbe_configure_dcb(dev);
2114
2115         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2116                 err = ixgbe_fdir_configure(dev);
2117                 if (err)
2118                         goto error;
2119         }
2120
2121         /* Restore vf rate limit */
2122         if (vfinfo != NULL) {
2123                 for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
2124                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2125                                 if (vfinfo[vf].tx_rate[idx] != 0)
2126                                         ixgbe_set_vf_rate_limit(dev, vf,
2127                                                 vfinfo[vf].tx_rate[idx],
2128                                                 1 << idx);
2129         }
2130
2131         ixgbe_restore_statistics_mapping(dev);
2132
2133         return 0;
2134
2135 error:
2136         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2137         ixgbe_dev_clear_queues(dev);
2138         return -EIO;
2139 }
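/*
 * Usage sketch (illustrative): the ethdev bring-up sequence that ends
 * in this function, with error handling elided:
 *
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *     for (q = 0; q < nb_rxq; q++)
 *             rte_eth_rx_queue_setup(port_id, q, 128, socket_id,
 *                                    NULL, mbuf_pool);
 *     for (q = 0; q < nb_txq; q++)
 *             rte_eth_tx_queue_setup(port_id, q, 512, socket_id, NULL);
 *     rte_eth_dev_start(port_id);      dispatches here via dev_ops
 */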
2140
2141 /*
2142  * Stop device: disable rx and tx functions to allow for reconfiguring.
2143  */
2144 static void
2145 ixgbe_dev_stop(struct rte_eth_dev *dev)
2146 {
2147         struct rte_eth_link link;
2148         struct ixgbe_hw *hw =
2149                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2150         struct ixgbe_vf_info *vfinfo =
2151                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2152         struct ixgbe_filter_info *filter_info =
2153                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2154         struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
2155         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
2156         int vf;
2157
2158         PMD_INIT_FUNC_TRACE();
2159
2160         /* disable interrupts */
2161         ixgbe_disable_intr(hw);
2162
2163         /* disable intr eventfd mapping */
2164         rte_intr_disable(intr_handle);
2165
2166         /* reset the NIC */
2167         ixgbe_pf_reset_hw(hw);
2168         hw->adapter_stopped = 0;
2169
2170         /* stop adapter */
2171         ixgbe_stop_adapter(hw);
2172
2173         for (vf = 0; vfinfo != NULL &&
2174                      vf < dev->pci_dev->max_vfs; vf++)
2175                 vfinfo[vf].clear_to_send = false;
2176
2177         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2178                 /* Turn off the copper */
2179                 ixgbe_set_phy_power(hw, false);
2180         } else {
2181                 /* Turn off the laser */
2182                 ixgbe_disable_tx_laser(hw);
2183         }
2184
2185         ixgbe_dev_clear_queues(dev);
2186
2187         /* Clear stored conf */
2188         dev->data->scattered_rx = 0;
2189         dev->data->lro = 0;
2190
2191         /* Clear recorded link status */
2192         memset(&link, 0, sizeof(link));
2193         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2194
2195         /* Remove all ntuple filters of the device */
2196         for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
2197              p_5tuple != NULL; p_5tuple = p_5tuple_next) {
2198                 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
2199                 TAILQ_REMOVE(&filter_info->fivetuple_list,
2200                              p_5tuple, entries);
2201                 rte_free(p_5tuple);
2202         }
2203         memset(filter_info->fivetuple_mask, 0,
2204                 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
2205
2206         if (!rte_intr_allow_others(intr_handle))
2207                 /* resume to the default handler */
2208                 rte_intr_callback_register(intr_handle,
2209                                            ixgbe_dev_interrupt_handler,
2210                                            (void *)dev);
2211
2212         /* Clean datapath event and queue/vec mapping */
2213         rte_intr_efd_disable(intr_handle);
2214         if (intr_handle->intr_vec != NULL) {
2215                 rte_free(intr_handle->intr_vec);
2216                 intr_handle->intr_vec = NULL;
2217         }
2218 }
2219
2220 /*
2221  * Set device link up: enable tx.
2222  */
2223 static int
2224 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2225 {
2226         struct ixgbe_hw *hw =
2227                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2228         if (hw->mac.type == ixgbe_mac_82599EB) {
2229 #ifdef RTE_NIC_BYPASS
2230                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2231                         /* Not supported in bypass mode */
2232                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2233                                      "by device id 0x%x", hw->device_id);
2234                         return -ENOTSUP;
2235                 }
2236 #endif
2237         }
2238
2239         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2240                 /* Turn on the copper */
2241                 ixgbe_set_phy_power(hw, true);
2242         } else {
2243                 /* Turn on the laser */
2244                 ixgbe_enable_tx_laser(hw);
2245         }
2246
2247         return 0;
2248 }
2249
2250 /*
2251  * Set device link down: disable tx.
2252  */
2253 static int
2254 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2255 {
2256         struct ixgbe_hw *hw =
2257                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2258         if (hw->mac.type == ixgbe_mac_82599EB) {
2259 #ifdef RTE_NIC_BYPASS
2260                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2261                         /* Not supported in bypass mode */
2262                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2263                                      "by device id 0x%x", hw->device_id);
2264                         return -ENOTSUP;
2265                 }
2266 #endif
2267         }
2268
2269         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2270                 /* Turn off the copper */
2271                 ixgbe_set_phy_power(hw, false);
2272         } else {
2273                 /* Turn off the laser */
2274                 ixgbe_disable_tx_laser(hw);
2275         }
2276
2277         return 0;
2278 }
2279
2280 /*
2281  * Reset and stop device.
2282  */
2283 static void
2284 ixgbe_dev_close(struct rte_eth_dev *dev)
2285 {
2286         struct ixgbe_hw *hw =
2287                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2288
2289         PMD_INIT_FUNC_TRACE();
2290
2291         ixgbe_pf_reset_hw(hw);
2292
2293         ixgbe_dev_stop(dev);
2294         hw->adapter_stopped = 1;
2295
2296         ixgbe_dev_free_queues(dev);
2297
2298         ixgbe_disable_pcie_master(hw);
2299
2300         /* reprogram the RAR[0] in case user changed it. */
2301         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2302 }
2303
2304 static void
2305 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2306                            struct ixgbe_hw_stats *hw_stats,
2307                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
2308                            uint64_t *total_qprc, uint64_t *total_qprdc)
2309 {
2310         uint32_t bprc, lxon, lxoff, total;
2311         unsigned i;
2312
2313         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2314         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2315         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2316         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2317
2318         for (i = 0; i < 8; i++) {
2319                 uint32_t mp;
2320                 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2321                 /* global total per queue */
2322                 hw_stats->mpc[i] += mp;
2323                 /* Running comprehensive total for stats display */
2324                 *total_missed_rx += hw_stats->mpc[i];
2325                 if (hw->mac.type == ixgbe_mac_82598EB) {
2326                         hw_stats->rnbc[i] +=
2327                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2328                         hw_stats->pxonrxc[i] +=
2329                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2330                         hw_stats->pxoffrxc[i] +=
2331                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2332                 } else {
2333                         hw_stats->pxonrxc[i] +=
2334                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2335                         hw_stats->pxoffrxc[i] +=
2336                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2337                         hw_stats->pxon2offc[i] +=
2338                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2339                 }
2340                 hw_stats->pxontxc[i] +=
2341                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2342                 hw_stats->pxofftxc[i] +=
2343                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2344         }
2345         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2346                 hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2347                 hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2348                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2349                 hw_stats->qbrc[i] +=
2350                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2351                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2352                 hw_stats->qbtc[i] +=
2353                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2354                 *total_qprdc += hw_stats->qprdc[i] +=
2355                                 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2356
2357                 *total_qprc += hw_stats->qprc[i];
2358                 *total_qbrc += hw_stats->qbrc[i];
2359         }
2360         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2361         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2362         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2363
2364         /* Note that gprc counts missed packets */
2365         hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
2366
2367         if (hw->mac.type != ixgbe_mac_82598EB) {
2368                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
2369                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
2370                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
2371                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
2372                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
2373                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
2374                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
2375                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
2376         } else {
2377                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2378                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2379                 /* 82598 only has a counter in the high register */
2380                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2381                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2382                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2383         }
2384
2385         /*
2386          * Workaround: mprc hardware is incorrectly counting
2387          * broadcasts, so for now we subtract those.
2388          */
2389         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2390         hw_stats->bprc += bprc;
2391         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2392         if (hw->mac.type == ixgbe_mac_82598EB)
2393                 hw_stats->mprc -= bprc;
2394
2395         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2396         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2397         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2398         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2399         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2400         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2401
2402         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2403         hw_stats->lxontxc += lxon;
2404         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2405         hw_stats->lxofftxc += lxoff;
2406         total = lxon + lxoff;
2407
2408         hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
2409         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2410         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2411         hw_stats->gptc -= total;
2412         hw_stats->mptc -= total;
2413         hw_stats->ptc64 -= total;
2414         hw_stats->gotc -= total * ETHER_MIN_LEN;
2415
2416         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2417         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2418         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2419         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2420         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
2421         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
2422         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
2423         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2424         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
2425         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2426         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2427         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2428         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2429         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
2430         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2431         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
2432         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
2433         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
2434         /* Only read FCOE on 82599 */
2435         if (hw->mac.type != ixgbe_mac_82598EB) {
2436                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
2437                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
2438                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
2439                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
2440                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
2441         }
2442
2443         /* Flow Director Stats registers */
2444         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
2445         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
2446 }
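/*
 * Illustrative note on the split counters above: QBRC/QBTC/GORC/GOTC/
 * TOR are wider than 32 bits and are exposed as a low/high register
 * pair, so each sample is folded in as
 *
 *     total += low + ((uint64_t)high << 32);
 *
 * e.g. QBRC_L = 0x00001000 with QBRC_H = 0x2 contributes 0x200001000
 * (8,589,938,688 bytes). The registers clear on read, which is why
 * every sample is accumulated into the running hw_stats totals.
 */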
2447
2448 /*
2449  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
2450  */
2451 static void
2452 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2453 {
2454         struct ixgbe_hw *hw =
2455                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2456         struct ixgbe_hw_stats *hw_stats =
2457                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2458         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2459         unsigned i;
2460
2461         total_missed_rx = 0;
2462         total_qbrc = 0;
2463         total_qprc = 0;
2464         total_qprdc = 0;
2465
2466         ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
2467                         &total_qprc, &total_qprdc);
2468
2469         if (stats == NULL)
2470                 return;
2471
2472         /* Fill out the rte_eth_stats statistics structure */
2473         stats->ipackets = total_qprc;
2474         stats->ibytes = total_qbrc;
2475         stats->opackets = hw_stats->gptc;
2476         stats->obytes = hw_stats->gotc;
2477
2478         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2479                 stats->q_ipackets[i] = hw_stats->qprc[i];
2480                 stats->q_opackets[i] = hw_stats->qptc[i];
2481                 stats->q_ibytes[i] = hw_stats->qbrc[i];
2482                 stats->q_obytes[i] = hw_stats->qbtc[i];
2483                 stats->q_errors[i] = hw_stats->qprdc[i];
2484         }
2485
2486         /* Rx Errors */
2487         stats->ierrors  = hw_stats->crcerrs +
2488                           hw_stats->mspdc +
2489                           hw_stats->rlec +
2490                           hw_stats->ruc +
2491                           hw_stats->roc +
2492                           total_missed_rx +
2493                           hw_stats->illerrc +
2494                           hw_stats->errbc +
2495                           hw_stats->rfc +
2496                           hw_stats->fccrc +
2497                           hw_stats->fclast;
2498
2499         /* Tx Errors */
2500         stats->oerrors  = 0;
2501 }
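/*
 * Usage sketch (illustrative): applications reach this handler through
 * the generic stats API; a minimal poll of the aggregates built above:
 *
 *     struct rte_eth_stats st;
 *     rte_eth_stats_get(port_id, &st);
 *     printf("rx %" PRIu64 " pkts, %" PRIu64 " errors\n",
 *            st.ipackets, st.ierrors);
 */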
2502
2503 static void
2504 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
2505 {
2506         struct ixgbe_hw_stats *stats =
2507                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2508
2509         /* HW registers are cleared on read */
2510         ixgbe_dev_stats_get(dev, NULL);
2511
2512         /* Reset software totals */
2513         memset(stats, 0, sizeof(*stats));
2514 }
2515
2516 static int
2517 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
2518                      unsigned n)
2519 {
2520         struct ixgbe_hw *hw =
2521                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2522         struct ixgbe_hw_stats *hw_stats =
2523                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2524         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2525         unsigned i, count = IXGBE_NB_XSTATS;
2526
2527         if (n < count)
2528                 return count;
2529
2530         total_missed_rx = 0;
2531         total_qbrc = 0;
2532         total_qprc = 0;
2533         total_qprdc = 0;
2534
2535         ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
2536                                    &total_qprc, &total_qprdc);
2537
2538         /* If this is a reset, xstats is NULL and we have cleared the
2539          * registers by reading them.
2540          */
2541         if (!xstats)
2542                 return 0;
2543
2544         /* Extended stats from ixgbe_hw_stats */
2545         count = 0;
2546         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
2547                 snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
2548                          rte_ixgbe_stats_strings[i].name);
2549                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2550                                 rte_ixgbe_stats_strings[i].offset);
2551                 count++;
2552         }
2553
2554         /* Per-Q stats, with 8 queues available */
2555         for (i = 0; i < 8; i++) {
2556                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2557                          "rx_q%u_mbuf_allocation_errors", i);
2558                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2559                                 offsetof(struct ixgbe_hw_stats, rnbc[i]));
2560                 count++;
2561
2562                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2563                          "rx_q%u_missed_packets", i);
2564                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2565                                 offsetof(struct ixgbe_hw_stats, mpc[i]));
2566                 count++;
2567
2568                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2569                          "rx_q%u_xon_priority_packets", i);
2570                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2571                                 offsetof(struct ixgbe_hw_stats, pxonrxc[i]));
2572                 count++;
2573
2574                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2575                          "tx_q%u_xon_priority_packets", i);
2576                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2577                                 offsetof(struct ixgbe_hw_stats, pxontxc[i]));
2578                 count++;
2579
2580                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2581                          "rx_q%u_xoff_priority_packets", i);
2582                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2583                                 offsetof(struct ixgbe_hw_stats, pxoffrxc[i]));
2584                 count++;
2585
2586                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2587                          "tx_q%u_xoff_priority_packets", i);
2588                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2589                                 offsetof(struct ixgbe_hw_stats, pxofftxc[i]));
2590                 count++;
2591
2592                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2593                          "xx_q%u_xon_to_xoff_priority_packets", i);
2594                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2595                                 offsetof(struct ixgbe_hw_stats, pxon2offc[i]));
2596                 count++;
2597         }
2598
2599         for (i = 0; i < 16; i++) {
2600                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2601                          "rx_q%u_packets", i);
2602                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2603                                 offsetof(struct ixgbe_hw_stats, qprc[i]));
2604                 count++;
2605
2606                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2607                          "rx_q%u_bytes", i);
2608                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2609                                 offsetof(struct ixgbe_hw_stats, qbrc[i]));
2610                 count++;
2611
2612                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2613                          "tx_q%u_packets", i);
2614                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2615                                 offsetof(struct ixgbe_hw_stats, qptc[i]));
2616                 count++;
2617
2618                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2619                          "tx_q%u_bytes", i);
2620                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2621                                 offsetof(struct ixgbe_hw_stats, qbtc[i]));
2622                 count++;
2623
2624                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2625                          "rx_q%u_dropped", i);
2626                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2627                                 offsetof(struct ixgbe_hw_stats, qprdc[i]));
2628                 count++;
2629         }
2630
2631         return count;
2632 }
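
/*
 * Illustrative usage sketch (not part of the driver): applications read
 * the extended stats exposed above through the generic ethdev API, using
 * a two-call pattern -- query the required array size first, then fetch
 * the values. The helper name and port id are assumptions of the example.
 */
#if 0   /* example only, kept out of the build */
static void
example_dump_xstats(uint8_t port_id)
{
        struct rte_eth_xstats *xstats;
        int len, ret, i;

        /* a first call with a too-small size returns the required count */
        len = rte_eth_xstats_get(port_id, NULL, 0);
        if (len <= 0)
                return;

        xstats = rte_zmalloc("example_xstats", sizeof(*xstats) * len, 0);
        if (xstats == NULL)
                return;

        ret = rte_eth_xstats_get(port_id, xstats, len);
        for (i = 0; i < ret; i++)
                printf("%s: %" PRIu64 "\n", xstats[i].name, xstats[i].value);

        rte_free(xstats);
}
#endif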
2633
2634 static void
2635 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2636 {
2637         struct ixgbe_hw_stats *stats =
2638                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2639
2640         /* HW registers are cleared on read */
2641         ixgbe_dev_xstats_get(dev, NULL, IXGBE_NB_XSTATS);
2642
2643         /* Reset software totals */
2644         memset(stats, 0, sizeof(*stats));
2645 }
2646
2647 static void
2648 ixgbevf_update_stats(struct rte_eth_dev *dev)
2649 {
2650         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2651         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2652                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2653
2654         /* Good Rx packets, including VF loopback */
2655         UPDATE_VF_STAT(IXGBE_VFGPRC,
2656             hw_stats->last_vfgprc, hw_stats->vfgprc);
2657
2658         /* Good Rx octets, including VF loopback */
2659         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2660             hw_stats->last_vfgorc, hw_stats->vfgorc);
2661
2662         /* Good Tx packets, including VF loopback */
2663         UPDATE_VF_STAT(IXGBE_VFGPTC,
2664             hw_stats->last_vfgptc, hw_stats->vfgptc);
2665
2666         /* Good Tx octets, including VF loopback */
2667         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2668             hw_stats->last_vfgotc, hw_stats->vfgotc);
2669
2670         /* Rx multicast packets, including VF loopback */
2671         UPDATE_VF_STAT(IXGBE_VFMPRC,
2672             hw_stats->last_vfmprc, hw_stats->vfmprc);
2673 }
2674
2675 static int
2676 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
2677                        unsigned n)
2678 {
2679         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2680                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2681         unsigned i;
2682
2683         if (n < IXGBEVF_NB_XSTATS)
2684                 return IXGBEVF_NB_XSTATS;
2685
2686         ixgbevf_update_stats(dev);
2687
2688         if (!xstats)
2689                 return 0;
2690
2691         /* Extended stats */
2692         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
2693                 snprintf(xstats[i].name, sizeof(xstats[i].name),
2694                          "%s", rte_ixgbevf_stats_strings[i].name);
2695                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
2696                         rte_ixgbevf_stats_strings[i].offset);
2697         }
2698
2699         return IXGBEVF_NB_XSTATS;
2700 }
2701
2702 static void
2703 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2704 {
2705         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2706                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2707
2708         ixgbevf_update_stats(dev);
2709
2710         if (stats == NULL)
2711                 return;
2712
2713         stats->ipackets = hw_stats->vfgprc;
2714         stats->ibytes = hw_stats->vfgorc;
2715         stats->opackets = hw_stats->vfgptc;
2716         stats->obytes = hw_stats->vfgotc;
2717         stats->imcasts = hw_stats->vfmprc;
2718         /* Note: stats->imcasts is deprecated and should eventually be removed */
2719 }
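
/*
 * Illustrative sketch (not part of the driver): polling the VF basic
 * stats filled in above through the generic ethdev API. The helper name
 * and the choice of printed fields are assumptions of the example.
 */
#if 0   /* example only */
static void
example_poll_vf_stats(uint8_t port_id)
{
        struct rte_eth_stats stats;

        memset(&stats, 0, sizeof(stats));
        rte_eth_stats_get(port_id, &stats);
        printf("rx %" PRIu64 " pkts / %" PRIu64 " bytes, tx %" PRIu64 " pkts\n",
               stats.ipackets, stats.ibytes, stats.opackets);
}
#endif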
2720
2721 static void
2722 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
2723 {
2724         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2725                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2726
2727         /* Sync HW register to the last stats */
2728         ixgbevf_dev_stats_get(dev, NULL);
2729
2730         /* Reset the current HW stats */
2731         hw_stats->vfgprc = 0;
2732         hw_stats->vfgorc = 0;
2733         hw_stats->vfgptc = 0;
2734         hw_stats->vfgotc = 0;
2735         hw_stats->vfmprc = 0;
2736
2737 }
2738
2739 static void
2740 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2741 {
2742         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2743
2744         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2745         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2746         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
2747         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
2748         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2749         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2750         dev_info->max_vfs = dev->pci_dev->max_vfs;
2751         if (hw->mac.type == ixgbe_mac_82598EB)
2752                 dev_info->max_vmdq_pools = ETH_16_POOLS;
2753         else
2754                 dev_info->max_vmdq_pools = ETH_64_POOLS;
2755         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2756         dev_info->rx_offload_capa =
2757                 DEV_RX_OFFLOAD_VLAN_STRIP |
2758                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2759                 DEV_RX_OFFLOAD_UDP_CKSUM  |
2760                 DEV_RX_OFFLOAD_TCP_CKSUM;
2761
2762         /*
2763          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
2764          * mode.
2765          */
2766         if ((hw->mac.type == ixgbe_mac_82599EB ||
2767              hw->mac.type == ixgbe_mac_X540) &&
2768             !RTE_ETH_DEV_SRIOV(dev).active)
2769                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
2770
2771         dev_info->tx_offload_capa =
2772                 DEV_TX_OFFLOAD_VLAN_INSERT |
2773                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2774                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2775                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2776                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2777                 DEV_TX_OFFLOAD_TCP_TSO;
2778
2779         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2780                 .rx_thresh = {
2781                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2782                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2783                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2784                 },
2785                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2786                 .rx_drop_en = 0,
2787         };
2788
2789         dev_info->default_txconf = (struct rte_eth_txconf) {
2790                 .tx_thresh = {
2791                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2792                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2793                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2794                 },
2795                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2796                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2797                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2798                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2799         };
2800
2801         dev_info->rx_desc_lim = rx_desc_lim;
2802         dev_info->tx_desc_lim = tx_desc_lim;
2803
2804         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2805         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
2806         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
2807 }
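
/*
 * Illustrative sketch (not part of the driver): querying the capabilities
 * reported above before configuring a port, here to check whether LRO may
 * be requested. The helper name is an assumption of the example.
 */
#if 0   /* example only */
static int
example_port_supports_lro(uint8_t port_id)
{
        struct rte_eth_dev_info dev_info;

        memset(&dev_info, 0, sizeof(dev_info));
        rte_eth_dev_info_get(port_id, &dev_info);

        /* set above only for 82599/X540 PF devices without SR-IOV */
        return (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) != 0;
}
#endif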
2808
2809 static void
2810 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
2811                      struct rte_eth_dev_info *dev_info)
2812 {
2813         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2814
2815         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2816         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2817         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
2818         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
2819         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2820         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2821         dev_info->max_vfs = dev->pci_dev->max_vfs;
2822         if (hw->mac.type == ixgbe_mac_82598EB)
2823                 dev_info->max_vmdq_pools = ETH_16_POOLS;
2824         else
2825                 dev_info->max_vmdq_pools = ETH_64_POOLS;
2826         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
2827                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2828                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
2829                                 DEV_RX_OFFLOAD_TCP_CKSUM;
2830         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
2831                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2832                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2833                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2834                                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2835                                 DEV_TX_OFFLOAD_TCP_TSO;
2836
2837         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2838                 .rx_thresh = {
2839                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2840                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2841                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2842                 },
2843                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2844                 .rx_drop_en = 0,
2845         };
2846
2847         dev_info->default_txconf = (struct rte_eth_txconf) {
2848                 .tx_thresh = {
2849                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2850                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2851                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2852                 },
2853                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2854                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2855                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2856                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2857         };
2858
2859         dev_info->rx_desc_lim = rx_desc_lim;
2860         dev_info->tx_desc_lim = tx_desc_lim;
2861 }
2862
2863 /* Return 0 if the link status changed, -1 if it did not change */
2864 static int
2865 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2866 {
2867         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2868         struct rte_eth_link link, old;
2869         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
2870         int link_up;
2871         int diag;
2872
2873         link.link_status = 0;
2874         link.link_speed = 0;
2875         link.link_duplex = 0;
2876         memset(&old, 0, sizeof(old));
2877         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
2878
2879         hw->mac.get_link_status = true;
2880
2881         /* don't wait for completion if no-wait was requested or LSC interrupt is enabled */
2882         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2883                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
2884         else
2885                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
2886
2887         if (diag != 0) {
2888                 link.link_speed = ETH_LINK_SPEED_100;
2889                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2890                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2891                 if (link.link_status == old.link_status)
2892                         return -1;
2893                 return 0;
2894         }
2895
2896         if (link_up == 0) {
2897                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2898                 if (link.link_status == old.link_status)
2899                         return -1;
2900                 return 0;
2901         }
2902         link.link_status = 1;
2903         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2904
2905         switch (link_speed) {
2906         default:
2907         case IXGBE_LINK_SPEED_UNKNOWN:
2908                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2909                 link.link_speed = ETH_LINK_SPEED_100;
2910                 break;
2911
2912         case IXGBE_LINK_SPEED_100_FULL:
2913                 link.link_speed = ETH_LINK_SPEED_100;
2914                 break;
2915
2916         case IXGBE_LINK_SPEED_1GB_FULL:
2917                 link.link_speed = ETH_LINK_SPEED_1000;
2918                 break;
2919
2920         case IXGBE_LINK_SPEED_10GB_FULL:
2921                 link.link_speed = ETH_LINK_SPEED_10000;
2922                 break;
2923         }
2924         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2925
2926         if (link.link_status == old.link_status)
2927                 return -1;
2928
2929         return 0;
2930 }
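
/*
 * Illustrative sketch (not part of the driver): applications observe the
 * link state written above via rte_eth_link_get_nowait() (the no-wait
 * path, wait_to_complete == 0) or rte_eth_link_get() (the waiting path).
 */
#if 0   /* example only */
static void
example_print_link(uint8_t port_id)
{
        struct rte_eth_link link;

        memset(&link, 0, sizeof(link));
        rte_eth_link_get_nowait(port_id, &link);
        printf("port %u: %s, %u Mbps\n", port_id,
               link.link_status ? "up" : "down",
               (unsigned)link.link_speed);
}
#endif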
2931
2932 static void
2933 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2934 {
2935         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2936         uint32_t fctrl;
2937
2938         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2939         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2940         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2941 }
2942
2943 static void
2944 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2945 {
2946         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2947         uint32_t fctrl;
2948
2949         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2950         fctrl &= (~IXGBE_FCTRL_UPE);
2951         if (dev->data->all_multicast == 1)
2952                 fctrl |= IXGBE_FCTRL_MPE;
2953         else
2954                 fctrl &= (~IXGBE_FCTRL_MPE);
2955         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2956 }
2957
2958 static void
2959 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2960 {
2961         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2962         uint32_t fctrl;
2963
2964         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2965         fctrl |= IXGBE_FCTRL_MPE;
2966         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2967 }
2968
2969 static void
2970 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2971 {
2972         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2973         uint32_t fctrl;
2974
2975         if (dev->data->promiscuous == 1)
2976                 return; /* must remain in all_multicast mode */
2977
2978         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2979         fctrl &= (~IXGBE_FCTRL_MPE);
2980         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2981 }
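
/*
 * Illustrative sketch (not part of the driver): the four FCTRL helpers
 * above back the generic promiscuous/allmulticast calls. Note that, as
 * coded above, leaving promiscuous mode preserves MPE when allmulticast
 * is still enabled. The helper name is an assumption of the example.
 */
#if 0   /* example only */
static void
example_toggle_rx_filters(uint8_t port_id)
{
        rte_eth_allmulticast_enable(port_id);  /* sets FCTRL.MPE */
        rte_eth_promiscuous_enable(port_id);   /* sets FCTRL.UPE and MPE */
        rte_eth_promiscuous_disable(port_id);  /* clears UPE, keeps MPE */
        rte_eth_allmulticast_disable(port_id); /* clears MPE */
}
#endif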
2982
2983 /**
2984  * It clears the interrupt causes and enables the interrupt.
2985  * It is called only once, during NIC initialization.
2986  *
2987  * @param dev
2988  *  Pointer to struct rte_eth_dev.
2989  *
2990  * @return
2991  *  - On success, zero.
2992  *  - On failure, a negative value.
2993  */
2994 static int
2995 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
2996 {
2997         struct ixgbe_interrupt *intr =
2998                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2999
3000         ixgbe_dev_link_status_print(dev);
3001         intr->mask |= IXGBE_EICR_LSC;
3002
3003         return 0;
3004 }
3005
3006 /**
3007  * It clears the interrupt causes and enables the interrupt.
3008  * It is called only once, during NIC initialization.
3009  *
3010  * @param dev
3011  *  Pointer to struct rte_eth_dev.
3012  *
3013  * @return
3014  *  - On success, zero.
3015  *  - On failure, a negative value.
3016  */
3017 static int
3018 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
3019 {
3020         struct ixgbe_interrupt *intr =
3021                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3022
3023         intr->mask |= IXGBE_EICR_RTX_QUEUE;
3024
3025         return 0;
3026 }
3027
3028 /*
3029  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
3030  *
3031  * @param dev
3032  *  Pointer to struct rte_eth_dev.
3033  *
3034  * @return
3035  *  - On success, zero.
3036  *  - On failure, a negative value.
3037  */
3038 static int
3039 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
3040 {
3041         uint32_t eicr;
3042         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3043         struct ixgbe_interrupt *intr =
3044                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3045
3046         /* clear all cause mask */
3047         ixgbe_disable_intr(hw);
3048
3049         /* read-to-clear NIC registers here */
3050         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3051         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
3052
3053         intr->flags = 0;
3054
3055         /* set flag for async link update */
3056         if (eicr & IXGBE_EICR_LSC)
3057                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3058
3059         if (eicr & IXGBE_EICR_MAILBOX)
3060                 intr->flags |= IXGBE_FLAG_MAILBOX;
3061
3062         return 0;
3063 }
3064
3065 /**
3066  * It gets and then prints the link status.
3067  *
3068  * @param dev
3069  *  Pointer to struct rte_eth_dev.
3070  *
3071  * @return
3072  *  void; it only logs the current link status and the
3073  *  port's PCI address.
3074  */
3075 static void
3076 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
3077 {
3078         struct rte_eth_link link;
3079
3080         memset(&link, 0, sizeof(link));
3081         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
3082         if (link.link_status) {
3083                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
3084                                         (int)(dev->data->port_id),
3085                                         (unsigned)link.link_speed,
3086                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
3087                                         "full-duplex" : "half-duplex");
3088         } else {
3089                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
3090                                 (int)(dev->data->port_id));
3091         }
3092         PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
3093                                 dev->pci_dev->addr.domain,
3094                                 dev->pci_dev->addr.bus,
3095                                 dev->pci_dev->addr.devid,
3096                                 dev->pci_dev->addr.function);
3097 }
3098
3099 /*
3100  * It executes link_update after knowing an interrupt occurred.
3101  *
3102  * @param dev
3103  *  Pointer to struct rte_eth_dev.
3104  *
3105  * @return
3106  *  - On success, zero.
3107  *  - On failure, a negative value.
3108  */
3109 static int
3110 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
3111 {
3112         struct ixgbe_interrupt *intr =
3113                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3114         int64_t timeout;
3115         struct rte_eth_link link;
3116         int intr_enable_delay = false;
3117
3118         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
3119
3120         if (intr->flags & IXGBE_FLAG_MAILBOX) {
3121                 ixgbe_pf_mbx_process(dev);
3122                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
3123         }
3124
3125         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3126                 /* read the current link status before updating, to predict the change */
3127                 memset(&link, 0, sizeof(link));
3128                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
3129
3130                 ixgbe_dev_link_update(dev, 0);
3131
3132                 /* link is likely coming up */
3133                 if (!link.link_status)
3134                         /* handle it 1 sec later, waiting for it to stabilize */
3135                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
3136                 /* link is likely going down */
3137                 else
3138                         /* handle it 4 sec later, waiting for it to stabilize */
3139                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
3140
3141                 ixgbe_dev_link_status_print(dev);
3142
3143                 intr_enable_delay = true;
3144         }
3145
3146         if (intr_enable_delay) {
3147                 if (rte_eal_alarm_set(timeout * 1000,
3148                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
3149                         PMD_DRV_LOG(ERR, "Error setting alarm");
3150         } else {
3151                 PMD_DRV_LOG(DEBUG, "enable intr immediately");
3152                 ixgbe_enable_intr(dev);
3153                 rte_intr_enable(&(dev->pci_dev->intr_handle));
3154         }
3155
3156
3157         return 0;
3158 }
3159
3160 /**
3161  * Interrupt handler to be registered as an alarm callback, for delayed
3162  * handling of a specific interrupt while waiting for a stable NIC state.
3163  * Since the ixgbe interrupt state is not stable right after the link has
3164  * gone down, it waits 4 seconds before reading the stable status.
3165  *
3166  * @param handle
3167  *  Pointer to interrupt handle.
3168  * @param param
3169  *  The address of the parameter (struct rte_eth_dev *) registered before.
3170  *
3171  * @return
3172  *  void
3173  */
3174 static void
3175 ixgbe_dev_interrupt_delayed_handler(void *param)
3176 {
3177         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3178         struct ixgbe_interrupt *intr =
3179                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3180         struct ixgbe_hw *hw =
3181                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3182         uint32_t eicr;
3183
3184         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3185         if (eicr & IXGBE_EICR_MAILBOX)
3186                 ixgbe_pf_mbx_process(dev);
3187
3188         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3189                 ixgbe_dev_link_update(dev, 0);
3190                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3191                 ixgbe_dev_link_status_print(dev);
3192                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3193         }
3194
3195         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3196         ixgbe_enable_intr(dev);
3197         rte_intr_enable(&(dev->pci_dev->intr_handle));
3198 }
3199
3200 /**
3201  * Interrupt handler triggered by the NIC for handling a
3202  * specific interrupt.
3203  *
3204  * @param handle
3205  *  Pointer to interrupt handle.
3206  * @param param
3207  *  The address of the parameter (struct rte_eth_dev *) registered before.
3208  *
3209  * @return
3210  *  void
3211  */
3212 static void
3213 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3214                             void *param)
3215 {
3216         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3217
3218         ixgbe_dev_interrupt_get_status(dev);
3219         ixgbe_dev_interrupt_action(dev);
3220 }
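
/*
 * Illustrative sketch (not part of the driver): the LSC path above ends in
 * _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC), which invokes
 * callbacks an application registered as below. The callback and helper
 * names are assumptions of the example; intr_conf.lsc must be set to 1 in
 * the device configuration for the interrupt to be armed.
 */
#if 0   /* example only */
static void
example_lsc_cb(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
        RTE_SET_USED(param);
        if (type == RTE_ETH_EVENT_INTR_LSC)
                printf("port %u: link state changed\n", port_id);
}

static int
example_register_lsc(uint8_t port_id)
{
        return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                                             example_lsc_cb, NULL);
}
#endif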
3221
3222 static int
3223 ixgbe_dev_led_on(struct rte_eth_dev *dev)
3224 {
3225         struct ixgbe_hw *hw;
3226
3227         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3228         return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
3229 }
3230
3231 static int
3232 ixgbe_dev_led_off(struct rte_eth_dev *dev)
3233 {
3234         struct ixgbe_hw *hw;
3235
3236         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3237         return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
3238 }
3239
3240 static int
3241 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3242 {
3243         struct ixgbe_hw *hw;
3244         uint32_t mflcn_reg;
3245         uint32_t fccfg_reg;
3246         int rx_pause;
3247         int tx_pause;
3248
3249         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3250
3251         fc_conf->pause_time = hw->fc.pause_time;
3252         fc_conf->high_water = hw->fc.high_water[0];
3253         fc_conf->low_water = hw->fc.low_water[0];
3254         fc_conf->send_xon = hw->fc.send_xon;
3255         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3256
3257         /*
3258          * Return rx_pause status according to actual setting of
3259          * MFLCN register.
3260          */
3261         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3262         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
3263                 rx_pause = 1;
3264         else
3265                 rx_pause = 0;
3266
3267         /*
3268          * Return tx_pause status according to actual setting of
3269          * FCCFG register.
3270          */
3271         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3272         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
3273                 tx_pause = 1;
3274         else
3275                 tx_pause = 0;
3276
3277         if (rx_pause && tx_pause)
3278                 fc_conf->mode = RTE_FC_FULL;
3279         else if (rx_pause)
3280                 fc_conf->mode = RTE_FC_RX_PAUSE;
3281         else if (tx_pause)
3282                 fc_conf->mode = RTE_FC_TX_PAUSE;
3283         else
3284                 fc_conf->mode = RTE_FC_NONE;
3285
3286         return 0;
3287 }
3288
3289 static int
3290 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3291 {
3292         struct ixgbe_hw *hw;
3293         int err;
3294         uint32_t rx_buf_size;
3295         uint32_t max_high_water;
3296         uint32_t mflcn;
3297         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
3298                 ixgbe_fc_none,
3299                 ixgbe_fc_rx_pause,
3300                 ixgbe_fc_tx_pause,
3301                 ixgbe_fc_full
3302         };
3303
3304         PMD_INIT_FUNC_TRACE();
3305
3306         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3307         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
3308         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3309
3310         /*
3311          * Reserve at least one Ethernet frame for the watermarks;
3312          * high_water/low_water are expressed in kilobytes on ixgbe.
3313          */
3314         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
3315         if ((fc_conf->high_water > max_high_water) ||
3316                 (fc_conf->high_water < fc_conf->low_water)) {
3317                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3318                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3319                 return (-EINVAL);
3320         }
3321
3322         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
3323         hw->fc.pause_time     = fc_conf->pause_time;
3324         hw->fc.high_water[0]  = fc_conf->high_water;
3325         hw->fc.low_water[0]   = fc_conf->low_water;
3326         hw->fc.send_xon       = fc_conf->send_xon;
3327         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3328
3329         err = ixgbe_fc_enable(hw);
3330
3331         /* Not negotiated is not an error case */
3332         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
3333
3334                 /* check if we want to forward MAC frames - driver doesn't have native
3335                  * capability to do that, so we'll write the registers ourselves */
3336
3337                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3338
3339                 /* set or clear MFLCN.PMCF bit depending on configuration */
3340                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3341                         mflcn |= IXGBE_MFLCN_PMCF;
3342                 else
3343                         mflcn &= ~IXGBE_MFLCN_PMCF;
3344
3345                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
3346                 IXGBE_WRITE_FLUSH(hw);
3347
3348                 return 0;
3349         }
3350
3351         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
3352         return -EIO;
3353 }
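
/*
 * Illustrative sketch (not part of the driver): configuring 802.3x flow
 * control through the generic calls serviced by the two functions above.
 * The watermark and pause-time values are arbitrary example numbers, in
 * KB, subject to the max_high_water check above.
 */
#if 0   /* example only */
static int
example_enable_flow_ctrl(uint8_t port_id)
{
        struct rte_eth_fc_conf fc_conf;

        memset(&fc_conf, 0, sizeof(fc_conf));
        if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) != 0)
                return -1;

        fc_conf.mode = RTE_FC_FULL;     /* pause in both directions */
        fc_conf.high_water = 0x80;      /* example watermarks, in KB */
        fc_conf.low_water = 0x40;
        fc_conf.pause_time = 0x680;
        fc_conf.send_xon = 1;
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif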
3354
3355 /**
3356  *  ixgbe_dcb_pfc_enable_generic - Enable priority flow control
3357  *  @hw: pointer to hardware structure
3358  *  @tc_num: traffic class number
3359  *  Enable priority flow control according to the current settings.
3360  */
3361 static int
3362 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
3363 {
3364         int ret_val = 0;
3365         uint32_t mflcn_reg, fccfg_reg;
3366         uint32_t reg;
3367         uint32_t fcrtl, fcrth;
3368         uint8_t i;
3369         uint8_t nb_rx_en;
3370
3371         /* Validate the water mark configuration */
3372         if (!hw->fc.pause_time) {
3373                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3374                 goto out;
3375         }
3376
3377         /* Low water mark of zero causes XOFF floods */
3378         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
3379                 /* High/low water marks cannot be zero */
3380                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
3381                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3382                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3383                         goto out;
3384                 }
3385
3386                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
3387                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3388                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3389                         goto out;
3390                 }
3391         }
3392         /* Negotiate the fc mode to use */
3393         ixgbe_fc_autoneg(hw);
3394
3395         /* Disable any previous flow control settings */
3396         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3397         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
3398
3399         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3400         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
3401
3402         switch (hw->fc.current_mode) {
3403         case ixgbe_fc_none:
3404                 /*
3405                  * If more than one Rx priority flow control is enabled,
3406                  * Tx pause cannot be disabled.
3407                  */
3408                 nb_rx_en = 0;
3409                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3410                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3411                         if (reg & IXGBE_FCRTH_FCEN)
3412                                 nb_rx_en++;
3413                 }
3414                 if (nb_rx_en > 1)
3415                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3416                 break;
3417         case ixgbe_fc_rx_pause:
3418                 /*
3419                  * Rx Flow control is enabled and Tx Flow control is
3420                  * disabled by software override. Since there really
3421                  * isn't a way to advertise that we are capable of RX
3422                  * Pause ONLY, we will advertise that we support both
3423                  * symmetric and asymmetric Rx PAUSE.  Later, we will
3424                  * disable the adapter's ability to send PAUSE frames.
3425                  */
3426                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
3427                 /*
3428                  * If more than one Rx priority flow control is enabled,
3429                  * Tx pause cannot be disabled.
3430                  */
3431                 nb_rx_en = 0;
3432                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3433                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3434                         if (reg & IXGBE_FCRTH_FCEN)
3435                                 nb_rx_en++;
3436                 }
3437                 if (nb_rx_en > 1)
3438                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3439                 break;
3440         case ixgbe_fc_tx_pause:
3441                 /*
3442                  * Tx Flow control is enabled, and Rx Flow control is
3443                  * disabled by software override.
3444                  */
3445                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3446                 break;
3447         case ixgbe_fc_full:
3448                 /* Flow control (both Rx and Tx) is enabled by SW override. */
3449                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
3450                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3451                 break;
3452         default:
3453                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
3454                 ret_val = IXGBE_ERR_CONFIG;
3455                 goto out;
3456                 break;
3457         }
3458
3459         /* Set 802.3x based flow control settings. */
3460         mflcn_reg |= IXGBE_MFLCN_DPF;
3461         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
3462         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
3463
3464         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
3465         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
3466                 hw->fc.high_water[tc_num]) {
3467                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
3468                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
3469                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
3470         } else {
3471                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
3472                 /*
3473                  * In order to prevent Tx hangs when the internal Tx
3474                  * switch is enabled we must set the high water mark
3475                  * to the maximum FCRTH value.  This allows the Tx
3476                  * switch to function even under heavy Rx workloads.
3477                  */
3478                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
3479         }
3480         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
3481
3482         /* Configure pause time (2 TCs per register) */
3483         reg = hw->fc.pause_time * 0x00010001;
3484         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
3485                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
3486
3487         /* Configure flow control refresh threshold value */
3488         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
3489
3490 out:
3491         return ret_val;
3492 }
3493
3494 static int
3495 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
3496 {
3497         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3498         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
3499
3500         if (hw->mac.type != ixgbe_mac_82598EB) {
3501                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
3502         }
3503         return ret_val;
3504 }
3505
3506 static int
3507 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
3508 {
3509         int err;
3510         uint32_t rx_buf_size;
3511         uint32_t max_high_water;
3512         uint8_t tc_num;
3513         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
3514         struct ixgbe_hw *hw =
3515                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3516         struct ixgbe_dcb_config *dcb_config =
3517                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3518
3519         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
3520                 ixgbe_fc_none,
3521                 ixgbe_fc_rx_pause,
3522                 ixgbe_fc_tx_pause,
3523                 ixgbe_fc_full
3524         };
3525
3526         PMD_INIT_FUNC_TRACE();
3527
3528         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3529         tc_num = map[pfc_conf->priority];
3530         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
3531         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3532         /*
3533          * Reserve at least one Ethernet frame for the watermarks;
3534          * high_water/low_water are expressed in kilobytes on ixgbe.
3535          */
3536         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
3537         if ((pfc_conf->fc.high_water > max_high_water) ||
3538             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
3539                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3540                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3541                 return (-EINVAL);
3542         }
3543
3544         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
3545         hw->fc.pause_time = pfc_conf->fc.pause_time;
3546         hw->fc.send_xon = pfc_conf->fc.send_xon;
3547         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
3548         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3549
3550         err = ixgbe_dcb_pfc_enable(dev, tc_num);
3551
3552         /* Not negotiated is not an error case */
3553         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
3554                 return 0;
3555
3556         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
3557         return -EIO;
3558 }
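
/*
 * Illustrative sketch (not part of the driver): per-priority (PFC)
 * configuration through the generic call serviced above; the priority is
 * mapped to a traffic class via the DCB map, as done in the function.
 * The priority and watermark values are assumptions of the example.
 */
#if 0   /* example only */
static int
example_enable_pfc(uint8_t port_id, uint8_t priority)
{
        struct rte_eth_pfc_conf pfc_conf;

        memset(&pfc_conf, 0, sizeof(pfc_conf));
        pfc_conf.priority = priority;
        pfc_conf.fc.mode = RTE_FC_FULL;
        pfc_conf.fc.high_water = 0x80;  /* example values, in KB */
        pfc_conf.fc.low_water = 0x40;
        pfc_conf.fc.pause_time = 0x680;
        pfc_conf.fc.send_xon = 1;
        return rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
}
#endif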
3559
3560 static int
3561 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3562                           struct rte_eth_rss_reta_entry64 *reta_conf,
3563                           uint16_t reta_size)
3564 {
3565         uint8_t i, j, mask;
3566         uint32_t reta, r;
3567         uint16_t idx, shift;
3568         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3569         uint16_t sp_reta_size;
3570         uint32_t reta_reg;
3571
3572         PMD_INIT_FUNC_TRACE();
3573
3574         if (!ixgbe_rss_update_sp(hw->mac.type)) {
3575                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3576                         "NIC.");
3577                 return -ENOTSUP;
3578         }
3579
3580         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3581         if (reta_size != sp_reta_size) {
3582                 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
3583                         "(%d) doesn't match the number the hardware can support "
3584                         "(%d)\n", reta_size, sp_reta_size);
3585                 return -EINVAL;
3586         }
3587
3588         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
3589                 idx = i / RTE_RETA_GROUP_SIZE;
3590                 shift = i % RTE_RETA_GROUP_SIZE;
3591                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3592                                                 IXGBE_4_BIT_MASK);
3593                 if (!mask)
3594                         continue;
3595                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3596                 if (mask == IXGBE_4_BIT_MASK)
3597                         r = 0;
3598                 else
3599                         r = IXGBE_READ_REG(hw, reta_reg);
3600                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
3601                         if (mask & (0x1 << j))
3602                                 reta |= reta_conf[idx].reta[shift + j] <<
3603                                                         (CHAR_BIT * j);
3604                         else
3605                                 reta |= r & (IXGBE_8_BIT_MASK <<
3606                                                 (CHAR_BIT * j));
3607                 }
3608                 IXGBE_WRITE_REG(hw, reta_reg, reta);
3609         }
3610
3611         return 0;
3612 }
3613
3614 static int
3615 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3616                          struct rte_eth_rss_reta_entry64 *reta_conf,
3617                          uint16_t reta_size)
3618 {
3619         uint8_t i, j, mask;
3620         uint32_t reta;
3621         uint16_t idx, shift;
3622         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3623         uint16_t sp_reta_size;
3624         uint32_t reta_reg;
3625
3626         PMD_INIT_FUNC_TRACE();
3627         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3628         if (reta_size != sp_reta_size) {
3629                 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
3630                         "(%d) doesn't match the number the hardware can support "
3631                         "(%d)\n", reta_size, sp_reta_size);
3632                 return -EINVAL;
3633         }
3634
3635         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
3636                 idx = i / RTE_RETA_GROUP_SIZE;
3637                 shift = i % RTE_RETA_GROUP_SIZE;
3638                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3639                                                 IXGBE_4_BIT_MASK);
3640                 if (!mask)
3641                         continue;
3642
3643                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3644                 reta = IXGBE_READ_REG(hw, reta_reg);
3645                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
3646                         if (mask & (0x1 << j))
3647                                 reta_conf[idx].reta[shift + j] =
3648                                         ((reta >> (CHAR_BIT * j)) &
3649                                                 IXGBE_8_BIT_MASK);
3650                 }
3651         }
3652
3653         return 0;
3654 }
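
/*
 * Illustrative sketch (not part of the driver): spreading the RETA
 * round-robin over nb_queues Rx queues with the generic API handled by
 * the two functions above. reta_size must equal the size reported in
 * dev_info for the port; the helper name is an assumption of the example.
 */
#if 0   /* example only */
static int
example_spread_reta(uint8_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
        /* 8 groups of 64 entries cover the largest (512-entry) table */
        struct rte_eth_rss_reta_entry64 reta_conf[8];
        uint16_t i;

        if (nb_queues == 0 || reta_size > 8 * RTE_RETA_GROUP_SIZE)
                return -1;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
                        (1ULL << (i % RTE_RETA_GROUP_SIZE));
                reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
                        i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
#endif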
3655
3656 static void
3657 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
3658                                 uint32_t index, uint32_t pool)
3659 {
3660         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3661         uint32_t enable_addr = 1;
3662
3663         ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
3664 }
3665
3666 static void
3667 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3668 {
3669         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3670
3671         ixgbe_clear_rar(hw, index);
3672 }
3673
3674 static void
3675 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
3676 {
3677         ixgbe_remove_rar(dev, 0);
3678
3679         ixgbe_add_rar(dev, addr, 0, 0);
3680 }
3681
3682 static int
3683 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3684 {
3685         uint32_t hlreg0;
3686         uint32_t maxfrs;
3687         struct ixgbe_hw *hw;
3688         struct rte_eth_dev_info dev_info;
3689         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3690
3691         ixgbe_dev_info_get(dev, &dev_info);
3692
3693         /* check that mtu is within the allowed range */
3694         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
3695                 return -EINVAL;
3696
3697         /* refuse mtu that requires the support of scattered packets when this
3698          * feature has not been enabled before. */
3699         if (!dev->data->scattered_rx &&
3700             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
3701              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
3702                 return -EINVAL;
3703
3704         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3705         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3706
3707         /* switch to jumbo mode if needed */
3708         if (frame_size > ETHER_MAX_LEN) {
3709                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3710                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3711         } else {
3712                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3713                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
3714         }
3715         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3716
3717         /* update max frame size */
3718         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3719
3720         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
3721         maxfrs &= 0x0000FFFF;
3722         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
3723         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
3724
3725         return 0;
3726 }
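
/*
 * Illustrative sketch (not part of the driver): changing the MTU from an
 * application. 9000 is an arbitrary jumbo value; the call returns -EINVAL
 * if the resulting frame exceeds max_rx_pktlen or would require scattered
 * Rx while it is disabled, as checked above.
 */
#if 0   /* example only */
static int
example_set_jumbo_mtu(uint8_t port_id)
{
        return rte_eth_dev_set_mtu(port_id, 9000);
}
#endif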
3727
3728 /*
3729  * Virtual Function operations
3730  */
3731 static void
3732 ixgbevf_intr_disable(struct ixgbe_hw *hw)
3733 {
3734         PMD_INIT_FUNC_TRACE();
3735
3736         /* Clear the interrupt mask to stop interrupts from being generated */
3737         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
3738
3739         IXGBE_WRITE_FLUSH(hw);
3740 }
3741
3742 static void
3743 ixgbevf_intr_enable(struct ixgbe_hw *hw)
3744 {
3745         PMD_INIT_FUNC_TRACE();
3746
3747         /* Enable VF interrupt auto-clearing */
3748         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
3749         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
3750         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
3751
3752         IXGBE_WRITE_FLUSH(hw);
3753 }
3754
3755 static int
3756 ixgbevf_dev_configure(struct rte_eth_dev *dev)
3757 {
3758         struct rte_eth_conf *conf = &dev->data->dev_conf;
3759         struct ixgbe_adapter *adapter =
3760                         (struct ixgbe_adapter *)dev->data->dev_private;
3761
3762         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3763                      dev->data->port_id);
3764
3765         /*
3766          * The VF has no ability to enable/disable HW CRC stripping;
3767          * keep the behavior consistent with the host PF.
3768          */
3769 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
3770         if (!conf->rxmode.hw_strip_crc) {
3771                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
3772                 conf->rxmode.hw_strip_crc = 1;
3773         }
3774 #else
3775         if (conf->rxmode.hw_strip_crc) {
3776                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
3777                 conf->rxmode.hw_strip_crc = 0;
3778         }
3779 #endif
3780
3781         /*
3782          * Initialize to TRUE. If any of the Rx queues fails to meet the bulk
3783          * allocation or vector Rx preconditions, we will reset it.
3784          */
3785         adapter->rx_bulk_alloc_allowed = true;
3786         adapter->rx_vec_allowed = true;
3787
3788         return 0;
3789 }
3790
3791 static int
3792 ixgbevf_dev_start(struct rte_eth_dev *dev)
3793 {
3794         struct ixgbe_hw *hw =
3795                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3796         uint32_t intr_vector = 0;
3797         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3798
3799         int err, mask = 0;
3800
3801         PMD_INIT_FUNC_TRACE();
3802
3803         hw->mac.ops.reset_hw(hw);
3804         hw->mac.get_link_status = true;
3805
3806         /* negotiate mailbox API version to use with the PF. */
3807         ixgbevf_negotiate_api(hw);
3808
3809         ixgbevf_dev_tx_init(dev);
3810
3811         /* This can fail when allocating mbufs for descriptor rings */
3812         err = ixgbevf_dev_rx_init(dev);
3813         if (err) {
3814                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
3815                 ixgbe_dev_clear_queues(dev);
3816                 return err;
3817         }
3818
3819         /* Set vfta */
3820         ixgbevf_set_vfta_all(dev, 1);
3821
3822         /* Set HW strip */
3823         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
3824                 ETH_VLAN_EXTEND_MASK;
3825         ixgbevf_vlan_offload_set(dev, mask);
3826
3827         ixgbevf_dev_rxtx_start(dev);
3828
3829         /* check and configure queue intr-vector mapping */
3830         if (dev->data->dev_conf.intr_conf.rxq != 0) {
3831                 intr_vector = dev->data->nb_rx_queues;
3832                 if (rte_intr_efd_enable(intr_handle, intr_vector))
3833                         return -1;
3834         }
3835
3836         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3837                 intr_handle->intr_vec =
3838                         rte_zmalloc("intr_vec",
3839                                     dev->data->nb_rx_queues * sizeof(int), 0);
3840                 if (intr_handle->intr_vec == NULL) {
3841                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
3842                                      " intr_vec\n", dev->data->nb_rx_queues);
3843                         return -ENOMEM;
3844                 }
3845         }
3846         ixgbevf_configure_msix(dev);
3847
3848         rte_intr_enable(intr_handle);
3849
3850         /* Re-enable interrupt for VF */
3851         ixgbevf_intr_enable(hw);
3852
3853         return 0;
3854 }
3855
3856 static void
3857 ixgbevf_dev_stop(struct rte_eth_dev *dev)
3858 {
3859         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3860         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3861
3862         PMD_INIT_FUNC_TRACE();
3863
3864         hw->adapter_stopped = 1;
3865         ixgbe_stop_adapter(hw);
3866
3867         /*
3868          * Clear what we set, but keep shadow_vfta so it can be
3869          * restored after the device restarts.
3870          */
3871         ixgbevf_set_vfta_all(dev, 0);
3872
3873         /* Clear stored conf */
3874         dev->data->scattered_rx = 0;
3875
3876         ixgbe_dev_clear_queues(dev);
3877
3878         /* disable intr eventfd mapping */
3879         rte_intr_disable(intr_handle);
3880
3881         /* Clean datapath event and queue/vec mapping */
3882         rte_intr_efd_disable(intr_handle);
3883         if (intr_handle->intr_vec != NULL) {
3884                 rte_free(intr_handle->intr_vec);
3885                 intr_handle->intr_vec = NULL;
3886         }
3887 }
3888
3889 static void
3890 ixgbevf_dev_close(struct rte_eth_dev *dev)
3891 {
3892         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3893
3894         PMD_INIT_FUNC_TRACE();
3895
3896         ixgbe_reset_hw(hw);
3897
3898         ixgbevf_dev_stop(dev);
3899
3900         ixgbe_dev_free_queues(dev);
3901
3902         /* reprogram the RAR[0] in case user changed it. */
3903         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
3904 }
3905
3906 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3907 {
3908         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3909         struct ixgbe_vfta *shadow_vfta =
3910                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3911         int i = 0, j = 0, vfta = 0, mask = 1;
3912
3913         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3914                 vfta = shadow_vfta->vfta[i];
3915                 if (vfta) {
3916                         mask = 1;
3917                         for (j = 0; j < 32; j++) {
3918                                 if (vfta & mask)
3919                                         ixgbe_set_vfta(hw, (i << 5) + j, 0, on);
3920                                 mask <<= 1;
3921                         }
3922                 }
3923         }
3924
3925 }
3926
3927 static int
3928 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3929 {
3930         struct ixgbe_hw *hw =
3931                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3932         struct ixgbe_vfta *shadow_vfta =
3933                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3934         uint32_t vid_idx = 0;
3935         uint32_t vid_bit = 0;
3936         int ret = 0;
3937
3938         PMD_INIT_FUNC_TRACE();
3939
3940         /* vind is not used in the VF driver; set it to 0 (see ixgbe_set_vfta_vf) */
3941         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
3942         if (ret) {
3943                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3944                 return ret;
3945         }
3946         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3947         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3948
3949         /* Save what we set and restore it after device reset */
3950         if (on)
3951                 shadow_vfta->vfta[vid_idx] |= vid_bit;
3952         else
3953                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
3954
3955         return 0;
3956 }
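
/*
 * Illustrative sketch (not part of the driver): a VF application adds a
 * VLAN to its filter through the generic call below; the VFTA update is
 * performed via the PF mailbox, as noted above. VLAN id 100 and the
 * helper name are assumptions of the example.
 */
#if 0   /* example only */
static int
example_vf_add_vlan(uint8_t port_id)
{
        return rte_eth_dev_vlan_filter(port_id, 100, 1);
}
#endif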
3957
3958 static void
3959 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
3960 {
3961         struct ixgbe_hw *hw =
3962                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3963         uint32_t ctrl;
3964
3965         PMD_INIT_FUNC_TRACE();
3966
3967         if (queue >= hw->mac.max_rx_queues)
3968                 return;
3969
3970         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
3971         if (on)
3972                 ctrl |= IXGBE_RXDCTL_VME;
3973         else
3974                 ctrl &= ~IXGBE_RXDCTL_VME;
3975         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
3976
3977         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
3978 }
3979
3980 static void
3981 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3982 {
3983         struct ixgbe_hw *hw =
3984                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3985         uint16_t i;
3986         int on = 0;
3987
3988         /* The VF only supports HW VLAN stripping; other offloads are unsupported */
3989         if (mask & ETH_VLAN_STRIP_MASK) {
3990                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
3991
3992                 for (i = 0; i < hw->mac.max_rx_queues; i++)
3993                         ixgbevf_vlan_strip_queue_set(dev, i, on);
3994         }
3995 }
3996
3997 static int
3998 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
3999 {
4000         uint32_t reg_val;
4001
4002         /* we only need to do this if VMDq is enabled */
4003         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4004         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
4005                 PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
4006                 return (-1);
4007         }
4008
4009         return 0;
4010 }
4011
4012 static uint32_t
4013 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
4014 {
4015         uint32_t vector = 0;
4016         switch (hw->mac.mc_filter_type) {
4017         case 0:   /* use bits [47:36] of the address */
4018                 vector = ((uc_addr->addr_bytes[4] >> 4) |
4019                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
4020                 break;
4021         case 1:   /* use bits [46:35] of the address */
4022                 vector = ((uc_addr->addr_bytes[4] >> 3) |
4023                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
4024                 break;
4025         case 2:   /* use bits [45:34] of the address */
4026                 vector = ((uc_addr->addr_bytes[4] >> 2) |
4027                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
4028                 break;
4029         case 3:   /* use bits [43:32] of the address */
4030                 vector = ((uc_addr->addr_bytes[4]) |
4031                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
4032                 break;
4033         default:  /* Invalid mc_filter_type */
4034                 break;
4035         }
4036
4037         /* vector can only be 12-bits or boundary will be exceeded */
4038         vector &= 0xFFF;
4039         return vector;
4040 }
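
/*
 * Worked example for the hash above (illustrative MAC address): with
 * mc_filter_type == 0 the vector comes from address bits [47:36]. For
 * an address ending in ...:AB:CD, addr_bytes[4] = 0xAB and
 * addr_bytes[5] = 0xCD, so
 *
 *     vector = (0xAB >> 4) | (0xCD << 4) = 0x0A | 0xCD0 = 0xCDA
 *
 * which the final mask then truncates to 12 bits.
 */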
4041
4042 static int
4043 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4044                                uint8_t on)
4045 {
4046         uint32_t vector;
4047         uint32_t uta_idx;
4048         uint32_t reg_val;
4049         uint32_t uta_shift;
4050         uint32_t rc;
4051         const uint32_t ixgbe_uta_idx_mask = 0x7F;
4052         const uint32_t ixgbe_uta_bit_shift = 5;
4053         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
4054         const uint32_t bit1 = 0x1;
4055
4056         struct ixgbe_hw *hw =
4057                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4058         struct ixgbe_uta_info *uta_info =
4059                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
4060
4061         /* The UTA table only exists on 82599 hardware and newer */
4062         if (hw->mac.type < ixgbe_mac_82599EB)
4063                 return (-ENOTSUP);
4064
4065         vector = ixgbe_uta_vector(hw, mac_addr);
4066         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
4067         uta_shift = vector & ixgbe_uta_bit_mask;
4068
4069         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
4070         if (rc == on)
4071                 return 0;
4072
4073         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
4074         if (on) {
4075                 uta_info->uta_in_use++;
4076                 reg_val |= (bit1 << uta_shift);
4077                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
4078         } else {
4079                 uta_info->uta_in_use--;
4080                 reg_val &= ~(bit1 << uta_shift);
4081                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
4082         }
4083
4084         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
4085
4086         if (uta_info->uta_in_use > 0)
4087                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
4088                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
4089         else
4090                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
4091
4092         return 0;
4093 }
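
/*
 * Sketch of the vector decomposition used above: the 12-bit vector
 * splits into a UTA register index (upper 7 bits) and a bit position
 * (lower 5 bits). Continuing the example vector 0xCDA:
 *
 *     uta_idx   = (0xCDA >> 5) & 0x7F = 0x66 (register 102)
 *     uta_shift = 0xCDA & 0x1F        = 0x1A (bit 26)
 *
 * so this address is represented by bit 26 of UTA register 102.
 */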
4094
4095 static int
4096 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
4097 {
4098         int i;
4099         struct ixgbe_hw *hw =
4100                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4101         struct ixgbe_uta_info *uta_info =
4102                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
4103
4104         /* The UTA table only exists on 82599 hardware and newer */
4105         if (hw->mac.type < ixgbe_mac_82599EB)
4106                 return (-ENOTSUP);
4107
4108         if (on) {
4109                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
4110                         uta_info->uta_shadow[i] = ~0;
4111                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
4112                 }
4113         } else {
4114                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
4115                         uta_info->uta_shadow[i] = 0;
4116                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
4117                 }
4118         }
4119         return 0;
4120
4121 }
4122
4123 uint32_t
4124 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
4125 {
4126         uint32_t new_val = orig_val;
4127
4128         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
4129                 new_val |= IXGBE_VMOLR_AUPE;
4130         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
4131                 new_val |= IXGBE_VMOLR_ROMPE;
4132         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
4133                 new_val |= IXGBE_VMOLR_ROPE;
4134         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
4135                 new_val |= IXGBE_VMOLR_BAM;
4136         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
4137                 new_val |= IXGBE_VMOLR_MPE;
4138
4139         return new_val;
4140 }
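
/*
 * Minimal usage sketch: building a VMOLR value that accepts untagged
 * and broadcast frames for a pool (flag names as used above):
 *
 *     uint32_t vmolr = ixgbe_convert_vm_rx_mask_to_val(
 *                 ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST, 0);
 *
 * leaves IXGBE_VMOLR_AUPE and IXGBE_VMOLR_BAM set in vmolr.
 */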
4141
4142 static int
4143 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
4144                                uint16_t rx_mask, uint8_t on)
4145 {
4146         int val = 0;
4147
4148         struct ixgbe_hw *hw =
4149                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4150         uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4151
4152         if (hw->mac.type == ixgbe_mac_82598EB) {
4153                 PMD_INIT_LOG(ERR, "setting VF receive mode should be done"
4154                              " on 82599 hardware and newer");
4155                 return (-ENOTSUP);
4156         }
4157         if (ixgbe_vmdq_mode_check(hw) < 0)
4158                 return (-ENOTSUP);
4159
4160         val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
4161
4162         if (on)
4163                 vmolr |= val;
4164         else
4165                 vmolr &= ~val;
4166
4167         IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4168
4169         return 0;
4170 }
4171
4172 static int
4173 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4174 {
4175         uint32_t reg, addr;
4176         uint32_t val;
4177         const uint8_t bit1 = 0x1;
4178
4179         struct ixgbe_hw *hw =
4180                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4181
4182         if (ixgbe_vmdq_mode_check(hw) < 0)
4183                 return (-ENOTSUP);
4184
4185         addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
4186         reg = IXGBE_READ_REG(hw, addr);
4187         val = bit1 << (pool & 0x1F);
4188
4189         if (on)
4190                 reg |= val;
4191         else
4192                 reg &= ~val;
4193
4194         IXGBE_WRITE_REG(hw, addr, reg);
4195
4196         return 0;
4197 }
4198
4199 static int
4200 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4201 {
4202         uint32_t reg, addr;
4203         uint32_t val;
4204         const uint8_t bit1 = 0x1;
4205
4206         struct ixgbe_hw *hw =
4207                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4208
4209         if (ixgbe_vmdq_mode_check(hw) < 0)
4210                 return (-ENOTSUP);
4211
4212         addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
4213         reg = IXGBE_READ_REG(hw, addr);
4214         val = bit1 << (pool & 0x1F);
4215
4216         if (on)
4217                 reg |= val;
4218         else
4219                 reg &= ~val;
4220
4221         IXGBE_WRITE_REG(hw, addr, reg);
4222
4223         return 0;
4224 }
4225
4226 static int
4227 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
4228                         uint64_t pool_mask, uint8_t vlan_on)
4229 {
4230         int ret = 0;
4231         uint16_t pool_idx;
4232         struct ixgbe_hw *hw =
4233                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4234
4235         if (ixgbe_vmdq_mode_check(hw) < 0)
4236                 return (-ENOTSUP);
4237         for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
4238                 if (pool_mask & (1ULL << pool_idx)) {
4239                         ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
4240                         if (ret < 0)
4241                                 return ret;
                     }
4242         }
4243
4244         return ret;
4245 }
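
/*
 * Illustrative example: pool_mask is a bitmap of the pools the VLAN
 * filter applies to; pool_mask = 0x5 programs the VFTA entry for
 * pools 0 and 2 only, leaving the other 62 pools untouched.
 */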
4246
4247 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
4248 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
4249 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
4250 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
4251 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
4252         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
4253         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
4254
4255 static int
4256 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
4257                         struct rte_eth_mirror_conf *mirror_conf,
4258                         uint8_t rule_id, uint8_t on)
4259 {
4260         uint32_t mr_ctl, vlvf;
4261         uint32_t mp_lsb = 0;
4262         uint32_t mv_msb = 0;
4263         uint32_t mv_lsb = 0;
4264         uint32_t mp_msb = 0;
4265         uint8_t i = 0;
4266         int reg_index = 0;
4267         uint64_t vlan_mask = 0;
4268
4269         const uint8_t pool_mask_offset = 32;
4270         const uint8_t vlan_mask_offset = 32;
4271         const uint8_t dst_pool_offset = 8;
4272         const uint8_t rule_mr_offset  = 4;
4273         const uint8_t mirror_rule_mask = 0x0F;
4274
4275         struct ixgbe_mirror_info *mr_info =
4276                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
4277         struct ixgbe_hw *hw =
4278                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4279         uint8_t mirror_type = 0;
4280
4281         if (ixgbe_vmdq_mode_check(hw) < 0)
4282                 return -ENOTSUP;
4283
4284         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
4285                 return -EINVAL;
4286
4287         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
4288                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
4289                         mirror_conf->rule_type);
4290                 return -EINVAL;
4291         }
4292
4293         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
4294                 mirror_type |= IXGBE_MRCTL_VLME;
4295                 /* Check if the VLAN ID is valid and find the corresponding index in VLVF */
4296                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
4297                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
4298                                 /* search vlan id related pool vlan filter index */
4299                                 reg_index = ixgbe_find_vlvf_slot(hw,
4300                                                 mirror_conf->vlan.vlan_id[i]);
4301                                 if (reg_index < 0)
4302                                         return -EINVAL;
4303                                 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
4304                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
4305                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
4306                                       mirror_conf->vlan.vlan_id[i]))
4307                                         vlan_mask |= (1ULL << reg_index);
4308                                 else
4309                                         return -EINVAL;
4310                         }
4311                 }
4312
4313                 if (on) {
4314                         mv_lsb = vlan_mask & 0xFFFFFFFF;
4315                         mv_msb = vlan_mask >> vlan_mask_offset;
4316
4317                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
4318                                                 mirror_conf->vlan.vlan_mask;
4319                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
4320                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
4321                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
4322                                                 mirror_conf->vlan.vlan_id[i];
4323                         }
4324                 } else {
4325                         mv_lsb = 0;
4326                         mv_msb = 0;
4327                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
4328                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
4329                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
4330                 }
4331         }
4332
4333         /*
4334          * If pool mirroring is enabled, write the related pool mask register;
4335          * if it is disabled, clear the PFMRVM register.
4336          */
4337         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
4338                 mirror_type |= IXGBE_MRCTL_VPME;
4339                 if (on) {
4340                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
4341                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
4342                         mr_info->mr_conf[rule_id].pool_mask =
4343                                         mirror_conf->pool_mask;
4344
4345                 } else {
4346                         mp_lsb = 0;
4347                         mp_msb = 0;
4348                         mr_info->mr_conf[rule_id].pool_mask = 0;
4349                 }
4350         }
4351         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
4352                 mirror_type |= IXGBE_MRCTL_UPME;
4353         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
4354                 mirror_type |= IXGBE_MRCTL_DPME;
4355
4356         /* read the mirror control register and recalculate it */
4357         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
4358
4359         if (on) {
4360                 mr_ctl |= mirror_type;
4361                 mr_ctl &= mirror_rule_mask;
4362                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
4363         } else
4364                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
4365
4366         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
4367         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
4368
4369         /* write the mirror control register */
4370         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
4371
4372         /* write the pool mirror control register */
4373         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
4374                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
4375                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
4376                                 mp_msb);
4377         }
4378         /* write the VLAN mirror control register */
4379         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
4380                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
4381                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
4382                                 mv_msb);
4383         }
4384
4385         return 0;
4386 }
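
/*
 * Hedged usage sketch (field and API names as exposed by this DPDK
 * release's ethdev layer): mirroring the traffic of pools 0 and 1
 * into pool 3 could be requested with
 *
 *     struct rte_eth_mirror_conf conf = {
 *             .rule_type = ETH_MIRROR_VIRTUAL_POOL_UP,
 *             .dst_pool  = 3,
 *             .pool_mask = 0x3,
 *     };
 *     rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
 *
 * where 0 is the rule_id and 1 enables the rule.
 */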
4387
4388 static int
4389 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
4390 {
4391         int mr_ctl = 0;
4392         uint32_t lsb_val = 0;
4393         uint32_t msb_val = 0;
4394         const uint8_t rule_mr_offset = 4;
4395
4396         struct ixgbe_hw *hw =
4397                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4398         struct ixgbe_mirror_info *mr_info =
4399                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
4400
4401         if (ixgbe_vmdq_mode_check(hw) < 0)
4402                 return (-ENOTSUP);
4403
4404         memset(&mr_info->mr_conf[rule_id], 0,
4405                 sizeof(struct rte_eth_mirror_conf));
4406
4407         /* clear PFVMCTL register */
4408         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
4409
4410         /* clear pool mask register */
4411         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
4412         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
4413
4414         /* clear vlan mask register */
4415         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
4416         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
4417
4418         return 0;
4419 }
4420
4421 static int
4422 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4423 {
4424         uint32_t mask;
4425         struct ixgbe_hw *hw =
4426                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4427
4428         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
4429         mask |= (1 << IXGBE_MISC_VEC_ID);
4430         RTE_SET_USED(queue_id);
4431         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
4432
4433         rte_intr_enable(&dev->pci_dev->intr_handle);
4434
4435         return 0;
4436 }
4437
4438 static int
4439 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4440 {
4441         uint32_t mask;
4442         struct ixgbe_hw *hw =
4443                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4444
4445         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
4446         mask &= ~(1 << IXGBE_MISC_VEC_ID);
4447         RTE_SET_USED(queue_id);
4448         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
4449
4450         return 0;
4451 }
4452
4453 static int
4454 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4455 {
4456         uint32_t mask;
4457         struct ixgbe_hw *hw =
4458                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4459         struct ixgbe_interrupt *intr =
4460                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4461
4462         if (queue_id < 16) {
4463                 ixgbe_disable_intr(hw);
4464                 intr->mask |= (1 << queue_id);
4465                 ixgbe_enable_intr(dev);
4466         } else if (queue_id < 32) {
4467                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
4468                 mask |= (1 << queue_id);
4469                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4470         } else if (queue_id < 64) {
4471                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
4472                 mask |= (1 << (queue_id - 32));
4473                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4474         }
4475         rte_intr_enable(&dev->pci_dev->intr_handle);
4476
4477         return 0;
4478 }
4479
4480 static int
4481 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4482 {
4483         uint32_t mask;
4484         struct ixgbe_hw *hw =
4485                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4486         struct ixgbe_interrupt *intr =
4487                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4488
4489         if (queue_id < 16) {
4490                 ixgbe_disable_intr(hw);
4491                 intr->mask &= ~(1 << queue_id);
4492                 ixgbe_enable_intr(dev);
4493         } else if (queue_id < 32) {
4494                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
4495                 mask &= ~(1 << queue_id);
4496                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4497         } else if (queue_id < 64) {
4498                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
4499                 mask &= ~(1 << (queue_id - 32));
4500                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4501         }
4502
4503         return 0;
4504 }
4505
4506 static void
4507 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
4508                      uint8_t queue, uint8_t msix_vector)
4509 {
4510         uint32_t tmp, idx;
4511
4512         if (direction == -1) {
4513                 /* other causes */
4514                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4515                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
4516                 tmp &= ~0xFF;
4517                 tmp |= msix_vector;
4518                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
4519         } else {
4520                 /* rx or tx cause */
4521                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4522                 idx = ((16 * (queue & 1)) + (8 * direction));
4523                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
4524                 tmp &= ~(0xFF << idx);
4525                 tmp |= (msix_vector << idx);
4526                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
4527         }
4528 }
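
/*
 * Worked example of the VTIVAR indexing above: each 32-bit VTIVAR
 * register holds four 8-bit entries, one Rx and one Tx cause for two
 * queues. For queue = 3, direction = 0 (Rx):
 *
 *     register = IXGBE_VTIVAR(3 >> 1) = IXGBE_VTIVAR(1)
 *     idx      = 16 * (3 & 1) + 8 * 0 = 16
 *
 * so the Rx vector for queue 3 occupies bits [23:16] of VTIVAR(1).
 */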
4529
4530 /**
4531  * set the IVAR registers, mapping interrupt causes to vectors
4532  * @param hw
4533  *  pointer to ixgbe_hw struct
4534  * @direction
4535  *  0 for Rx, 1 for Tx, -1 for other causes
4536  * @queue
4537  *  queue to map the corresponding interrupt to
4538  * @msix_vector
4539  *  the vector to map to the corresponding queue
4540  */
4541 static void
4542 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
4543                    uint8_t queue, uint8_t msix_vector)
4544 {
4545         uint32_t tmp, idx;
4546
4547         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4548         if (hw->mac.type == ixgbe_mac_82598EB) {
4549                 if (direction == -1)
4550                         direction = 0;
4551                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
4552                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
4553                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
4554                 tmp |= (msix_vector << (8 * (queue & 0x3)));
4555                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
4556         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
4557                         (hw->mac.type == ixgbe_mac_X540)) {
4558                 if (direction == -1) {
4559                         /* other causes */
4560                         idx = ((queue & 1) * 8);
4561                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4562                         tmp &= ~(0xFF << idx);
4563                         tmp |= (msix_vector << idx);
4564                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
4565                 } else {
4566                         /* rx or tx causes */
4567                         idx = ((16 * (queue & 1)) + (8 * direction));
4568                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
4569                         tmp &= ~(0xFF << idx);
4570                         tmp |= (msix_vector << idx);
4571                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
4572                 }
4573         }
4574 }
4575
4576 static void
4577 ixgbevf_configure_msix(struct rte_eth_dev *dev)
4578 {
4579         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4580         struct ixgbe_hw *hw =
4581                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4582         uint32_t q_idx;
4583         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
4584
4585         /* don't configure the MSI-X register if no mapping has been done
4586          * between the interrupt vector and the event fd.
4587          */
4588         if (!rte_intr_dp_is_en(intr_handle))
4589                 return;
4590
4591         /* Configure all RX queues of VF */
4592         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
4593                 /* Force all queues to use vector 0,
4594                  * as IXGBE_VF_MAXMSIVECTOR = 1
4595                  */
4596                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
4597                 intr_handle->intr_vec[q_idx] = vector_idx;
4598         }
4599
4600         /* Configure VF other cause ivar */
4601         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
4602 }
4603
4604 /**
4605  * Sets up the hardware to properly generate MSI-X interrupts
4606  * @hw
4607  *  board private structure
4608  */
4609 static void
4610 ixgbe_configure_msix(struct rte_eth_dev *dev)
4611 {
4612         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4613         struct ixgbe_hw *hw =
4614                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4615         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
4616         uint32_t vec = IXGBE_MISC_VEC_ID;
4617         uint32_t mask;
4618         uint32_t gpie;
4619
4620         /* don't configure the MSI-X register if no mapping has been done
4621          * between the interrupt vector and the event fd.
4622          */
4623         if (!rte_intr_dp_is_en(intr_handle))
4624                 return;
4625
4626         if (rte_intr_allow_others(intr_handle))
4627                 vec = base = IXGBE_RX_VEC_START;
4628
4629         /* setup GPIE for MSI-x mode */
4630         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4631         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4632                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
4633         /* auto clearing and auto setting corresponding bits in EIMS
4634          * when MSI-X interrupt is triggered
4635          */
4636         if (hw->mac.type == ixgbe_mac_82598EB) {
4637                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4638         } else {
4639                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4640                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4641         }
4642         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4643
4644         /* Populate the IVAR table and set the ITR values to the
4645          * corresponding register.
4646          */
4647         for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
4648              queue_id++) {
4649                 /* by default, 1:1 mapping */
4650                 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
4651                 intr_handle->intr_vec[queue_id] = vec;
4652                 if (vec < base + intr_handle->nb_efd - 1)
4653                         vec++;
4654         }
4655
4656         switch (hw->mac.type) {
4657         case ixgbe_mac_82598EB:
4658                 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
4659                                    IXGBE_MISC_VEC_ID);
4660                 break;
4661         case ixgbe_mac_82599EB:
4662         case ixgbe_mac_X540:
4663                 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
4664                 break;
4665         default:
4666                 break;
4667         }
4668         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
4669                         IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
4670
4671         /* set up to autoclear timer, and the vectors */
4672         mask = IXGBE_EIMS_ENABLE_MASK;
4673         mask &= ~(IXGBE_EIMS_OTHER |
4674                   IXGBE_EIMS_MAILBOX |
4675                   IXGBE_EIMS_LSC);
4676
4677         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4678 }
4679
4680 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
4681         uint16_t queue_idx, uint16_t tx_rate)
4682 {
4683         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4684         uint32_t rf_dec, rf_int;
4685         uint32_t bcnrc_val;
4686         uint16_t link_speed = dev->data->dev_link.link_speed;
4687
4688         if (queue_idx >= hw->mac.max_tx_queues)
4689                 return -EINVAL;
4690
4691         if (tx_rate != 0) {
4692                 /* Calculate the rate factor values to set */
4693                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
4694                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
4695                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
4696
4697                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
4698                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
4699                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
4700                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
4701         } else {
4702                 bcnrc_val = 0;
4703         }
4704
4705         /*
4706          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
4707          * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
4708          * set as 0x4.
4709          */
4710         if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
4711                 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
4712                                 IXGBE_MAX_JUMBO_FRAME_SIZE))
4713                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
4714                         IXGBE_MMW_SIZE_JUMBO_FRAME);
4715         else
4716                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
4717                         IXGBE_MMW_SIZE_DEFAULT);
4718
4719         /* Set RTTBCNRC of queue X */
4720         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
4721         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
4722         IXGBE_WRITE_FLUSH(hw);
4723
4724         return 0;
4725 }
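
/*
 * Worked arithmetic for the rate factor (illustrative numbers,
 * assuming the 14-bit fractional shift used above): with
 * link_speed = 10000 Mbps and tx_rate = 300 Mbps:
 *
 *     rf_int = 10000 / 300 = 33
 *     rf_dec = ((10000 % 300) << 14) / 300 = (100 << 14) / 300 = 5461
 *
 * giving a rate factor of about 33.33, i.e. the queue is throttled to
 * roughly 1/33.33 of the link rate, which is the requested 300 Mbps.
 */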
4726
4727 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
4728         uint16_t tx_rate, uint64_t q_msk)
4729 {
4730         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4731         struct ixgbe_vf_info *vfinfo =
4732                 *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
4733         uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
4734         uint32_t queue_stride =
4735                 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
4736         uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
4737         uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
4738         uint16_t total_rate = 0;
4739
4740         if (queue_end >= hw->mac.max_tx_queues)
4741                 return -EINVAL;
4742
4743         if (vfinfo != NULL) {
4744                 for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
4745                         if (vf_idx == vf)
4746                                 continue;
4747                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
4748                                 idx++)
4749                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
4750                 }
4751         } else
4752                 return -EINVAL;
4753
4754         /* Store tx_rate for this vf. */
4755         for (idx = 0; idx < nb_q_per_pool; idx++) {
4756                 if (((uint64_t)0x1 << idx) & q_msk) {
4757                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
4758                                 vfinfo[vf].tx_rate[idx] = tx_rate;
4759                         total_rate += tx_rate;
4760                 }
4761         }
4762
4763         if (total_rate > dev->data->dev_link.link_speed) {
4764                 /*
4765                  * Reset the stored TX rate of the VF if it would cause
4766                  * the link speed to be exceeded.
4767                  */
4768                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
4769                 return -EINVAL;
4770         }
4771
4772         /* Set RTTBCNRC of each queue/pool for vf X  */
4773         for (; queue_idx <= queue_end; queue_idx++) {
4774                 if (0x1 & q_msk)
4775                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
4776                 q_msk = q_msk >> 1;
4777         }
4778
4779         return 0;
4780 }
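
/*
 * Hedged usage sketch (API name as exported by this release's ethdev
 * layer): limiting VF 1 to 1000 Mbps on its first two queues could be
 * requested with
 *
 *     rte_eth_set_vf_rate_limit(port_id, 1, 1000, 0x3);
 *
 * where 0x3 is the queue mask within the VF's pool.
 */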
4781
4782 static void
4783 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4784                      __attribute__((unused)) uint32_t index,
4785                      __attribute__((unused)) uint32_t pool)
4786 {
4787         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4788         int diag;
4789
4790         /*
4791          * On an 82599 VF, re-adding the same MAC address is not an idempotent
4792          * operation. Trap this case to avoid exhausting the [very limited]
4793          * set of PF resources used to store VF MAC addresses.
4794          */
4795         if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
4796                 return;
4797         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
4798         if (diag == 0)
4799                 return;
4800         PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
4801 }
4802
4803 static void
4804 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
4805 {
4806         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4807         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
4808         struct ether_addr *mac_addr;
4809         uint32_t i;
4810         int diag;
4811
4812         /*
4813          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
4814          * not support the deletion of a given MAC address.
4815          * Instead, it requires deleting all MAC addresses, then re-adding
4816          * all of them, with the exception of the one to be deleted.
4817          */
4818         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
4819
4820         /*
4821          * Add again all MAC addresses, with the exception of the deleted one
4822          * and of the permanent MAC address.
4823          */
4824         for (i = 0, mac_addr = dev->data->mac_addrs;
4825              i < hw->mac.num_rar_entries; i++, mac_addr++) {
4826                 /* Skip the deleted MAC address */
4827                 if (i == index)
4828                         continue;
4829                 /* Skip NULL MAC addresses */
4830                 if (is_zero_ether_addr(mac_addr))
4831                         continue;
4832                 /* Skip the permanent MAC address */
4833                 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
4834                         continue;
4835                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
4836                 if (diag != 0)
4837                         PMD_DRV_LOG(ERR,
4838                                     "Re-adding MAC address "
4839                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
4840                                     "diag=%d",
4841                                     mac_addr->addr_bytes[0],
4842                                     mac_addr->addr_bytes[1],
4843                                     mac_addr->addr_bytes[2],
4844                                     mac_addr->addr_bytes[3],
4845                                     mac_addr->addr_bytes[4],
4846                                     mac_addr->addr_bytes[5],
4847                                     diag);
4848         }
4849 }
4850
4851 static void
4852 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4853 {
4854         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4855
4856         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
4857 }
4858
4859 #define MAC_TYPE_FILTER_SUP(type)    do {\
4860         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
4861                 (type) != ixgbe_mac_X550)\
4862                 return -ENOTSUP;\
4863 } while (0)
4864
4865 static int
4866 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
4867                         struct rte_eth_syn_filter *filter,
4868                         bool add)
4869 {
4870         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4871         uint32_t synqf;
4872
4873         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
4874                 return -EINVAL;
4875
4876         synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
4877
4878         if (add) {
4879                 if (synqf & IXGBE_SYN_FILTER_ENABLE)
4880                         return -EINVAL;
4881                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
4882                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
4883
4884                 if (filter->hig_pri)
4885                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
4886                 else
4887                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
4888         } else {
4889                 if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
4890                         return -ENOENT;
4891                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
4892         }
4893         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
4894         IXGBE_WRITE_FLUSH(hw);
4895         return 0;
4896 }
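
/*
 * Hedged usage sketch: applications reach this handler through the
 * generic filter control API. Steering TCP SYN packets to queue 4
 * with high priority could look like
 *
 *     struct rte_eth_syn_filter syn = { .hig_pri = 1, .queue = 4 };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
 *                             RTE_ETH_FILTER_ADD, &syn);
 */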
4897
4898 static int
4899 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
4900                         struct rte_eth_syn_filter *filter)
4901 {
4902         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4903         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
4904
4905         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
4906                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
4907                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> IXGBE_SYN_FILTER_QUEUE_SHIFT);
4908                 return 0;
4909         }
4910         return -ENOENT;
4911 }
4912
4913 static int
4914 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
4915                         enum rte_filter_op filter_op,
4916                         void *arg)
4917 {
4918         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4919         int ret;
4920
4921         MAC_TYPE_FILTER_SUP(hw->mac.type);
4922
4923         if (filter_op == RTE_ETH_FILTER_NOP)
4924                 return 0;
4925
4926         if (arg == NULL) {
4927                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
4928                             filter_op);
4929                 return -EINVAL;
4930         }
4931
4932         switch (filter_op) {
4933         case RTE_ETH_FILTER_ADD:
4934                 ret = ixgbe_syn_filter_set(dev,
4935                                 (struct rte_eth_syn_filter *)arg,
4936                                 TRUE);
4937                 break;
4938         case RTE_ETH_FILTER_DELETE:
4939                 ret = ixgbe_syn_filter_set(dev,
4940                                 (struct rte_eth_syn_filter *)arg,
4941                                 FALSE);
4942                 break;
4943         case RTE_ETH_FILTER_GET:
4944                 ret = ixgbe_syn_filter_get(dev,
4945                                 (struct rte_eth_syn_filter *)arg);
4946                 break;
4947         default:
4948                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
4949                 ret = -EINVAL;
4950                 break;
4951         }
4952
4953         return ret;
4954 }
4955
4956
4957 static inline enum ixgbe_5tuple_protocol
4958 convert_protocol_type(uint8_t protocol_value)
4959 {
4960         if (protocol_value == IPPROTO_TCP)
4961                 return IXGBE_FILTER_PROTOCOL_TCP;
4962         else if (protocol_value == IPPROTO_UDP)
4963                 return IXGBE_FILTER_PROTOCOL_UDP;
4964         else if (protocol_value == IPPROTO_SCTP)
4965                 return IXGBE_FILTER_PROTOCOL_SCTP;
4966         else
4967                 return IXGBE_FILTER_PROTOCOL_NONE;
4968 }
4969
4970 /*
4971  * add a 5tuple filter
4972  *
4973  * @param
4974  * dev: Pointer to struct rte_eth_dev.
4975  * filter: pointer to the filter that will be added; on success,
4976  *         filter->index is set to the allocated filter slot and
4977  *         matching packets are steered to filter->queue.
4978  *
4979  * @return
4980  *    - On success, zero.
4981  *    - On failure, a negative value.
4982  */
4983 static int
4984 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
4985                         struct ixgbe_5tuple_filter *filter)
4986 {
4987         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4988         struct ixgbe_filter_info *filter_info =
4989                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4990         int i, idx, shift;
4991         uint32_t ftqf, sdpqf;
4992         uint32_t l34timir = 0;
4993         uint8_t mask = 0xff;
4994
4995         /*
4996          * look for an unused 5tuple filter index,
4997          * and insert the filter to list.
4998          */
4999         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
5000                 idx = i / (sizeof(uint32_t) * NBBY);
5001                 shift = i % (sizeof(uint32_t) * NBBY);
5002                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
5003                         filter_info->fivetuple_mask[idx] |= 1 << shift;
5004                         filter->index = i;
5005                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
5006                                           filter,
5007                                           entries);
5008                         break;
5009                 }
5010         }
5011         if (i >= IXGBE_MAX_FTQF_FILTERS) {
5012                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
5013                 return -ENOSYS;
5014         }
5015
5016         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
5017                                 IXGBE_SDPQF_DSTPORT_SHIFT);
5018         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
5019
5020         ftqf = (uint32_t)(filter->filter_info.proto &
5021                 IXGBE_FTQF_PROTOCOL_MASK);
5022         ftqf |= (uint32_t)((filter->filter_info.priority &
5023                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
5024         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
5025                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
5026         if (filter->filter_info.dst_ip_mask == 0)
5027                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
5028         if (filter->filter_info.src_port_mask == 0)
5029                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
5030         if (filter->filter_info.dst_port_mask == 0)
5031                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
5032         if (filter->filter_info.proto_mask == 0)
5033                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
5034         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
5035         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
5036         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
5037
5038         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
5039         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
5040         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
5041         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
5042
5043         l34timir |= IXGBE_L34T_IMIR_RESERVE;
5044         l34timir |= (uint32_t)(filter->queue <<
5045                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
5046         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
5047         return 0;
5048 }
5049
5050 /*
5051  * remove a 5tuple filter
5052  *
5053  * @param
5054  * dev: Pointer to struct rte_eth_dev.
5055  * filter: pointer to the filter to be removed.
5056  */
5057 static void
5058 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
5059                         struct ixgbe_5tuple_filter *filter)
5060 {
5061         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5062         struct ixgbe_filter_info *filter_info =
5063                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5064         uint16_t index = filter->index;
5065
5066         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
5067                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
5068         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
5069         rte_free(filter);
5070
5071         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
5072         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
5073         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
5074         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
5075         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
5076 }
5077
5078 static int
5079 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
5080 {
5081         struct ixgbe_hw *hw;
5082         uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
5083
5084         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5085
5086         if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
5087                 return -EINVAL;
5088
5089         /* refuse mtu that requires the support of scattered packets when this
5090          * feature has not been enabled before. */
5091         if (!dev->data->scattered_rx &&
5092             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
5093              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
5094                 return -EINVAL;
5095
5096         /*
5097          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
5098          * request of the version 2.0 of the mailbox API.
5099          * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
5100          * of the mailbox API.
5101          * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
5102          * prior to 3.11.33 which contains the following change:
5103          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
5104          */
5105         ixgbevf_rlpml_set_vf(hw, max_frame);
5106
5107         /* update max frame size */
5108         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
5109         return 0;
5110 }
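
/*
 * Example frame-size arithmetic (standard Ethernet constants): for
 * mtu = 1500, max_frame = 1500 + 14 (header) + 4 (CRC) = 1518 bytes;
 * with two VLAN tags the Rx buffer must then hold 1518 + 8 = 1526
 * bytes, which is what the scattered-Rx check above verifies.
 */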
5111
5112 #define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
5113         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
5114                 return -ENOTSUP;\
5115 } while (0)
5116
5117 static inline struct ixgbe_5tuple_filter *
5118 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
5119                         struct ixgbe_5tuple_filter_info *key)
5120 {
5121         struct ixgbe_5tuple_filter *it;
5122
5123         TAILQ_FOREACH(it, filter_list, entries) {
5124                 if (memcmp(key, &it->filter_info,
5125                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
5126                         return it;
5127                 }
5128         }
5129         return NULL;
5130 }
5131
5132 /* translate elements of struct rte_eth_ntuple_filter into struct ixgbe_5tuple_filter_info */
5133 static inline int
5134 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
5135                         struct ixgbe_5tuple_filter_info *filter_info)
5136 {
5137         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
5138                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
5139                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
5140                 return -EINVAL;
5141
5142         switch (filter->dst_ip_mask) {
5143         case UINT32_MAX:
5144                 filter_info->dst_ip_mask = 0;
5145                 filter_info->dst_ip = filter->dst_ip;
5146                 break;
5147         case 0:
5148                 filter_info->dst_ip_mask = 1;
5149                 break;
5150         default:
5151                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
5152                 return -EINVAL;
5153         }
5154
5155         switch (filter->src_ip_mask) {
5156         case UINT32_MAX:
5157                 filter_info->src_ip_mask = 0;
5158                 filter_info->src_ip = filter->src_ip;
5159                 break;
5160         case 0:
5161                 filter_info->src_ip_mask = 1;
5162                 break;
5163         default:
5164                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
5165                 return -EINVAL;
5166         }
5167
5168         switch (filter->dst_port_mask) {
5169         case UINT16_MAX:
5170                 filter_info->dst_port_mask = 0;
5171                 filter_info->dst_port = filter->dst_port;
5172                 break;
5173         case 0:
5174                 filter_info->dst_port_mask = 1;
5175                 break;
5176         default:
5177                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
5178                 return -EINVAL;
5179         }
5180
5181         switch (filter->src_port_mask) {
5182         case UINT16_MAX:
5183                 filter_info->src_port_mask = 0;
5184                 filter_info->src_port = filter->src_port;
5185                 break;
5186         case 0:
5187                 filter_info->src_port_mask = 1;
5188                 break;
5189         default:
5190                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
5191                 return -EINVAL;
5192         }
5193
5194         switch (filter->proto_mask) {
5195         case UINT8_MAX:
5196                 filter_info->proto_mask = 0;
5197                 filter_info->proto =
5198                         convert_protocol_type(filter->proto);
5199                 break;
5200         case 0:
5201                 filter_info->proto_mask = 1;
5202                 break;
5203         default:
5204                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
5205                 return -EINVAL;
5206         }
5207
5208         filter_info->priority = (uint8_t)filter->priority;
5209         return 0;
5210 }
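
/*
 * Note on the mask convention implemented above: the generic API uses
 * all-ones for "match this field" and zero for "ignore", while the
 * internal mask is inverted, 0 = compare and 1 = ignore. For example,
 * dst_ip_mask = UINT32_MAX becomes filter_info->dst_ip_mask = 0.
 */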
5211
5212 /*
5213  * add or delete a ntuple filter
5214  *
5215  * @param
5216  * dev: Pointer to struct rte_eth_dev.
5217  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
5218  * add: if true, add filter, if false, remove filter
5219  *
5220  * @return
5221  *    - On success, zero.
5222  *    - On failure, a negative value.
5223  */
5224 static int
5225 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
5226                         struct rte_eth_ntuple_filter *ntuple_filter,
5227                         bool add)
5228 {
5229         struct ixgbe_filter_info *filter_info =
5230                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5231         struct ixgbe_5tuple_filter_info filter_5tuple;
5232         struct ixgbe_5tuple_filter *filter;
5233         int ret;
5234
5235         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
5236                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
5237                 return -EINVAL;
5238         }
5239
5240         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
5241         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
5242         if (ret < 0)
5243                 return ret;
5244
5245         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
5246                                          &filter_5tuple);
5247         if (filter != NULL && add) {
5248                 PMD_DRV_LOG(ERR, "filter exists.");
5249                 return -EEXIST;
5250         }
5251         if (filter == NULL && !add) {
5252                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
5253                 return -ENOENT;
5254         }
5255
5256         if (add) {
5257                 filter = rte_zmalloc("ixgbe_5tuple_filter",
5258                                 sizeof(struct ixgbe_5tuple_filter), 0);
5259                 if (filter == NULL)
5260                         return -ENOMEM;
5261                 (void)rte_memcpy(&filter->filter_info,
5262                                  &filter_5tuple,
5263                                  sizeof(struct ixgbe_5tuple_filter_info));
5264                 filter->queue = ntuple_filter->queue;
5265                 ret = ixgbe_add_5tuple_filter(dev, filter);
5266                 if (ret < 0) {
5267                         rte_free(filter);
5268                         return ret;
5269                 }
5270         } else
5271                 ixgbe_remove_5tuple_filter(dev, filter);
5272
5273         return 0;
5274 }
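
/*
 * Hedged usage sketch (field names per this release's
 * rte_eth_ntuple_filter; unset masks default to zero, i.e. "ignore"):
 * a 5-tuple rule sending TCP traffic for 192.0.2.1:80 to queue 2
 * could be expressed as
 *
 *     struct rte_eth_ntuple_filter f = {
 *             .flags = RTE_5TUPLE_FLAGS,
 *             .dst_ip = rte_cpu_to_be_32(IPv4(192, 0, 2, 1)),
 *             .dst_ip_mask = UINT32_MAX,
 *             .dst_port = rte_cpu_to_be_16(80),
 *             .dst_port_mask = UINT16_MAX,
 *             .proto = IPPROTO_TCP,
 *             .proto_mask = UINT8_MAX,
 *             .priority = 1,
 *             .queue = 2,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                             RTE_ETH_FILTER_ADD, &f);
 */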
5275
5276 /*
5277  * get a ntuple filter
5278  *
5279  * @param
5280  * dev: Pointer to struct rte_eth_dev.
5281  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
5282  *
5283  * @return
5284  *    - On success, zero.
5285  *    - On failure, a negative value.
5286  */
5287 static int
5288 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
5289                         struct rte_eth_ntuple_filter *ntuple_filter)
5290 {
5291         struct ixgbe_filter_info *filter_info =
5292                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5293         struct ixgbe_5tuple_filter_info filter_5tuple;
5294         struct ixgbe_5tuple_filter *filter;
5295         int ret;
5296
5297         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
5298                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
5299                 return -EINVAL;
5300         }
5301
5302         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
5303         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
5304         if (ret < 0)
5305                 return ret;
5306
5307         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
5308                                          &filter_5tuple);
5309         if (filter == NULL) {
5310                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
5311                 return -ENOENT;
5312         }
5313         ntuple_filter->queue = filter->queue;
5314         return 0;
5315 }
5316
5317 /*
5318  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
5319  * @dev: pointer to rte_eth_dev structure
5320  * @filter_op: operation to be taken.
5321  * @arg: a pointer to specific structure corresponding to the filter_op
5322  *
5323  * @return
5324  *    - On success, zero.
5325  *    - On failure, a negative value.
5326  */
5327 static int
5328 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
5329                                 enum rte_filter_op filter_op,
5330                                 void *arg)
5331 {
5332         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5333         int ret;
5334
5335         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
5336
5337         if (filter_op == RTE_ETH_FILTER_NOP)
5338                 return 0;
5339
5340         if (arg == NULL) {
5341                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
5342                             filter_op);
5343                 return -EINVAL;
5344         }
5345
5346         switch (filter_op) {
5347         case RTE_ETH_FILTER_ADD:
5348                 ret = ixgbe_add_del_ntuple_filter(dev,
5349                         (struct rte_eth_ntuple_filter *)arg,
5350                         TRUE);
5351                 break;
5352         case RTE_ETH_FILTER_DELETE:
5353                 ret = ixgbe_add_del_ntuple_filter(dev,
5354                         (struct rte_eth_ntuple_filter *)arg,
5355                         FALSE);
5356                 break;
5357         case RTE_ETH_FILTER_GET:
5358                 ret = ixgbe_get_ntuple_filter(dev,
5359                         (struct rte_eth_ntuple_filter *)arg);
5360                 break;
5361         default:
5362                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
5363                 ret = -EINVAL;
5364                 break;
5365         }
5366         return ret;
5367 }
5368
5369 static inline int
5370 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
5371                         uint16_t ethertype)
5372 {
5373         int i;
5374
5375         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
5376                 if (filter_info->ethertype_filters[i] == ethertype &&
5377                     (filter_info->ethertype_mask & (1 << i)))
5378                         return i;
5379         }
5380         return -1;
5381 }
5382
5383 static inline int
5384 ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
5385                         uint16_t ethertype)
5386 {
5387         int i;
5388
5389         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
5390                 if (!(filter_info->ethertype_mask & (1 << i))) {
5391                         filter_info->ethertype_mask |= 1 << i;
5392                         filter_info->ethertype_filters[i] = ethertype;
5393                         return i;
5394                 }
5395         }
5396         return -1;
5397 }
5398
5399 static inline int
5400 ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
5401                         uint8_t idx)
5402 {
5403         if (idx >= IXGBE_MAX_ETQF_FILTERS)
5404                 return -1;
5405         filter_info->ethertype_mask &= ~(1 << idx);
5406         filter_info->ethertype_filters[idx] = 0;
5407         return idx;
5408 }
5409
5410 static int
5411 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
5412                         struct rte_eth_ethertype_filter *filter,
5413                         bool add)
5414 {
5415         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5416         struct ixgbe_filter_info *filter_info =
5417                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5418         uint32_t etqf = 0;
5419         uint32_t etqs = 0;
5420         int ret;
5421
5422         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
5423                 return -EINVAL;
5424
5425         if (filter->ether_type == ETHER_TYPE_IPv4 ||
5426                 filter->ether_type == ETHER_TYPE_IPv6) {
5427                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
5428                         " ethertype filter.", filter->ether_type);
5429                 return -EINVAL;
5430         }
5431
5432         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
5433                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
5434                 return -EINVAL;
5435         }
5436         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
5437                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
5438                 return -EINVAL;
5439         }
5440
5441         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
5442         if (ret >= 0 && add) {
5443                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
5444                             filter->ether_type);
5445                 return -EEXIST;
5446         }
5447         if (ret < 0 && !add) {
5448                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
5449                             filter->ether_type);
5450                 return -ENOENT;
5451         }
5452
5453         if (add) {
5454                 ret = ixgbe_ethertype_filter_insert(filter_info,
5455                         filter->ether_type);
5456                 if (ret < 0) {
5457                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
5458                         return -ENOSPC;
5459                 }
5460                 etqf = IXGBE_ETQF_FILTER_EN;
5461                 etqf |= (uint32_t)filter->ether_type;
5462                 etqs |= (uint32_t)((filter->queue <<
5463                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
5464                                     IXGBE_ETQS_RX_QUEUE);
5465                 etqs |= IXGBE_ETQS_QUEUE_EN;
5466         } else {
5467                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
5468                 if (ret < 0)
5469                         return -EINVAL;
5470         }
5471         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
5472         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
5473         IXGBE_WRITE_FLUSH(hw);
5474
5475         return 0;
5476 }
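/*
 * Illustrative sketch (not part of this driver): adding an ethertype
 * filter from an application.  The LLDP ether type (0x88cc) and the
 * queue are hypothetical; IPv4/IPv6 ether types and the MAC-compare and
 * drop flags are rejected above.
 *
 *	struct rte_eth_ethertype_filter etype = {
 *		.ether_type = 0x88cc,
 *		.flags = 0,
 *		.queue = 2,
 *	};
 *	int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *					  RTE_ETH_FILTER_ADD, &etype);
 */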
5477
5478 static int
5479 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
5480                         struct rte_eth_ethertype_filter *filter)
5481 {
5482         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5483         struct ixgbe_filter_info *filter_info =
5484                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5485         uint32_t etqf, etqs;
5486         int ret;
5487
5488         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
5489         if (ret < 0) {
5490                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
5491                             filter->ether_type);
5492                 return -ENOENT;
5493         }
5494
5495         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
5496         if (etqf & IXGBE_ETQF_FILTER_EN) {
5497                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
5498                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
5499                 filter->flags = 0;
5500                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
5501                                IXGBE_ETQS_RX_QUEUE_SHIFT;
5502                 return 0;
5503         }
5504         return -ENOENT;
5505 }
5506
5507 /*
5508  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
5509  * @dev: pointer to rte_eth_dev structure
5510  * @filter_op: the operation to be performed
5511  * @arg: a pointer to the structure corresponding to filter_op
5512  */
5513 static int
5514 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
5515                                 enum rte_filter_op filter_op,
5516                                 void *arg)
5517 {
5518         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5519         int ret;
5520
5521         MAC_TYPE_FILTER_SUP(hw->mac.type);
5522
5523         if (filter_op == RTE_ETH_FILTER_NOP)
5524                 return 0;
5525
5526         if (arg == NULL) {
5527                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
5528                             filter_op);
5529                 return -EINVAL;
5530         }
5531
5532         switch (filter_op) {
5533         case RTE_ETH_FILTER_ADD:
5534                 ret = ixgbe_add_del_ethertype_filter(dev,
5535                         (struct rte_eth_ethertype_filter *)arg,
5536                         TRUE);
5537                 break;
5538         case RTE_ETH_FILTER_DELETE:
5539                 ret = ixgbe_add_del_ethertype_filter(dev,
5540                         (struct rte_eth_ethertype_filter *)arg,
5541                         FALSE);
5542                 break;
5543         case RTE_ETH_FILTER_GET:
5544                 ret = ixgbe_get_ethertype_filter(dev,
5545                         (struct rte_eth_ethertype_filter *)arg);
5546                 break;
5547         default:
5548                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
5549                 ret = -EINVAL;
5550                 break;
5551         }
5552         return ret;
5553 }
5554
5555 static int
5556 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
5557                      enum rte_filter_type filter_type,
5558                      enum rte_filter_op filter_op,
5559                      void *arg)
5560 {
5561         int ret = -EINVAL;
5562
5563         switch (filter_type) {
5564         case RTE_ETH_FILTER_NTUPLE:
5565                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
5566                 break;
5567         case RTE_ETH_FILTER_ETHERTYPE:
5568                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
5569                 break;
5570         case RTE_ETH_FILTER_SYN:
5571                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
5572                 break;
5573         case RTE_ETH_FILTER_FDIR:
5574                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
5575                 break;
5576         default:
5577                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5578                                                         filter_type);
5579                 break;
5580         }
5581
5582         return ret;
5583 }
5584
5585 static u8 *
5586 ixgbe_dev_addr_list_itr(struct ixgbe_hw *hw __rte_unused,
5587                         u8 **mc_addr_ptr, u32 *vmdq)
5588 {
5589         u8 *mc_addr;
5590
5591         *vmdq = 0;
5592         mc_addr = *mc_addr_ptr;
5593         *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
5594         return mc_addr;
5595 }
5596
5597 static int
5598 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
5599                           struct ether_addr *mc_addr_set,
5600                           uint32_t nb_mc_addr)
5601 {
5602         struct ixgbe_hw *hw;
5603         u8 *mc_addr_list;
5604
5605         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5606         mc_addr_list = (u8 *)mc_addr_set;
5607         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
5608                                          ixgbe_dev_addr_list_itr, TRUE);
5609 }
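/*
 * Illustrative sketch (not part of this driver): installing a multicast
 * allow-list through the ethdev API that ends up in
 * ixgbe_dev_set_mc_addr_list().  Addresses and port are hypothetical.
 *
 *	struct ether_addr mc_addrs[2] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
 *	};
 *	int ret = rte_eth_dev_set_mc_addr_list(port_id, mc_addrs, 2);
 */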
5610
5611 static int
5612 ixgbe_timesync_enable(struct rte_eth_dev *dev)
5613 {
5614         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5615         uint32_t tsync_ctl;
5616         uint32_t tsauxc;
5617
5618         /* Enable system time for platforms where it isn't on by default. */
5619         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
5620         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
5621         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
5622
5623         /* Start incrementing the register used to timestamp PTP packets. */
5624         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, IXGBE_TIMINCA_INIT);
5625
5626         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5627         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
5628                         (ETHER_TYPE_1588 |
5629                          IXGBE_ETQF_FILTER_EN |
5630                          IXGBE_ETQF_1588));
5631
5632         /* Enable timestamping of received PTP packets. */
5633         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5634         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
5635         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
5636
5637         /* Enable timestamping of transmitted PTP packets. */
5638         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5639         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
5640         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
5641
5642         return 0;
5643 }
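/*
 * Illustrative sketch (not part of this driver): enabling timestamping
 * and reading an Rx timestamp from an application.  Note that in this
 * version the raw 64-bit SYSTIM value is returned in tv_sec with
 * tv_nsec left at zero (see the read functions below).
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	...
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		printf("raw SYSTIM: %" PRIu64 "\n", (uint64_t)ts.tv_sec);
 */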
5644
5645 static int
5646 ixgbe_timesync_disable(struct rte_eth_dev *dev)
5647 {
5648         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5649         uint32_t tsync_ctl;
5650
5651         /* Disable timestamping of transmitted PTP packets. */
5652         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5653         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
5654         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
5655
5656         /* Disable timestamping of received PTP packets. */
5657         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5658         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
5659         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
5660
5661         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5662         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
5663
5664         /* Stop incrementing the System Time registers. */
5665         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
5666
5667         return 0;
5668 }
5669
5670 static int
5671 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
5672                                  struct timespec *timestamp,
5673                                  uint32_t flags __rte_unused)
5674 {
5675         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5676         uint32_t tsync_rxctl;
5677         uint32_t rx_stmpl;
5678         uint32_t rx_stmph;
5679
5680         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5681         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
5682                 return -EINVAL;
5683
5684         rx_stmpl = IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
5685         rx_stmph = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
5686
5687         timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
5688         timestamp->tv_nsec = 0;
5689
5690         return 0;
5691 }
5692
5693 static int
5694 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
5695                                  struct timespec *timestamp)
5696 {
5697         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5698         uint32_t tsync_txctl;
5699         uint32_t tx_stmpl;
5700         uint32_t tx_stmph;
5701
5702         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5703         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
5704                 return -EINVAL;
5705
5706         tx_stmpl = IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
5707         tx_stmph = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
5708
5709         timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
5710         timestamp->tv_nsec = 0;
5711
5712         return 0;
5713 }
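/*
 * Illustrative sketch (not part of this driver): TSYNCTXCTL_VALID is only
 * set once the NIC has latched the timestamp of a transmitted PTP packet,
 * so callers typically poll until the read stops returning -EINVAL.  The
 * retry bound and delay are hypothetical.
 *
 *	struct timespec ts;
 *	int retries = 100;
 *
 *	while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) < 0 &&
 *	       --retries > 0)
 *		rte_delay_us(10);
 */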
5714
5715 static int
5716 ixgbe_get_reg_length(struct rte_eth_dev *dev)
5717 {
5718         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5719         int count = 0;
5720         int g_ind = 0;
5721         const struct reg_info *reg_group;
5722         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
5723                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
5724
5725         while ((reg_group = reg_set[g_ind++]))
5726                 count += ixgbe_regs_group_count(reg_group);
5727
5728         return count;
5729 }
5730
5731 static int
5732 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
5733 {
5734         int count = 0;
5735         int g_ind = 0;
5736         const struct reg_info *reg_group;
5737
5738         while ((reg_group = ixgbevf_regs[g_ind++]))
5739                 count += ixgbe_regs_group_count(reg_group);
5740
5741         return count;
5742 }
5743
5744 static int
5745 ixgbe_get_regs(struct rte_eth_dev *dev,
5746               struct rte_dev_reg_info *regs)
5747 {
5748         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5749         uint32_t *data = regs->data;
5750         int g_ind = 0;
5751         int count = 0;
5752         const struct reg_info *reg_group;
5753         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
5754                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
5755
5756         /* Support only full register dump */
5757         if ((regs->length == 0) ||
5758             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
5759                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
5760                         hw->device_id;
5761                 while ((reg_group = reg_set[g_ind++]))
5762                         count += ixgbe_read_regs_group(dev, &data[count],
5763                                 reg_group);
5764                 return 0;
5765         }
5766
5767         return -ENOTSUP;
5768 }
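/*
 * Illustrative sketch (not part of this driver): a full register dump
 * through the ethdev API.  Only length 0 or the exact full count is
 * accepted above, so the buffer is sized from the reported length.
 *
 *	struct rte_dev_reg_info reg_info = { 0 };
 *	int len = rte_eth_dev_get_reg_length(port_id);
 *
 *	reg_info.data = calloc(len, sizeof(uint32_t));
 *	reg_info.length = len;
 *	int ret = rte_eth_dev_get_reg_info(port_id, &reg_info);
 */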
5769
5770 static int
5771 ixgbevf_get_regs(struct rte_eth_dev *dev,
5772                 struct rte_dev_reg_info *regs)
5773 {
5774         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5775         uint32_t *data = regs->data;
5776         int g_ind = 0;
5777         int count = 0;
5778         const struct reg_info *reg_group;
5779
5780         /* Support only full register dump */
5781         if ((regs->length == 0) ||
5782             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
5783                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
5784                         hw->device_id;
5785                 while ((reg_group = ixgbevf_regs[g_ind++]))
5786                         count += ixgbe_read_regs_group(dev, &data[count],
5787                                                       reg_group);
5788                 return 0;
5789         }
5790
5791         return -ENOTSUP;
5792 }
5793
5794 static int
5795 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
5796 {
5797         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5798
5799         /* EEPROM size is returned in bytes; word_size is in 16-bit words. */
5800         return hw->eeprom.word_size * 2;
5801 }
5802
5803 static int
5804 ixgbe_get_eeprom(struct rte_eth_dev *dev,
5805                 struct rte_dev_eeprom_info *in_eeprom)
5806 {
5807         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5808         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
5809         uint16_t *data = in_eeprom->data;
5810         int first, length;
5811
5812         first = in_eeprom->offset >> 1;
5813         length = in_eeprom->length >> 1;
5814         if ((first > hw->eeprom.word_size) ||
5815             ((first + length) > hw->eeprom.word_size))
5816                 return -EINVAL;
5817
5818         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
5819
5820         return eeprom->ops.read_buffer(hw, first, length, data);
5821 }
5822
5823 static int
5824 ixgbe_set_eeprom(struct rte_eth_dev *dev,
5825                 struct rte_dev_eeprom_info *in_eeprom)
5826 {
5827         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5828         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
5829         uint16_t *data = in_eeprom->data;
5830         int first, length;
5831
5832         first = in_eeprom->offset >> 1;
5833         length = in_eeprom->length >> 1;
5834         if ((first > hw->eeprom.word_size) ||
5835             ((first + length) > hw->eeprom.word_size))
5836                 return -EINVAL;
5837
5838         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
5839
5840         return eeprom->ops.write_buffer(hw, first, length, data);
5841 }
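/*
 * Illustrative sketch (not part of this driver): reading the whole
 * EEPROM.  The ethdev API works in bytes; the handlers above convert
 * offset/length to 16-bit words with the >> 1 shifts.
 *
 *	struct rte_dev_eeprom_info ee = { 0 };
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	ee.data = malloc(len);
 *	ee.length = len;
 *	int ret = rte_eth_dev_get_eeprom(port_id, &ee);
 */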
5842
5843 uint16_t
5844 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
5845         switch (mac_type) {
5846         case ixgbe_mac_X550:
5847         case ixgbe_mac_X550EM_x:
5848                 return ETH_RSS_RETA_SIZE_512;
5849         case ixgbe_mac_X550_vf:
5850         case ixgbe_mac_X550EM_x_vf:
5851                 return ETH_RSS_RETA_SIZE_64;
5852         default:
5853                 return ETH_RSS_RETA_SIZE_128;
5854         }
5855 }
5856
5857 uint32_t
5858 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
5859         switch (mac_type) {
5860         case ixgbe_mac_X550:
5861         case ixgbe_mac_X550EM_x:
5862                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
5863                         return IXGBE_RETA(reta_idx >> 2);
5864                 else
5865                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
5866         case ixgbe_mac_X550_vf:
5867         case ixgbe_mac_X550EM_x_vf:
5868                 return IXGBE_VFRETA(reta_idx >> 2);
5869         default:
5870                 return IXGBE_RETA(reta_idx >> 2);
5871         }
5872 }
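/*
 * Worked example for the mapping above (illustrative only): each 32-bit
 * redirection register holds four one-byte entries, hence the ">> 2".
 * On an X550, entry 50 lands in IXGBE_RETA(12), while entry 130 falls
 * past the first 128 entries and maps to
 * IXGBE_ERETA((130 - 128) >> 2) == IXGBE_ERETA(0).
 */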
5873
5874 uint32_t
5875 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
5876         switch (mac_type) {
5877         case ixgbe_mac_X550_vf:
5878         case ixgbe_mac_X550EM_x_vf:
5879                 return IXGBE_VFMRQC;
5880         default:
5881                 return IXGBE_MRQC;
5882         }
5883 }
5884
5885 uint32_t
5886 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
5887         switch (mac_type) {
5888         case ixgbe_mac_X550_vf:
5889         case ixgbe_mac_X550EM_x_vf:
5890                 return IXGBE_VFRSSRK(i);
5891         default:
5892                 return IXGBE_RSSRK(i);
5893         }
5894 }
5895
5896 bool
5897 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
5898         switch (mac_type) {
5899         case ixgbe_mac_82599_vf:
5900         case ixgbe_mac_X540_vf:
5901                 return FALSE;
5902         default:
5903                 return TRUE;
5904         }
5905 }
5906
5907 static int
5908 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
5909                         struct rte_eth_dcb_info *dcb_info)
5910 {
5911         struct ixgbe_dcb_config *dcb_config =
5912                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
5913         struct ixgbe_dcb_tc_config *tc;
5914         uint8_t i, j;
5915
5916         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
5917                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
5918         else
5919                 dcb_info->nb_tcs = 1;
5920
5921         if (dcb_config->vt_mode) { /* vt is enabled */
5922                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
5923                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
5924                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
5925                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
5926                 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
5927                         for (j = 0; j < dcb_info->nb_tcs; j++) {
5928                                 dcb_info->tc_queue.tc_rxq[i][j].base =
5929                                                 i * dcb_info->nb_tcs + j;
5930                                 dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
5931                                 dcb_info->tc_queue.tc_txq[i][j].base =
5932                                                 i * dcb_info->nb_tcs + j;
5933                                 dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
5934                         }
5935                 }
5936         } else { /* vt is disabled */
5937                 struct rte_eth_dcb_rx_conf *rx_conf =
5938                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
5939                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
5940                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
5941                 if (dcb_info->nb_tcs == ETH_4_TCS) {
5942                         for (i = 0; i < dcb_info->nb_tcs; i++) {
5943                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
5944                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
5945                         }
5946                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
5947                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
5948                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
5949                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
5950                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
5951                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
5952                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
5953                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
5954                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
5955                         for (i = 0; i < dcb_info->nb_tcs; i++) {
5956                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
5957                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
5958                         }
5959                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
5960                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
5961                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
5962                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
5963                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
5964                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
5965                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
5966                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
5967                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
5968                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
5969                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
5970                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
5971                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
5972                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
5973                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
5974                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
5975                 }
5976         }
5977         for (i = 0; i < dcb_info->nb_tcs; i++) {
5978                 tc = &dcb_config->tc_config[i];
5979                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
5980         }
5981         return 0;
5982 }
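/*
 * Illustrative sketch (not part of this driver): retrieving the TC and
 * queue layout computed above.  Port number is hypothetical.
 *
 *	struct rte_eth_dcb_info dcb_info;
 *	uint8_t i;
 *
 *	if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) == 0)
 *		for (i = 0; i < dcb_info.nb_tcs; i++)
 *			printf("TC%u: %u%% bandwidth\n",
 *			       i, dcb_info.tc_bws[i]);
 */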
5983
5984 static struct rte_driver rte_ixgbe_driver = {
5985         .type = PMD_PDEV,
5986         .init = rte_ixgbe_pmd_init,
5987 };
5988
5989 static struct rte_driver rte_ixgbevf_driver = {
5990         .type = PMD_PDEV,
5991         .init = rte_ixgbevf_pmd_init,
5992 };
5993
5994 PMD_REGISTER_DRIVER(rte_ixgbe_driver);
5995 PMD_REGISTER_DRIVER(rte_ixgbevf_driver);