ixgbe: get queue info and descriptor limits
[dpdk.git] drivers/net/ixgbe/ixgbe_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63
64 #include "ixgbe_logs.h"
65 #include "base/ixgbe_api.h"
66 #include "base/ixgbe_vf.h"
67 #include "base/ixgbe_common.h"
68 #include "ixgbe_ethdev.h"
69 #include "ixgbe_bypass.h"
70 #include "ixgbe_rxtx.h"
71 #include "base/ixgbe_type.h"
72 #include "base/ixgbe_phy.h"
73 #include "ixgbe_regs.h"
74
75 /*
76  * High threshold controlling when to start sending XOFF frames. Must be at
77  * least 8 bytes less than receive packet buffer size. This value is in units
78  * of 1024 bytes.
79  */
80 #define IXGBE_FC_HI    0x80
81
82 /*
83  * Low threshold controlling when to start sending XON frames. This value is
84  * in units of 1024 bytes.
85  */
86 #define IXGBE_FC_LO    0x40
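/*
 * With the thresholds above expressed in 1024-byte units, IXGBE_FC_HI (0x80)
 * corresponds to 128 KB of packet buffer use before XOFF is sent, and
 * IXGBE_FC_LO (0x40) to 64 KB before XON is sent again.
 */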
87
88 /* Default minimum inter-interrupt interval for EITR configuration */
89 #define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E
90
91 /* Timer value included in XOFF frames. */
92 #define IXGBE_FC_PAUSE 0x680
93
94 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
95 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
96 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
97
98 #define IXGBE_MMW_SIZE_DEFAULT        0x4
99 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
100 #define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
101
102 /*
103  *  Default values for RX/TX configuration
104  */
105 #define IXGBE_DEFAULT_RX_FREE_THRESH  32
106 #define IXGBE_DEFAULT_RX_PTHRESH      8
107 #define IXGBE_DEFAULT_RX_HTHRESH      8
108 #define IXGBE_DEFAULT_RX_WTHRESH      0
109
110 #define IXGBE_DEFAULT_TX_FREE_THRESH  32
111 #define IXGBE_DEFAULT_TX_PTHRESH      32
112 #define IXGBE_DEFAULT_TX_HTHRESH      0
113 #define IXGBE_DEFAULT_TX_WTHRESH      0
114 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
115
116 /* Bit shift and mask */
117 #define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
118 #define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
119 #define IXGBE_8_BIT_WIDTH  CHAR_BIT
120 #define IXGBE_8_BIT_MASK   UINT8_MAX
121
122 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
123
124 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
125
126 #define IXGBE_HKEY_MAX_INDEX 10
127
128 /* Additional timesync values. */
129 #define IXGBE_TIMINCA_16NS_SHIFT 24
130 #define IXGBE_TIMINCA_INCVALUE   16000000
131 #define IXGBE_TIMINCA_INIT       ((0x02 << IXGBE_TIMINCA_16NS_SHIFT) \
132                                   | IXGBE_TIMINCA_INCVALUE)
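/*
 * IXGBE_TIMINCA_INIT packs the increment period (0x02) into the bits above
 * IXGBE_TIMINCA_16NS_SHIFT (24) and the increment value (16,000,000, which
 * fits in the lower 24 bits) below it; this is presumably the initial value
 * programmed into the TIMINCA register when timesync is enabled.
 */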
133
134 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
135 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
136 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
137 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
138 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
139 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
140 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
141 static void ixgbe_dev_close(struct rte_eth_dev *dev);
142 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
143 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
144 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
145 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
146 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
147                                 int wait_to_complete);
148 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
149                                 struct rte_eth_stats *stats);
150 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
151                                 struct rte_eth_xstats *xstats, unsigned n);
152 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
153 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
154 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
155                                              uint16_t queue_id,
156                                              uint8_t stat_idx,
157                                              uint8_t is_rx);
158 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
159                                struct rte_eth_dev_info *dev_info);
160 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
161                                  struct rte_eth_dev_info *dev_info);
162 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
163
164 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
165                 uint16_t vlan_id, int on);
166 static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
167 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
168                 uint16_t queue, bool on);
169 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
170                 int on);
171 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
172 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
173 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
174 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
175 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
176
177 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
178 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
179 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
180                                struct rte_eth_fc_conf *fc_conf);
181 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
182                                struct rte_eth_fc_conf *fc_conf);
183 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
184                 struct rte_eth_pfc_conf *pfc_conf);
185 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
186                         struct rte_eth_rss_reta_entry64 *reta_conf,
187                         uint16_t reta_size);
188 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
189                         struct rte_eth_rss_reta_entry64 *reta_conf,
190                         uint16_t reta_size);
191 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
192 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
193 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
194 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
195 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
196 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
197                 void *param);
198 static void ixgbe_dev_interrupt_delayed_handler(void *param);
199 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
200                 uint32_t index, uint32_t pool);
201 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
202 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
203                                            struct ether_addr *mac_addr);
204 static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
205
206 /* For Virtual Function support */
207 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
208 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
209 static int ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev);
210 static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev);
211 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
212 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
213 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
214 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
215 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
216 static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
217 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
218                 struct rte_eth_stats *stats);
219 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
220 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
221                 uint16_t vlan_id, int on);
222 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
223                 uint16_t queue, int on);
224 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
225 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
226 static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
227                                           void *param);
228 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
229                                             uint16_t queue_id);
230 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
231                                              uint16_t queue_id);
232 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
233                                  uint8_t queue, uint8_t msix_vector);
234 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
235
236 /* For Eth VMDQ APIs support */
237 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
238                 ether_addr* mac_addr,uint8_t on);
239 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
240 static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,  uint16_t pool,
241                 uint16_t rx_mask, uint8_t on);
242 static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
243 static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
244 static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
245                 uint64_t pool_mask,uint8_t vlan_on);
246 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
247                 struct rte_eth_mirror_conf *mirror_conf,
248                 uint8_t rule_id, uint8_t on);
249 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
250                 uint8_t rule_id);
251 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
252                                           uint16_t queue_id);
253 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
254                                            uint16_t queue_id);
255 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
256                                uint8_t queue, uint8_t msix_vector);
257 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
258
259 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
260                 uint16_t queue_idx, uint16_t tx_rate);
261 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
262                 uint16_t tx_rate, uint64_t q_msk);
263
264 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
265                                  struct ether_addr *mac_addr,
266                                  uint32_t index, uint32_t pool);
267 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
268 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
269                                              struct ether_addr *mac_addr);
270 static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
271                         struct rte_eth_syn_filter *filter,
272                         bool add);
273 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
274                         struct rte_eth_syn_filter *filter);
275 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
276                         enum rte_filter_op filter_op,
277                         void *arg);
278 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
279                         struct ixgbe_5tuple_filter *filter);
280 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
281                         struct ixgbe_5tuple_filter *filter);
282 static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
283                         struct rte_eth_ntuple_filter *filter,
284                         bool add);
285 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
286                                 enum rte_filter_op filter_op,
287                                 void *arg);
288 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
289                         struct rte_eth_ntuple_filter *filter);
290 static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
291                         struct rte_eth_ethertype_filter *filter,
292                         bool add);
293 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
294                                 enum rte_filter_op filter_op,
295                                 void *arg);
296 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
297                         struct rte_eth_ethertype_filter *filter);
298 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
299                      enum rte_filter_type filter_type,
300                      enum rte_filter_op filter_op,
301                      void *arg);
302 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
303
304 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
305                                       struct ether_addr *mc_addr_set,
306                                       uint32_t nb_mc_addr);
307 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
308                                    struct rte_eth_dcb_info *dcb_info);
309
310 static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
311 static int ixgbe_get_regs(struct rte_eth_dev *dev,
312                             struct rte_dev_reg_info *regs);
313 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
314 static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
315                                 struct rte_dev_eeprom_info *eeprom);
316 static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
317                                 struct rte_dev_eeprom_info *eeprom);
318
319 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
320 static int ixgbevf_get_regs(struct rte_eth_dev *dev,
321                                 struct rte_dev_reg_info *regs);
322
323 static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
324 static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
325 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
326                                             struct timespec *timestamp,
327                                             uint32_t flags);
328 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
329                                             struct timespec *timestamp);
330
331 /*
332  * Define VF statistics macros for non-"cleared on read" registers
333  */
334 #define UPDATE_VF_STAT(reg, last, cur)                          \
335 {                                                               \
336         uint32_t latest = IXGBE_READ_REG(hw, reg);              \
337         cur += (latest - last) & UINT_MAX;                      \
338         last = latest;                                          \
339 }
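/*
 * UPDATE_VF_STAT accumulates the delta since the previous read of a
 * free-running 32-bit counter; the unsigned subtraction makes a single
 * wrap of the register between reads harmless.
 */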
340
341 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
342 {                                                                \
343         u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
344         u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
345         u64 latest = ((new_msb << 32) | new_lsb);                \
346         cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
347         last = latest;                                           \
348 }
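/*
 * UPDATE_VF_STAT_36BIT recombines the LSB and MSB halves of a 36-bit
 * counter and adds the delta modulo 2^36 (0x1000000000), so a single wrap
 * between reads is still counted correctly.
 */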
349
350 #define IXGBE_SET_HWSTRIP(h, q) do{\
351                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
352                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
353                 (h)->bitmap[idx] |= 1 << bit;\
354         }while(0)
355
356 #define IXGBE_CLEAR_HWSTRIP(h, q) do{\
357                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
358                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
359                 (h)->bitmap[idx] &= ~(1 << bit);\
360         }while(0)
361
362 #define IXGBE_GET_HWSTRIP(h, q, r) do{\
363                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
364                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
365                 (r) = (h)->bitmap[idx] >> bit & 1;\
366         }while(0)
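/*
 * The hwstrip bitmap holds one bit per queue: 'idx' selects the word in the
 * bitmap and 'bit' the position within it (NBBY is the number of bits per
 * byte). These macros are used to track the per-queue VLAN strip setting
 * (see ixgbe_vlan_hw_strip_bitmap_set() declared above).
 */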
367
368 /*
369  * The set of PCI devices this driver supports
370  */
371 static const struct rte_pci_id pci_id_ixgbe_map[] = {
372
373 #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
374 #include "rte_pci_dev_ids.h"
375
376 { .vendor_id = 0, /* sentinel */ },
377 };
378
379
380 /*
381  * The set of PCI devices this driver supports (for 82599 VF)
382  */
383 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
384
385 #define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
386 #include "rte_pci_dev_ids.h"
387 { .vendor_id = 0, /* sentinel */ },
388
389 };
390
391 static const struct rte_eth_desc_lim rx_desc_lim = {
392         .nb_max = IXGBE_MAX_RING_DESC,
393         .nb_min = IXGBE_MIN_RING_DESC,
394         .nb_align = IXGBE_RXD_ALIGN,
395 };
396
397 static const struct rte_eth_desc_lim tx_desc_lim = {
398         .nb_max = IXGBE_MAX_RING_DESC,
399         .nb_min = IXGBE_MIN_RING_DESC,
400         .nb_align = IXGBE_TXD_ALIGN,
401 };
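/*
 * Per the commit title ("get queue info and descriptor limits"), rx_desc_lim
 * and tx_desc_lim are expected to be copied into struct rte_eth_dev_info by
 * the dev_infos_get handlers, so applications can validate ring sizes
 * against nb_max/nb_min/nb_align before setting up queues.
 */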
402
403 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
404         .dev_configure        = ixgbe_dev_configure,
405         .dev_start            = ixgbe_dev_start,
406         .dev_stop             = ixgbe_dev_stop,
407         .dev_set_link_up    = ixgbe_dev_set_link_up,
408         .dev_set_link_down  = ixgbe_dev_set_link_down,
409         .dev_close            = ixgbe_dev_close,
410         .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
411         .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
412         .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
413         .allmulticast_disable = ixgbe_dev_allmulticast_disable,
414         .link_update          = ixgbe_dev_link_update,
415         .stats_get            = ixgbe_dev_stats_get,
416         .xstats_get           = ixgbe_dev_xstats_get,
417         .stats_reset          = ixgbe_dev_stats_reset,
418         .xstats_reset         = ixgbe_dev_xstats_reset,
419         .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
420         .dev_infos_get        = ixgbe_dev_info_get,
421         .mtu_set              = ixgbe_dev_mtu_set,
422         .vlan_filter_set      = ixgbe_vlan_filter_set,
423         .vlan_tpid_set        = ixgbe_vlan_tpid_set,
424         .vlan_offload_set     = ixgbe_vlan_offload_set,
425         .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
426         .rx_queue_start       = ixgbe_dev_rx_queue_start,
427         .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
428         .tx_queue_start       = ixgbe_dev_tx_queue_start,
429         .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
430         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
431         .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
432         .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
433         .rx_queue_release     = ixgbe_dev_rx_queue_release,
434         .rx_queue_count       = ixgbe_dev_rx_queue_count,
435         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
436         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
437         .tx_queue_release     = ixgbe_dev_tx_queue_release,
438         .dev_led_on           = ixgbe_dev_led_on,
439         .dev_led_off          = ixgbe_dev_led_off,
440         .flow_ctrl_get        = ixgbe_flow_ctrl_get,
441         .flow_ctrl_set        = ixgbe_flow_ctrl_set,
442         .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
443         .mac_addr_add         = ixgbe_add_rar,
444         .mac_addr_remove      = ixgbe_remove_rar,
445         .mac_addr_set         = ixgbe_set_default_mac_addr,
446         .uc_hash_table_set    = ixgbe_uc_hash_table_set,
447         .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
448         .mirror_rule_set      = ixgbe_mirror_rule_set,
449         .mirror_rule_reset    = ixgbe_mirror_rule_reset,
450         .set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
451         .set_vf_rx            = ixgbe_set_pool_rx,
452         .set_vf_tx            = ixgbe_set_pool_tx,
453         .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
454         .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
455         .set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
456         .reta_update          = ixgbe_dev_rss_reta_update,
457         .reta_query           = ixgbe_dev_rss_reta_query,
458 #ifdef RTE_NIC_BYPASS
459         .bypass_init          = ixgbe_bypass_init,
460         .bypass_state_set     = ixgbe_bypass_state_store,
461         .bypass_state_show    = ixgbe_bypass_state_show,
462         .bypass_event_set     = ixgbe_bypass_event_store,
463         .bypass_event_show    = ixgbe_bypass_event_show,
464         .bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
465         .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
466         .bypass_ver_show      = ixgbe_bypass_ver_show,
467         .bypass_wd_reset      = ixgbe_bypass_wd_reset,
468 #endif /* RTE_NIC_BYPASS */
469         .rss_hash_update      = ixgbe_dev_rss_hash_update,
470         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
471         .filter_ctrl          = ixgbe_dev_filter_ctrl,
472         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
473         .rxq_info_get         = ixgbe_rxq_info_get,
474         .txq_info_get         = ixgbe_txq_info_get,
475         .timesync_enable      = ixgbe_timesync_enable,
476         .timesync_disable     = ixgbe_timesync_disable,
477         .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
478         .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
479         .get_reg_length       = ixgbe_get_reg_length,
480         .get_reg              = ixgbe_get_regs,
481         .get_eeprom_length    = ixgbe_get_eeprom_length,
482         .get_eeprom           = ixgbe_get_eeprom,
483         .set_eeprom           = ixgbe_set_eeprom,
484         .get_dcb_info         = ixgbe_dev_get_dcb_info,
485 };
486
487 /*
488  * dev_ops for the virtual function; only the bare necessities for basic VF
489  * operation have been implemented
490  */
491 static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
492         .dev_configure        = ixgbevf_dev_configure,
493         .dev_start            = ixgbevf_dev_start,
494         .dev_stop             = ixgbevf_dev_stop,
495         .link_update          = ixgbe_dev_link_update,
496         .stats_get            = ixgbevf_dev_stats_get,
497         .stats_reset          = ixgbevf_dev_stats_reset,
498         .dev_close            = ixgbevf_dev_close,
499         .dev_infos_get        = ixgbevf_dev_info_get,
500         .mtu_set              = ixgbevf_dev_set_mtu,
501         .vlan_filter_set      = ixgbevf_vlan_filter_set,
502         .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
503         .vlan_offload_set     = ixgbevf_vlan_offload_set,
504         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
505         .rx_queue_release     = ixgbe_dev_rx_queue_release,
506         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
507         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
508         .tx_queue_release     = ixgbe_dev_tx_queue_release,
509         .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
510         .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
511         .mac_addr_add         = ixgbevf_add_mac_addr,
512         .mac_addr_remove      = ixgbevf_remove_mac_addr,
513         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
514         .rxq_info_get         = ixgbe_rxq_info_get,
515         .txq_info_get         = ixgbe_txq_info_get,
516         .mac_addr_set         = ixgbevf_set_default_mac_addr,
517         .get_reg_length       = ixgbevf_get_reg_length,
518         .get_reg              = ixgbevf_get_regs,
519         .reta_update          = ixgbe_dev_rss_reta_update,
520         .reta_query           = ixgbe_dev_rss_reta_query,
521         .rss_hash_update      = ixgbe_dev_rss_hash_update,
522         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
523 };
524
525 /* store statistics names and their offsets in the stats structure */
526 struct rte_ixgbe_xstats_name_off {
527         char name[RTE_ETH_XSTATS_NAME_SIZE];
528         unsigned offset;
529 };
530
531 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
532         {"rx_illegal_byte_err", offsetof(struct ixgbe_hw_stats, errbc)},
533         {"rx_len_err", offsetof(struct ixgbe_hw_stats, rlec)},
534         {"rx_undersize_count", offsetof(struct ixgbe_hw_stats, ruc)},
535         {"rx_oversize_count", offsetof(struct ixgbe_hw_stats, roc)},
536         {"rx_fragment_count", offsetof(struct ixgbe_hw_stats, rfc)},
537         {"rx_jabber_count", offsetof(struct ixgbe_hw_stats, rjc)},
538         {"l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
539         {"mac_local_fault", offsetof(struct ixgbe_hw_stats, mlfc)},
540         {"mac_remote_fault", offsetof(struct ixgbe_hw_stats, mrfc)},
541         {"mac_short_pkt_discard", offsetof(struct ixgbe_hw_stats, mspdc)},
542         {"fccrc_error", offsetof(struct ixgbe_hw_stats, fccrc)},
543         {"fcoe_drop", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
544         {"fc_last_error", offsetof(struct ixgbe_hw_stats, fclast)},
545         {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
546         {"rx_phy_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
547         {"mgmt_pkts_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
548         {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
549         {"fdir_match", offsetof(struct ixgbe_hw_stats, fdirmatch)},
550         {"fdir_miss", offsetof(struct ixgbe_hw_stats, fdirmiss)},
551         {"tx_flow_control_xon", offsetof(struct ixgbe_hw_stats, lxontxc)},
552         {"rx_flow_control_xon", offsetof(struct ixgbe_hw_stats, lxonrxc)},
553         {"tx_flow_control_xoff", offsetof(struct ixgbe_hw_stats, lxofftxc)},
554         {"rx_flow_control_xoff", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
555 };
556
557 #define IXGBE_NB_XSTATS (sizeof(rte_ixgbe_stats_strings) /      \
558                 sizeof(rte_ixgbe_stats_strings[0]))
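/*
 * Each entry above pairs an xstats display name with the byte offset of the
 * corresponding counter inside struct ixgbe_hw_stats, allowing xstats_get to
 * fetch values generically, e.g. (an illustration, not the exact code):
 * *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_stats_strings[i].offset)
 */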
559
560 /**
561  * Atomically reads the link status information from global
562  * structure rte_eth_dev.
563  *
564  * @param dev
565  *   - Pointer to the structure rte_eth_dev to read from.
566  *   - Pointer to the buffer where the link status will be stored.
567  *
568  * @return
569  *   - On success, zero.
570  *   - On failure, negative value.
571  */
572 static inline int
573 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
574                                 struct rte_eth_link *link)
575 {
576         struct rte_eth_link *dst = link;
577         struct rte_eth_link *src = &(dev->data->dev_link);
578
579         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
580                                         *(uint64_t *)src) == 0)
581                 return -1;
582
583         return 0;
584 }
585
586 /**
587  * Atomically writes the link status information into global
588  * structure rte_eth_dev.
589  *
590  * @param dev
591  *   - Pointer to the structure rte_eth_dev to write to.
592  *   - Pointer to the buffer containing the link status to be written.
593  *
594  * @return
595  *   - On success, zero.
596  *   - On failure, negative value.
597  */
598 static inline int
599 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
600                                 struct rte_eth_link *link)
601 {
602         struct rte_eth_link *dst = &(dev->data->dev_link);
603         struct rte_eth_link *src = link;
604
605         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
606                                         *(uint64_t *)src) == 0)
607                 return -1;
608
609         return 0;
610 }
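/*
 * Both helpers above rely on struct rte_eth_link fitting into 64 bits (hence
 * the uint64_t casts): rte_atomic64_cmpset() copies the whole link status in
 * one atomic operation, so readers never observe a torn (half-updated) value.
 */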
611
612 /*
613  * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
614  */
615 static inline int
616 ixgbe_is_sfp(struct ixgbe_hw *hw)
617 {
618         switch (hw->phy.type) {
619         case ixgbe_phy_sfp_avago:
620         case ixgbe_phy_sfp_ftl:
621         case ixgbe_phy_sfp_intel:
622         case ixgbe_phy_sfp_unknown:
623         case ixgbe_phy_sfp_passive_tyco:
624         case ixgbe_phy_sfp_passive_unknown:
625                 return 1;
626         default:
627                 return 0;
628         }
629 }
630
631 static inline int32_t
632 ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
633 {
634         uint32_t ctrl_ext;
635         int32_t status;
636
637         status = ixgbe_reset_hw(hw);
638
639         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
640         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
641         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
642         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
643         IXGBE_WRITE_FLUSH(hw);
644
645         return status;
646 }
647
648 static inline void
649 ixgbe_enable_intr(struct rte_eth_dev *dev)
650 {
651         struct ixgbe_interrupt *intr =
652                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
653         struct ixgbe_hw *hw =
654                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
655
656         IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
657         IXGBE_WRITE_FLUSH(hw);
658 }
659
660 /*
661  * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
662  */
663 static void
664 ixgbe_disable_intr(struct ixgbe_hw *hw)
665 {
666         PMD_INIT_FUNC_TRACE();
667
668         if (hw->mac.type == ixgbe_mac_82598EB) {
669                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
670         } else {
671                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
672                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
673                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
674         }
675         IXGBE_WRITE_FLUSH(hw);
676 }
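/*
 * 82598 exposes a single EIMC register, while later MACs split the extended
 * interrupt mask across EIMC and the two EIMC_EX registers, hence the two
 * code paths above.
 */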
677
678 /*
679  * This function resets queue statistics mapping registers.
680  * From Niantic datasheet, Initialization of Statistics section:
681  * "...if software requires the queue counters, the RQSMR and TQSM registers
682  * must be re-programmed following a device reset."
683  */
684 static void
685 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
686 {
687         uint32_t i;
688
689         for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
690                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
691                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
692         }
693 }
694
695
696 static int
697 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
698                                   uint16_t queue_id,
699                                   uint8_t stat_idx,
700                                   uint8_t is_rx)
701 {
702 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
703 #define NB_QMAP_FIELDS_PER_QSM_REG 4
704 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
705
706         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
707         struct ixgbe_stat_mapping_registers *stat_mappings =
708                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
709         uint32_t qsmr_mask = 0;
710         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
711         uint32_t q_map;
712         uint8_t n, offset;
713
714         if ((hw->mac.type != ixgbe_mac_82599EB) &&
715                 (hw->mac.type != ixgbe_mac_X540) &&
716                 (hw->mac.type != ixgbe_mac_X550) &&
717                 (hw->mac.type != ixgbe_mac_X550EM_x))
718                 return -ENOSYS;
719
720         PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
721                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
722                      queue_id, stat_idx);
723
724         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
725         if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
726                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
727                 return -EIO;
728         }
729         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
730
731         /* Now clear any previous stat_idx set */
732         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
733         if (!is_rx)
734                 stat_mappings->tqsm[n] &= ~clearing_mask;
735         else
736                 stat_mappings->rqsmr[n] &= ~clearing_mask;
737
738         q_map = (uint32_t)stat_idx;
739         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
740         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
741         if (!is_rx)
742                 stat_mappings->tqsm[n] |= qsmr_mask;
743         else
744                 stat_mappings->rqsmr[n] |= qsmr_mask;
745
746         PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
747                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
748                      queue_id, stat_idx);
749         PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
750                      is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
751
752         /* Now write the mapping in the appropriate register */
753         if (is_rx) {
754                 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
755                              stat_mappings->rqsmr[n], n);
756                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
757         }
758         else {
759                 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
760                              stat_mappings->tqsm[n], n);
761                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
762         }
763         return 0;
764 }
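/*
 * Worked example of the mapping above: each RQSMR/TQSM register holds
 * NB_QMAP_FIELDS_PER_QSM_REG (4) fields of QSM_REG_NB_BITS_PER_QMAP_FIELD
 * (8) bits, of which only the low 4 bits are used. Queue 5 therefore lands
 * in register n = 5 / 4 = 1 at offset 5 % 4 = 1, i.e. bits 15:8 of RQSMR(1)
 * or TQSM(1).
 */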
765
766 static void
767 ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
768 {
769         struct ixgbe_stat_mapping_registers *stat_mappings =
770                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
771         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
772         int i;
773
774         /* write whatever was in stat mapping table to the NIC */
775         for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
776                 /* rx */
777                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
778
779                 /* tx */
780                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
781         }
782 }
783
784 static void
785 ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
786 {
787         uint8_t i;
788         struct ixgbe_dcb_tc_config *tc;
789         uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
790
791         dcb_config->num_tcs.pg_tcs = dcb_max_tc;
792         dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
793         for (i = 0; i < dcb_max_tc; i++) {
794                 tc = &dcb_config->tc_config[i];
795                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
796                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
797                                  (uint8_t)(100/dcb_max_tc + (i & 1));
798                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
799                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
800                                  (uint8_t)(100/dcb_max_tc + (i & 1));
801                 tc->pfc = ixgbe_dcb_pfc_disabled;
802         }
803
804         /* Initialize default user priority to traffic class mapping: UPx->TC0 */
805         tc = &dcb_config->tc_config[0];
806         tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
807         tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
808         for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
809                 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
810                 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
811         }
812         dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
813         dcb_config->pfc_mode_enable = false;
814         dcb_config->vt_mode = true;
815         dcb_config->round_robin_enable = false;
816         /* support all DCB capabilities in 82599 */
817         dcb_config->support.capabilities = 0xFF;
818
819         /* we only support 4 TCs for X540, X550 */
820         if (hw->mac.type == ixgbe_mac_X540 ||
821                 hw->mac.type == ixgbe_mac_X550 ||
822                 hw->mac.type == ixgbe_mac_X550EM_x) {
823                 dcb_config->num_tcs.pg_tcs = 4;
824                 dcb_config->num_tcs.pfc_tcs = 4;
825         }
826 }
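/*
 * Bandwidth split example: with dcb_max_tc = 8, 100 / 8 = 12 and the
 * "(i & 1)" term gives odd-numbered TCs one extra percent, so the shares
 * alternate 12%/13% and the eight classes sum to exactly 100%.
 */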
827
828 /*
829  * Ensure that all locks are released before first NVM or PHY access
830  */
831 static void
832 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
833 {
834         uint16_t mask;
835
836         /*
837           * The PHY lock should not fail at this early stage. If it does, it is
838           * due to an improper exit of the application, so force the release of
839           * the faulty lock. Release of the common lock is done automatically by
840           * the swfw_sync function.
841          */
842         mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
843         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
844                 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
845         }
846         ixgbe_release_swfw_semaphore(hw, mask);
847
848         /*
849           * These locks are trickier since they are common to all ports; but the
850           * swfw_sync retries last long enough (1s) to be almost sure that, if a
851           * lock cannot be taken, it is due to an improper lock of the
852           * semaphore.
853          */
854         mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
855         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
856                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
857         }
858         ixgbe_release_swfw_semaphore(hw, mask);
859 }
860
861 /*
862  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
863  * It returns 0 on success.
864  */
865 static int
866 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
867 {
868         struct rte_pci_device *pci_dev;
869         struct ixgbe_hw *hw =
870                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
871         struct ixgbe_vfta * shadow_vfta =
872                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
873         struct ixgbe_hwstrip *hwstrip =
874                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
875         struct ixgbe_dcb_config *dcb_config =
876                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
877         struct ixgbe_filter_info *filter_info =
878                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
879         uint32_t ctrl_ext;
880         uint16_t csum;
881         int diag, i;
882
883         PMD_INIT_FUNC_TRACE();
884
885         eth_dev->dev_ops = &ixgbe_eth_dev_ops;
886         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
887         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
888
889         /*
890          * For secondary processes, we don't initialise any further as the
891          * primary process has already done this work. Only check that we don't
892          * need a different RX and TX function.
893          */
894         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
895                 struct ixgbe_tx_queue *txq;
896                 /* TX queue function in primary, set by the last queue initialized.
897                  * The TX queue may not have been initialized by the primary process. */
898                 if (eth_dev->data->tx_queues) {
899                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
900                         ixgbe_set_tx_function(eth_dev, txq);
901                 } else {
902                         /* Use default TX function if we get here */
903                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
904                                              "Using default TX function.");
905                 }
906
907                 ixgbe_set_rx_function(eth_dev);
908
909                 return 0;
910         }
911         pci_dev = eth_dev->pci_dev;
912
913         /* Vendor and Device ID need to be set before init of shared code */
914         hw->device_id = pci_dev->id.device_id;
915         hw->vendor_id = pci_dev->id.vendor_id;
916         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
917         hw->allow_unsupported_sfp = 1;
918
919         /* Initialize the shared code (base driver) */
920 #ifdef RTE_NIC_BYPASS
921         diag = ixgbe_bypass_init_shared_code(hw);
922 #else
923         diag = ixgbe_init_shared_code(hw);
924 #endif /* RTE_NIC_BYPASS */
925
926         if (diag != IXGBE_SUCCESS) {
927                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
928                 return -EIO;
929         }
930
931         /* pick up the PCI bus settings for reporting later */
932         ixgbe_get_bus_info(hw);
933
934         /* Unlock any pending hardware semaphore */
935         ixgbe_swfw_lock_reset(hw);
936
937         /* Initialize DCB configuration*/
938         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
939         ixgbe_dcb_init(hw,dcb_config);
940         /* Get Hardware Flow Control setting */
941         hw->fc.requested_mode = ixgbe_fc_full;
942         hw->fc.current_mode = ixgbe_fc_full;
943         hw->fc.pause_time = IXGBE_FC_PAUSE;
944         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
945                 hw->fc.low_water[i] = IXGBE_FC_LO;
946                 hw->fc.high_water[i] = IXGBE_FC_HI;
947         }
948         hw->fc.send_xon = 1;
949
950         /* Make sure we have a good EEPROM before we read from it */
951         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
952         if (diag != IXGBE_SUCCESS) {
953                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
954                 return -EIO;
955         }
956
957 #ifdef RTE_NIC_BYPASS
958         diag = ixgbe_bypass_init_hw(hw);
959 #else
960         diag = ixgbe_init_hw(hw);
961 #endif /* RTE_NIC_BYPASS */
962
963         /*
964           * Devices with copper phys will fail to initialise if ixgbe_init_hw()
965           * is called too soon after the kernel driver unbinding/binding occurs.
966           * The failure occurs in ixgbe_identify_phy_generic() for all devices,
967           * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
968           * also called. See ixgbe_identify_phy_82599(). The reason for the
969           * failure is not known, and only occurs when virtualisation features
970           * are disabled in the BIOS. A delay of 100ms was found to be enough by
971           * trial-and-error, and is doubled to be safe.
972          */
973         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
974                 rte_delay_ms(200);
975                 diag = ixgbe_init_hw(hw);
976         }
977
978         if (diag == IXGBE_ERR_EEPROM_VERSION) {
979                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
980                     "LOM.  Please be aware there may be issues associated "
981                     "with your hardware.");
982                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
983                     "please contact your Intel or hardware representative "
984                     "who provided you with this hardware.");
985         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
986                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
987         if (diag) {
988                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
989                 return -EIO;
990         }
991
992         /* Reset the hw statistics */
993         ixgbe_dev_stats_reset(eth_dev);
994
995         /* disable interrupt */
996         ixgbe_disable_intr(hw);
997
998         /* reset mappings for queue statistics hw counters*/
999         ixgbe_reset_qstat_mappings(hw);
1000
1001         /* Allocate memory for storing MAC addresses */
1002         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1003                         hw->mac.num_rar_entries, 0);
1004         if (eth_dev->data->mac_addrs == NULL) {
1005                 PMD_INIT_LOG(ERR,
1006                         "Failed to allocate %u bytes needed to store "
1007                         "MAC addresses",
1008                         ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1009                 return -ENOMEM;
1010         }
1011         /* Copy the permanent MAC address */
1012         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1013                         &eth_dev->data->mac_addrs[0]);
1014
1015         /* Allocate memory for storing hash filter MAC addresses */
1016         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1017                         IXGBE_VMDQ_NUM_UC_MAC, 0);
1018         if (eth_dev->data->hash_mac_addrs == NULL) {
1019                 PMD_INIT_LOG(ERR,
1020                         "Failed to allocate %d bytes needed to store MAC addresses",
1021                         ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1022                 return -ENOMEM;
1023         }
1024
1025         /* initialize the vfta */
1026         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1027
1028         /* initialize the hw strip bitmap*/
1029         memset(hwstrip, 0, sizeof(*hwstrip));
1030
1031         /* initialize PF if max_vfs not zero */
1032         ixgbe_pf_host_init(eth_dev);
1033
1034         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1035         /* let hardware know driver is loaded */
1036         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1037         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1038         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1039         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1040         IXGBE_WRITE_FLUSH(hw);
1041
1042         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1043                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1044                              (int) hw->mac.type, (int) hw->phy.type,
1045                              (int) hw->phy.sfp_type);
1046         else
1047                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1048                              (int) hw->mac.type, (int) hw->phy.type);
1049
1050         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1051                         eth_dev->data->port_id, pci_dev->id.vendor_id,
1052                         pci_dev->id.device_id);
1053
1054         /* enable support intr */
1055         ixgbe_enable_intr(eth_dev);
1056
1057         /* initialize 5tuple filter list */
1058         TAILQ_INIT(&filter_info->fivetuple_list);
1059         memset(filter_info->fivetuple_mask, 0,
1060                 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1061
1062         return 0;
1063 }
1064
1065 static int
1066 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1067 {
1068         struct rte_pci_device *pci_dev;
1069         struct ixgbe_hw *hw;
1070
1071         PMD_INIT_FUNC_TRACE();
1072
1073         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1074                 return -EPERM;
1075
1076         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1077         pci_dev = eth_dev->pci_dev;
1078
1079         if (hw->adapter_stopped == 0)
1080                 ixgbe_dev_close(eth_dev);
1081
1082         eth_dev->dev_ops = NULL;
1083         eth_dev->rx_pkt_burst = NULL;
1084         eth_dev->tx_pkt_burst = NULL;
1085
1086         /* Unlock any pending hardware semaphore */
1087         ixgbe_swfw_lock_reset(hw);
1088
1089         /* disable uio intr before callback unregister */
1090         rte_intr_disable(&(pci_dev->intr_handle));
1091         rte_intr_callback_unregister(&(pci_dev->intr_handle),
1092                 ixgbe_dev_interrupt_handler, (void *)eth_dev);
1093
1094         /* uninitialize PF if max_vfs not zero */
1095         ixgbe_pf_host_uninit(eth_dev);
1096
1097         rte_free(eth_dev->data->mac_addrs);
1098         eth_dev->data->mac_addrs = NULL;
1099
1100         rte_free(eth_dev->data->hash_mac_addrs);
1101         eth_dev->data->hash_mac_addrs = NULL;
1102
1103         return 0;
1104 }
1105
1106 /*
1107  * Negotiate mailbox API version with the PF.
1108  * After reset, the API version is always set to the basic one (ixgbe_mbox_api_10).
1109  * Then we try to negotiate starting with the most recent one.
1110  * If all negotiation attempts fail, then we will proceed with
1111  * the default one (ixgbe_mbox_api_10).
1112  */
1113 static void
1114 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1115 {
1116         int32_t i;
1117
1118         /* start with highest supported, proceed down */
1119         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1120                 ixgbe_mbox_api_11,
1121                 ixgbe_mbox_api_10,
1122         };
1123
1124         for (i = 0;
1125                         i != RTE_DIM(sup_ver) &&
1126                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1127                         i++)
1128                 ;
1129 }
1130
1131 static void
1132 generate_random_mac_addr(struct ether_addr *mac_addr)
1133 {
1134         uint64_t random;
1135
1136         /* Set Organizationally Unique Identifier (OUI) prefix. */
1137         mac_addr->addr_bytes[0] = 0x00;
1138         mac_addr->addr_bytes[1] = 0x09;
1139         mac_addr->addr_bytes[2] = 0xC0;
1140         /* Force indication of locally assigned MAC address. */
1141         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1142         /* Generate the last 3 bytes of the MAC address with a random number. */
1143         random = rte_rand();
1144         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1145 }
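/*
 * The generated address uses the 00:09:C0 OUI with the locally-administered
 * bit forced on via ETHER_LOCAL_ADMIN_ADDR; assuming that constant is 0x02
 * (its value in rte_ether.h), the first byte of the address becomes 0x02.
 */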
1146
1147 /*
1148  * Virtual Function device init
1149  */
1150 static int
1151 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1152 {
1153         int diag;
1154         uint32_t tc, tcs;
1155         struct rte_pci_device *pci_dev;
1156         struct ixgbe_hw *hw =
1157                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1158         struct ixgbe_vfta * shadow_vfta =
1159                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1160         struct ixgbe_hwstrip *hwstrip =
1161                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1162         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1163
1164         PMD_INIT_FUNC_TRACE();
1165
1166         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1167         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1168         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1169
1170         /* For secondary processes, we don't initialise any further as the
1171          * primary process has already done this work. Only check that we don't
1172          * need a different RX function. */
1173         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1174                 if (eth_dev->data->scattered_rx)
1175                         eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
1176                 return 0;
1177         }
1178
1179         pci_dev = eth_dev->pci_dev;
1180
1181         hw->device_id = pci_dev->id.device_id;
1182         hw->vendor_id = pci_dev->id.vendor_id;
1183         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1184
1185         /* initialize the vfta */
1186         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1187
1188         /* initialize the hw strip bitmap*/
1189         memset(hwstrip, 0, sizeof(*hwstrip));
1190
1191         /* Initialize the shared code (base driver) */
1192         diag = ixgbe_init_shared_code(hw);
1193         if (diag != IXGBE_SUCCESS) {
1194                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1195                 return -EIO;
1196         }
1197
1198         /* init_mailbox_params */
1199         hw->mbx.ops.init_params(hw);
1200
1201         /* Reset the hw statistics */
1202         ixgbevf_dev_stats_reset(eth_dev);
1203
1204         /* Disable the interrupts for VF */
1205         ixgbevf_intr_disable(hw);
1206
1207         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1208         diag = hw->mac.ops.reset_hw(hw);
1209
1210         /*
1211  * The VF reset operation returns IXGBE_ERR_INVALID_MAC_ADDR when
1212          * the underlying PF driver has not assigned a MAC address to the VF.
1213          * In this case, assign a random MAC address.
1214          */
1215         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1216                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1217                 return (diag);
1218         }
1219
1220         /* negotiate mailbox API version to use with the PF. */
1221         ixgbevf_negotiate_api(hw);
1222
1223         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1224         ixgbevf_get_queues(hw, &tcs, &tc);
1225
1226         /* Allocate memory for storing MAC addresses */
1227         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1228                         hw->mac.num_rar_entries, 0);
1229         if (eth_dev->data->mac_addrs == NULL) {
1230                 PMD_INIT_LOG(ERR,
1231                         "Failed to allocate %u bytes needed to store "
1232                         "MAC addresses",
1233                         ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1234                 return -ENOMEM;
1235         }
1236
1237         /* Generate a random MAC address, if none was assigned by PF. */
1238         if (is_zero_ether_addr(perm_addr)) {
1239                 generate_random_mac_addr(perm_addr);
1240                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1241                 if (diag) {
1242                         rte_free(eth_dev->data->mac_addrs);
1243                         eth_dev->data->mac_addrs = NULL;
1244                         return diag;
1245                 }
1246                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1247                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1248                              "%02x:%02x:%02x:%02x:%02x:%02x",
1249                              perm_addr->addr_bytes[0],
1250                              perm_addr->addr_bytes[1],
1251                              perm_addr->addr_bytes[2],
1252                              perm_addr->addr_bytes[3],
1253                              perm_addr->addr_bytes[4],
1254                              perm_addr->addr_bytes[5]);
1255         }
1256
1257         /* Copy the permanent MAC address */
1258         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1259
1260         /* reset the hardware with the new settings */
1261         diag = hw->mac.ops.start_hw(hw);
1262         switch (diag) {
1263         case 0:
1264                 break;
1265
1266         default:
1267                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1268                 return -EIO;
1269         }
1270
1271         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1272                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1273                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1274
1275         return 0;
1276 }
1277
1278 /* Virtual Function device uninit */
1279
1280 static int
1281 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1282 {
1283         struct ixgbe_hw *hw;
1284         unsigned i;
1285
1286         PMD_INIT_FUNC_TRACE();
1287
1288         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1289                 return -EPERM;
1290
1291         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1292
1293         if (hw->adapter_stopped == 0)
1294                 ixgbevf_dev_close(eth_dev);
1295
1296         eth_dev->dev_ops = NULL;
1297         eth_dev->rx_pkt_burst = NULL;
1298         eth_dev->tx_pkt_burst = NULL;
1299
1300         /* Disable the interrupts for VF */
1301         ixgbevf_intr_disable(hw);
1302
1303         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1304                 ixgbe_dev_rx_queue_release(eth_dev->data->rx_queues[i]);
1305                 eth_dev->data->rx_queues[i] = NULL;
1306         }
1307         eth_dev->data->nb_rx_queues = 0;
1308
1309         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1310                 ixgbe_dev_tx_queue_release(eth_dev->data->tx_queues[i]);
1311                 eth_dev->data->tx_queues[i] = NULL;
1312         }
1313         eth_dev->data->nb_tx_queues = 0;
1314
1315         rte_free(eth_dev->data->mac_addrs);
1316         eth_dev->data->mac_addrs = NULL;
1317
1318         return 0;
1319 }
1320
1321 static struct eth_driver rte_ixgbe_pmd = {
1322         .pci_drv = {
1323                 .name = "rte_ixgbe_pmd",
1324                 .id_table = pci_id_ixgbe_map,
1325                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
1326                         RTE_PCI_DRV_DETACHABLE,
1327         },
1328         .eth_dev_init = eth_ixgbe_dev_init,
1329         .eth_dev_uninit = eth_ixgbe_dev_uninit,
1330         .dev_private_size = sizeof(struct ixgbe_adapter),
1331 };
1332
1333 /*
1334  * virtual function driver struct
1335  */
1336 static struct eth_driver rte_ixgbevf_pmd = {
1337         .pci_drv = {
1338                 .name = "rte_ixgbevf_pmd",
1339                 .id_table = pci_id_ixgbevf_map,
1340                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
1341         },
1342         .eth_dev_init = eth_ixgbevf_dev_init,
1343         .eth_dev_uninit = eth_ixgbevf_dev_uninit,
1344         .dev_private_size = sizeof(struct ixgbe_adapter),
1345 };
1346
1347 /*
1348  * Driver initialization routine.
1349  * Invoked once at EAL init time.
1350  * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
1351  */
1352 static int
1353 rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
1354 {
1355         PMD_INIT_FUNC_TRACE();
1356
1357         rte_eth_driver_register(&rte_ixgbe_pmd);
1358         return 0;
1359 }
1360
1361 /*
1362  * VF Driver initialization routine.
1363  * Invoked once at EAL init time.
1364  * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
1365  */
1366 static int
1367 rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
1368 {
1369         PMD_INIT_FUNC_TRACE();
1370
1371         rte_eth_driver_register(&rte_ixgbevf_pmd);
1372         return 0;
1373 }
1374
1375 static int
1376 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1377 {
1378         struct ixgbe_hw *hw =
1379                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1380         struct ixgbe_vfta * shadow_vfta =
1381                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1382         uint32_t vfta;
1383         uint32_t vid_idx;
1384         uint32_t vid_bit;
1385
1386         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1387         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1388         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1389         if (on)
1390                 vfta |= vid_bit;
1391         else
1392                 vfta &= ~vid_bit;
1393         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1394
1395         /* update local VFTA copy */
1396         shadow_vfta->vfta[vid_idx] = vfta;
1397
1398         return 0;
1399 }
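/*
 * Worked example for the VFTA mapping above (illustrative, not from the
 * datasheet): the VFTA is an array of 128 32-bit registers covering VLAN IDs
 * 0..4095, so a VLAN ID splits into a register index (upper 7 bits) and a
 * bit position (lower 5 bits).  For vlan_id = 100:
 *
 *     vid_idx = (100 >> 5) & 0x7F = 3
 *     vid_bit = 1 << (100 & 0x1F) = 1 << 4 = 0x10
 *
 * so bit 4 of VFTA[3] controls acceptance of VLAN 100.  Applications reach
 * this handler through the generic ethdev API, roughly (port_id is a
 * placeholder for the application's port number, not a symbol of this file):
 *
 *     ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
 */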
1400
1401 static void
1402 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1403 {
1404         if (on)
1405                 ixgbe_vlan_hw_strip_enable(dev, queue);
1406         else
1407                 ixgbe_vlan_hw_strip_disable(dev, queue);
1408 }
1409
1410 static void
1411 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
1412 {
1413         struct ixgbe_hw *hw =
1414                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1415
1416         /* Only the high 16 bits are valid */
1417         IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
1418 }
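/*
 * Worked example of the shift above: programming an outer TPID of 0x88a8
 * (the 802.1ad value) writes 0x88a8 << 16 = 0x88a80000 to EXVET, with the
 * low 16 bits of the written value left at zero.
 */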
1419
1420 void
1421 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1422 {
1423         struct ixgbe_hw *hw =
1424                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1425         uint32_t vlnctrl;
1426
1427         PMD_INIT_FUNC_TRACE();
1428
1429         /* Filter Table Disable */
1430         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1431         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1432
1433         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1434 }
1435
1436 void
1437 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1438 {
1439         struct ixgbe_hw *hw =
1440                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1441         struct ixgbe_vfta * shadow_vfta =
1442                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1443         uint32_t vlnctrl;
1444         uint16_t i;
1445
1446         PMD_INIT_FUNC_TRACE();
1447
1448         /* Filter Table Enable */
1449         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1450         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1451         vlnctrl |= IXGBE_VLNCTRL_VFE;
1452
1453         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1454
1455         /* write whatever is in local vfta copy */
1456         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1457                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1458 }
1459
1460 static void
1461 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1462 {
1463         struct ixgbe_hwstrip *hwstrip =
1464                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1465
1466         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1467                 return;
1468
1469         if (on)
1470                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1471         else
1472                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1473 }
1474
1475 static void
1476 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1477 {
1478         struct ixgbe_hw *hw =
1479                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1480         uint32_t ctrl;
1481
1482         PMD_INIT_FUNC_TRACE();
1483
1484         if (hw->mac.type == ixgbe_mac_82598EB) {
1485                 /* No queue level support */
1486                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue level hw strip");
1487                 return;
1488         }
1489         else {
1490                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1491                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1492                 ctrl &= ~IXGBE_RXDCTL_VME;
1493                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1494         }
1495         /* record the setting for HW strip per queue */
1496         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1497 }
1498
1499 static void
1500 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1501 {
1502         struct ixgbe_hw *hw =
1503                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1504         uint32_t ctrl;
1505
1506         PMD_INIT_FUNC_TRACE();
1507
1508         if (hw->mac.type == ixgbe_mac_82598EB) {
1509                 /* No queue level support */
1510                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue level hw strip");
1511                 return;
1512         }
1513         else {
1514                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1515                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1516                 ctrl |= IXGBE_RXDCTL_VME;
1517                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1518         }
1519         /* record the setting for HW strip per queue */
1520         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1521 }
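/*
 * Minimal usage sketch (application side, not code from this file): per-queue
 * stripping is normally driven through the generic ethdev layer, e.g. to turn
 * stripping on for Rx queue 0 of the port identified by port_id:
 *
 *     rte_eth_dev_set_vlan_strip_on_queue(port_id, 0, 1);
 *
 * which reaches ixgbe_vlan_strip_queue_set() and, from there, the
 * enable/disable helpers above.
 */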
1522
1523 void
1524 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
1525 {
1526         struct ixgbe_hw *hw =
1527                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1528         uint32_t ctrl;
1529         uint16_t i;
1530
1531         PMD_INIT_FUNC_TRACE();
1532
1533         if (hw->mac.type == ixgbe_mac_82598EB) {
1534                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1535                 ctrl &= ~IXGBE_VLNCTRL_VME;
1536                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1537         }
1538         else {
1539                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1540                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1541                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1542                         ctrl &= ~IXGBE_RXDCTL_VME;
1543                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1544
1545                         /* record the setting for HW strip per queue */
1546                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
1547                 }
1548         }
1549 }
1550
1551 void
1552 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
1553 {
1554         struct ixgbe_hw *hw =
1555                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1556         uint32_t ctrl;
1557         uint16_t i;
1558
1559         PMD_INIT_FUNC_TRACE();
1560
1561         if (hw->mac.type == ixgbe_mac_82598EB) {
1562                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1563                 ctrl |= IXGBE_VLNCTRL_VME;
1564                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1565         }
1566         else {
1567                 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1568                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1569                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1570                         ctrl |= IXGBE_RXDCTL_VME;
1571                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1572
1573                         /* record the setting for HW strip per queue */
1574                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
1575                 }
1576         }
1577 }
1578
1579 static void
1580 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1581 {
1582         struct ixgbe_hw *hw =
1583                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1584         uint32_t ctrl;
1585
1586         PMD_INIT_FUNC_TRACE();
1587
1588         /* DMATXCTL: Generic Double VLAN Disable */
1589         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1590         ctrl &= ~IXGBE_DMATXCTL_GDV;
1591         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1592
1593         /* CTRL_EXT: Global Double VLAN Disable */
1594         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1595         ctrl &= ~IXGBE_EXTENDED_VLAN;
1596         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1597
1598 }
1599
1600 static void
1601 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1602 {
1603         struct ixgbe_hw *hw =
1604                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1605         uint32_t ctrl;
1606
1607         PMD_INIT_FUNC_TRACE();
1608
1609         /* DMATXCTL: Generic Double VLAN Enable */
1610         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1611         ctrl |= IXGBE_DMATXCTL_GDV;
1612         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1613
1614         /* CTRL_EXT: Global Double VLAN Enable */
1615         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1616         ctrl |= IXGBE_EXTENDED_VLAN;
1617         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1618
1619         /*
1620          * The VET EXT field in the EXVET register is 0x8100 by default,
1621          * so no change is needed. The same applies to the VT field of DMATXCTL.
1622          */
1623 }
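/*
 * Sketch of how double (QinQ) VLAN is requested from the application side;
 * port_conf, nb_rxq and nb_txq are placeholder names, not symbols from this
 * file:
 *
 *     struct rte_eth_conf port_conf = { 0 };
 *     port_conf.rxmode.hw_vlan_extend = 1;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *
 * ixgbe_vlan_offload_set() below then calls ixgbe_vlan_hw_extend_enable()
 * whenever ETH_VLAN_EXTEND_MASK is included in its mask.
 */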
1624
1625 static void
1626 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1627 {
1628         if (mask & ETH_VLAN_STRIP_MASK) {
1629                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1630                         ixgbe_vlan_hw_strip_enable_all(dev);
1631                 else
1632                         ixgbe_vlan_hw_strip_disable_all(dev);
1633         }
1634
1635         if (mask & ETH_VLAN_FILTER_MASK) {
1636                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1637                         ixgbe_vlan_hw_filter_enable(dev);
1638                 else
1639                         ixgbe_vlan_hw_filter_disable(dev);
1640         }
1641
1642         if (mask & ETH_VLAN_EXTEND_MASK) {
1643                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1644                         ixgbe_vlan_hw_extend_enable(dev);
1645                 else
1646                         ixgbe_vlan_hw_extend_disable(dev);
1647         }
1648 }
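/*
 * The mask only selects which of the three offloads to (re)program; the
 * on/off state itself comes from dev_conf.rxmode.  ixgbe_dev_start() below
 * applies all of them at once:
 *
 *     mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK;
 *     ixgbe_vlan_offload_set(dev, mask);
 *
 * so whatever the application configured in rxmode.hw_vlan_strip / _filter /
 * _extend is written to the hardware on start.
 */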
1649
1650 static void
1651 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1652 {
1653         struct ixgbe_hw *hw =
1654                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1655         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
1656         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1657         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
1658         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
1659 }
1660
1661 static int
1662 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1663 {
1664         switch (nb_rx_q) {
1665         case 1:
1666         case 2:
1667                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
1668                 break;
1669         case 4:
1670                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
1671                 break;
1672         default:
1673                 return -EINVAL;
1674         }
1675
1676         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
1677         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
1678
1679         return 0;
1680 }
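/*
 * Worked example of the split above (this reading of def_pool_q_idx is
 * inferred from the code, not from a datasheet): with max_vfs = 32 and
 * nb_rx_q = 2 the device runs in ETH_64_POOLS mode with nb_q_per_pool = 2,
 * and
 *
 *     def_pool_q_idx = max_vfs * nb_rx_q = 32 * 2 = 64
 *
 * i.e. the PF's own (default pool) queues start right after the queue range
 * reserved for the VFs.
 */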
1681
1682 static int
1683 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
1684 {
1685         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1686         uint16_t nb_rx_q = dev->data->nb_rx_queues;
1687         uint16_t nb_tx_q = dev->data->nb_tx_queues;
1688
1689         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1690                 /* check multi-queue mode */
1691                 switch (dev_conf->rxmode.mq_mode) {
1692                 case ETH_MQ_RX_VMDQ_DCB:
1693                 case ETH_MQ_RX_VMDQ_DCB_RSS:
1694                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
1695                         PMD_INIT_LOG(ERR, "SRIOV active,"
1696                                         " unsupported mq_mode rx %d.",
1697                                         dev_conf->rxmode.mq_mode);
1698                         return -EINVAL;
1699                 case ETH_MQ_RX_RSS:
1700                 case ETH_MQ_RX_VMDQ_RSS:
1701                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1702                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1703                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1704                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1705                                                 " invalid queue number"
1706                                                 " for VMDQ RSS, allowed"
1707                                                 " values are 1, 2 or 4.");
1708                                         return -EINVAL;
1709                                 }
1710                         break;
1711                 case ETH_MQ_RX_VMDQ_ONLY:
1712                 case ETH_MQ_RX_NONE:
1713                         /* if no mq mode is configured, use the default scheme */
1714                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
1715                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
1716                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
1717                         break;
1718                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1719                         /* SRIOV only works in VMDq enable mode */
1720                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1721                                         " wrong mq_mode rx %d.",
1722                                         dev_conf->rxmode.mq_mode);
1723                         return -EINVAL;
1724                 }
1725
1726                 switch (dev_conf->txmode.mq_mode) {
1727                 case ETH_MQ_TX_VMDQ_DCB:
1728                         /* DCB VMDQ in SRIOV mode, not implemented yet */
1729                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1730                                         " unsupported VMDQ mq_mode tx %d.",
1731                                         dev_conf->txmode.mq_mode);
1732                         return -EINVAL;
1733                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1734                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
1735                         break;
1736                 }
1737
1738                 /* check valid queue number */
1739                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1740                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1741                         PMD_INIT_LOG(ERR, "SRIOV is active,"
1742                                         " queue number must be less than or equal to %d.",
1743                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1744                         return -EINVAL;
1745                 }
1746         } else {
1747                 /* check configuration for vmdq+dcb mode */
1748                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1749                         const struct rte_eth_vmdq_dcb_conf *conf;
1750
1751                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
1752                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1753                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
1754                                 return -EINVAL;
1755                         }
1756                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1757                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1758                                conf->nb_queue_pools == ETH_32_POOLS)) {
1759                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1760                                                 " nb_queue_pools must be %d or %d.",
1761                                                 ETH_16_POOLS, ETH_32_POOLS);
1762                                 return -EINVAL;
1763                         }
1764                 }
1765                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1766                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
1767
1768                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
1769                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1770                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
1771                                 return -EINVAL;
1772                         }
1773                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1774                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1775                                conf->nb_queue_pools == ETH_32_POOLS)) {
1776                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1777                                                 " nb_queue_pools != %d and"
1778                                                 " nb_queue_pools != %d.",
1779                                                 ETH_16_POOLS, ETH_32_POOLS);
1780                                 return -EINVAL;
1781                         }
1782                 }
1783
1784                 /* For DCB mode check our configuration before we go further */
1785                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1786                         const struct rte_eth_dcb_rx_conf *conf;
1787
1788                         if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
1789                                 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
1790                                                  IXGBE_DCB_NB_QUEUES);
1791                                 return -EINVAL;
1792                         }
1793                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1794                         if (!(conf->nb_tcs == ETH_4_TCS ||
1795                                conf->nb_tcs == ETH_8_TCS)) {
1796                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1797                                                 " and nb_tcs != %d.",
1798                                                 ETH_4_TCS, ETH_8_TCS);
1799                                 return -EINVAL;
1800                         }
1801                 }
1802
1803                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1804                         const struct rte_eth_dcb_tx_conf *conf;
1805
1806                         if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
1807                                 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
1808                                                  IXGBE_DCB_NB_QUEUES);
1809                                 return -EINVAL;
1810                         }
1811                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1812                         if (!(conf->nb_tcs == ETH_4_TCS ||
1813                                conf->nb_tcs == ETH_8_TCS)) {
1814                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1815                                                 " and nb_tcs != %d.",
1816                                                 ETH_4_TCS, ETH_8_TCS);
1817                                 return -EINVAL;
1818                         }
1819                 }
1820         }
1821         return 0;
1822 }
1823
1824 static int
1825 ixgbe_dev_configure(struct rte_eth_dev *dev)
1826 {
1827         struct ixgbe_interrupt *intr =
1828                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1829         struct ixgbe_adapter *adapter =
1830                 (struct ixgbe_adapter *)dev->data->dev_private;
1831         int ret;
1832
1833         PMD_INIT_FUNC_TRACE();
1834         /* multiple queue mode checking */
1835         ret  = ixgbe_check_mq_mode(dev);
1836         if (ret != 0) {
1837                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
1838                             ret);
1839                 return ret;
1840         }
1841
1842         /* set flag to update link status after init */
1843         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1844
1845         /*
1846          * Initialize to TRUE. If any Rx queue fails to meet the bulk
1847          * allocation or vector Rx preconditions, the corresponding flag will be cleared.
1848          */
1849         adapter->rx_bulk_alloc_allowed = true;
1850         adapter->rx_vec_allowed = true;
1851
1852         return 0;
1853 }
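/*
 * Application-side sketch of the path into this function; port_conf and the
 * queue counts are placeholders, not symbols from this file:
 *
 *     struct rte_eth_conf port_conf = { 0 };
 *     port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *     rte_eth_dev_configure(port_id, 4, 4, &port_conf);   <- ixgbe_dev_configure()
 *     ...rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() per queue...
 *     rte_eth_dev_start(port_id);                          <- ixgbe_dev_start()
 */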
1854
1855 /*
1856  * Configure device link speed and setup link.
1857  * It returns 0 on success.
1858  */
1859 static int
1860 ixgbe_dev_start(struct rte_eth_dev *dev)
1861 {
1862         struct ixgbe_hw *hw =
1863                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1864         struct ixgbe_vf_info *vfinfo =
1865                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
1866         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1867         uint32_t intr_vector = 0;
1868         int err, link_up = 0, negotiate = 0;
1869         uint32_t speed = 0;
1870         int mask = 0;
1871         int status;
1872         uint16_t vf, idx;
1873
1874         PMD_INIT_FUNC_TRACE();
1875
1876         /* IXGBE devices don't support half duplex */
1877         if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
1878                         (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
1879                 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
1880                              dev->data->dev_conf.link_duplex,
1881                              dev->data->port_id);
1882                 return -EINVAL;
1883         }
1884
1885         /* stop adapter */
1886         hw->adapter_stopped = 0;
1887         ixgbe_stop_adapter(hw);
1888
1889         /* reinitialize adapter
1890          * this calls reset and start */
1891         status = ixgbe_pf_reset_hw(hw);
1892         if (status != 0)
1893                 return -1;
1894         hw->mac.ops.start_hw(hw);
1895         hw->mac.get_link_status = true;
1896
1897         /* configure PF module if SRIOV enabled */
1898         ixgbe_pf_host_configure(dev);
1899
1900         /* check and configure queue intr-vector mapping */
1901         if (dev->data->dev_conf.intr_conf.rxq != 0)
1902                 intr_vector = dev->data->nb_rx_queues;
1903
1904         if (rte_intr_efd_enable(intr_handle, intr_vector))
1905                 return -1;
1906
1907         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1908                 intr_handle->intr_vec =
1909                         rte_zmalloc("intr_vec",
1910                                     dev->data->nb_rx_queues * sizeof(int),
1911                                     0);
1912                 if (intr_handle->intr_vec == NULL) {
1913                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1914                                      " intr_vec", dev->data->nb_rx_queues);
1915                         return -ENOMEM;
1916                 }
1917         }
1918
1919         /* configure msix for sleep until rx interrupt */
1920         ixgbe_configure_msix(dev);
1921
1922         /* initialize transmission unit */
1923         ixgbe_dev_tx_init(dev);
1924
1925         /* This can fail when allocating mbufs for descriptor rings */
1926         err = ixgbe_dev_rx_init(dev);
1927         if (err) {
1928                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1929                 goto error;
1930         }
1931
1932         err = ixgbe_dev_rxtx_start(dev);
1933         if (err < 0) {
1934                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1935                 goto error;
1936         }
1937
1938         /* Skip link setup if loopback mode is enabled for 82599. */
1939         if (hw->mac.type == ixgbe_mac_82599EB &&
1940                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
1941                 goto skip_link_setup;
1942
1943         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1944                 err = hw->mac.ops.setup_sfp(hw);
1945                 if (err)
1946                         goto error;
1947         }
1948
1949         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
1950                 /* Turn on the copper */
1951                 ixgbe_set_phy_power(hw, true);
1952         } else {
1953                 /* Turn on the laser */
1954                 ixgbe_enable_tx_laser(hw);
1955         }
1956
1957         err = ixgbe_check_link(hw, &speed, &link_up, 0);
1958         if (err)
1959                 goto error;
1960         dev->data->dev_link.link_status = link_up;
1961
1962         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
1963         if (err)
1964                 goto error;
1965
1966         switch (dev->data->dev_conf.link_speed) {
1967         case ETH_LINK_SPEED_AUTONEG:
1968                 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
1969                                 IXGBE_LINK_SPEED_82599_AUTONEG :
1970                                 IXGBE_LINK_SPEED_82598_AUTONEG;
1971                 break;
1972         case ETH_LINK_SPEED_100:
1973                 /*
1974                  * Invalid for 82598 but error will be detected by
1975                  * ixgbe_setup_link()
1976                  */
1977                 speed = IXGBE_LINK_SPEED_100_FULL;
1978                 break;
1979         case ETH_LINK_SPEED_1000:
1980                 speed = IXGBE_LINK_SPEED_1GB_FULL;
1981                 break;
1982         case ETH_LINK_SPEED_10000:
1983                 speed = IXGBE_LINK_SPEED_10GB_FULL;
1984                 break;
1985         default:
1986                 PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
1987                              dev->data->dev_conf.link_speed,
1988                              dev->data->port_id);
1989                 goto error;
1990         }
1991
1992         err = ixgbe_setup_link(hw, speed, link_up);
1993         if (err)
1994                 goto error;
1995
1996 skip_link_setup:
1997
1998         /* check if lsc interrupt is enabled */
1999         if (dev->data->dev_conf.intr_conf.lsc != 0) {
2000                 if (rte_intr_allow_others(intr_handle)) {
2001                         rte_intr_callback_register(intr_handle,
2002                                                    ixgbe_dev_interrupt_handler,
2003                                                    (void *)dev);
2004                         ixgbe_dev_lsc_interrupt_setup(dev);
2005                 } else
2006                         PMD_INIT_LOG(INFO, "lsc interrupt cannot be enabled:"
2007                                      " no intr multiplex support");
2008         }
2009
2010         /* check if rxq interrupt is enabled */
2011         if (dev->data->dev_conf.intr_conf.rxq != 0)
2012                 ixgbe_dev_rxq_interrupt_setup(dev);
2013
2014         /* enable uio/vfio intr/eventfd mapping */
2015         rte_intr_enable(intr_handle);
2016
2017         /* resume enabled intr since hw reset */
2018         ixgbe_enable_intr(dev);
2019
2020         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
2021                 ETH_VLAN_EXTEND_MASK;
2022         ixgbe_vlan_offload_set(dev, mask);
2023
2024         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2025                 /* Enable vlan filtering for VMDq */
2026                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2027         }
2028
2029         /* Configure DCB hw */
2030         ixgbe_configure_dcb(dev);
2031
2032         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2033                 err = ixgbe_fdir_configure(dev);
2034                 if (err)
2035                         goto error;
2036         }
2037
2038         /* Restore vf rate limit */
2039         if (vfinfo != NULL) {
2040                 for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
2041                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2042                                 if (vfinfo[vf].tx_rate[idx] != 0)
2043                                         ixgbe_set_vf_rate_limit(dev, vf,
2044                                                 vfinfo[vf].tx_rate[idx],
2045                                                 1 << idx);
2046         }
2047
2048         ixgbe_restore_statistics_mapping(dev);
2049
2050         return 0;
2051
2052 error:
2053         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2054         ixgbe_dev_clear_queues(dev);
2055         return -EIO;
2056 }
2057
2058 /*
2059  * Stop device: disable rx and tx functions to allow for reconfiguring.
2060  */
2061 static void
2062 ixgbe_dev_stop(struct rte_eth_dev *dev)
2063 {
2064         struct rte_eth_link link;
2065         struct ixgbe_hw *hw =
2066                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2067         struct ixgbe_vf_info *vfinfo =
2068                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2069         struct ixgbe_filter_info *filter_info =
2070                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2071         struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
2072         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
2073         int vf;
2074
2075         PMD_INIT_FUNC_TRACE();
2076
2077         /* disable interrupts */
2078         ixgbe_disable_intr(hw);
2079
2080         /* disable intr eventfd mapping */
2081         rte_intr_disable(intr_handle);
2082
2083         /* reset the NIC */
2084         ixgbe_pf_reset_hw(hw);
2085         hw->adapter_stopped = 0;
2086
2087         /* stop adapter */
2088         ixgbe_stop_adapter(hw);
2089
2090         for (vf = 0; vfinfo != NULL &&
2091                      vf < dev->pci_dev->max_vfs; vf++)
2092                 vfinfo[vf].clear_to_send = false;
2093
2094         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2095                 /* Turn off the copper */
2096                 ixgbe_set_phy_power(hw, false);
2097         } else {
2098                 /* Turn off the laser */
2099                 ixgbe_disable_tx_laser(hw);
2100         }
2101
2102         ixgbe_dev_clear_queues(dev);
2103
2104         /* Clear stored conf */
2105         dev->data->scattered_rx = 0;
2106         dev->data->lro = 0;
2107
2108         /* Clear recorded link status */
2109         memset(&link, 0, sizeof(link));
2110         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2111
2112         /* Remove all ntuple filters of the device */
2113         for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
2114              p_5tuple != NULL; p_5tuple = p_5tuple_next) {
2115                 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
2116                 TAILQ_REMOVE(&filter_info->fivetuple_list,
2117                              p_5tuple, entries);
2118                 rte_free(p_5tuple);
2119         }
2120         memset(filter_info->fivetuple_mask, 0,
2121                 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
2122
2123         /* Clean datapath event and queue/vec mapping */
2124         rte_intr_efd_disable(intr_handle);
2125         if (intr_handle->intr_vec != NULL) {
2126                 rte_free(intr_handle->intr_vec);
2127                 intr_handle->intr_vec = NULL;
2128         }
2129 }
2130
2131 /*
2132  * Set device link up: enable tx.
2133  */
2134 static int
2135 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2136 {
2137         struct ixgbe_hw *hw =
2138                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2139         if (hw->mac.type == ixgbe_mac_82599EB) {
2140 #ifdef RTE_NIC_BYPASS
2141                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2142                         /* Not supported in bypass mode */
2143                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2144                                      "by device id 0x%x", hw->device_id);
2145                         return -ENOTSUP;
2146                 }
2147 #endif
2148         }
2149
2150         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2151                 /* Turn on the copper */
2152                 ixgbe_set_phy_power(hw, true);
2153         } else {
2154                 /* Turn on the laser */
2155                 ixgbe_enable_tx_laser(hw);
2156         }
2157
2158         return 0;
2159 }
2160
2161 /*
2162  * Set device link down: disable tx.
2163  */
2164 static int
2165 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2166 {
2167         struct ixgbe_hw *hw =
2168                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2169         if (hw->mac.type == ixgbe_mac_82599EB) {
2170 #ifdef RTE_NIC_BYPASS
2171                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2172                         /* Not supported in bypass mode */
2173                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2174                                      "by device id 0x%x", hw->device_id);
2175                         return -ENOTSUP;
2176                 }
2177 #endif
2178         }
2179
2180         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2181                 /* Turn off the copper */
2182                 ixgbe_set_phy_power(hw, false);
2183         } else {
2184                 /* Turn off the laser */
2185                 ixgbe_disable_tx_laser(hw);
2186         }
2187
2188         return 0;
2189 }
2190
2191 /*
2192  * Reset and stop device.
2193  */
2194 static void
2195 ixgbe_dev_close(struct rte_eth_dev *dev)
2196 {
2197         struct ixgbe_hw *hw =
2198                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2199
2200         PMD_INIT_FUNC_TRACE();
2201
2202         ixgbe_pf_reset_hw(hw);
2203
2204         ixgbe_dev_stop(dev);
2205         hw->adapter_stopped = 1;
2206
2207         ixgbe_dev_free_queues(dev);
2208
2209         ixgbe_disable_pcie_master(hw);
2210
2211         /* reprogram the RAR[0] in case user changed it. */
2212         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2213 }
2214
2215 static void
2216 ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats
2217                                                    *hw_stats, uint64_t *total_missed_rx,
2218                                                    uint64_t *total_qbrc, uint64_t *total_qprc,
2219                                                    uint64_t *total_qprdc)
2220 {
2221         uint32_t bprc, lxon, lxoff, total;
2222         unsigned i;
2223
2224         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2225         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2226         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2227         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2228
2229         for (i = 0; i < 8; i++) {
2230                 uint32_t mp;
2231                 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2232                 /* global total per queue */
2233                 hw_stats->mpc[i] += mp;
2234                 /* Running comprehensive total for stats display */
2235                 *total_missed_rx += hw_stats->mpc[i];
2236                 if (hw->mac.type == ixgbe_mac_82598EB) {
2237                         hw_stats->rnbc[i] +=
2238                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2239                         hw_stats->pxonrxc[i] +=
2240                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2241                         hw_stats->pxoffrxc[i] +=
2242                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2243                 } else {
2244                         hw_stats->pxonrxc[i] +=
2245                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2246                         hw_stats->pxoffrxc[i] +=
2247                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2248                         hw_stats->pxon2offc[i] +=
2249                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2250                 }
2251                 hw_stats->pxontxc[i] +=
2252                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2253                 hw_stats->pxofftxc[i] +=
2254                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2255         }
2256         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2257                 hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2258                 hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2259                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2260                 hw_stats->qbrc[i] +=
2261                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2262                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2263                 hw_stats->qbtc[i] +=
2264                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2265                 *total_qprdc += hw_stats->qprdc[i] +=
2266                                 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2267
2268                 *total_qprc += hw_stats->qprc[i];
2269                 *total_qbrc += hw_stats->qbrc[i];
2270         }
2271         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2272         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2273         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2274
2275         /* Note that gprc counts missed packets */
2276         hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
2277
2278         if (hw->mac.type != ixgbe_mac_82598EB) {
2279                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
2280                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
2281                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
2282                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
2283                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
2284                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
2285                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
2286                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
2287         } else {
2288                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2289                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2290                 /* 82598 only has a counter in the high register */
2291                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2292                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2293                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2294         }
2295
2296         /*
2297          * Workaround: mprc hardware is incorrectly counting
2298          * broadcasts, so for now we subtract those.
2299          */
2300         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2301         hw_stats->bprc += bprc;
2302         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2303         if (hw->mac.type == ixgbe_mac_82598EB)
2304                 hw_stats->mprc -= bprc;
2305
2306         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2307         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2308         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2309         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2310         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2311         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2312
2313         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2314         hw_stats->lxontxc += lxon;
2315         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2316         hw_stats->lxofftxc += lxoff;
2317         total = lxon + lxoff;
2318
2319         hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
2320         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2321         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2322         hw_stats->gptc -= total;
2323         hw_stats->mptc -= total;
2324         hw_stats->ptc64 -= total;
2325         hw_stats->gotc -= total * ETHER_MIN_LEN;
2326
2327         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2328         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2329         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2330         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2331         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
2332         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
2333         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
2334         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2335         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
2336         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2337         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2338         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2339         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2340         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
2341         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2342         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
2343         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
2344         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
2345         /* Only read FCOE on 82599 */
2346         if (hw->mac.type != ixgbe_mac_82598EB) {
2347                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
2348                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
2349                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
2350                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
2351                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
2352         }
2353
2354         /* Flow Director Stats registers */
2355         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
2356         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
2357 }
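/*
 * Notes on the arithmetic above (interpretation of the code, not taken from
 * a datasheet): the 64-bit byte counters are assembled from two 32-bit reads,
 *
 *     qbrc = QBRC_L + ((uint64_t)QBRC_H << 32);
 *
 * and because transmitted XON/XOFF pause frames are counted by GPTC/PTC64,
 * they are subtracted again; e.g. with lxon = 3 and lxoff = 2, total = 5
 * packets and 5 * ETHER_MIN_LEN = 5 * 64 = 320 bytes are removed from
 * gptc/gotc.
 */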
2358
2359 /*
2360  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
2361  */
2362 static void
2363 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2364 {
2365         struct ixgbe_hw *hw =
2366                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2367         struct ixgbe_hw_stats *hw_stats =
2368                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2369         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2370         unsigned i;
2371
2372         total_missed_rx = 0;
2373         total_qbrc = 0;
2374         total_qprc = 0;
2375         total_qprdc = 0;
2376
2377         ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
2378                         &total_qprc, &total_qprdc);
2379
2380         if (stats == NULL)
2381                 return;
2382
2383         /* Fill out the rte_eth_stats statistics structure */
2384         stats->ipackets = total_qprc;
2385         stats->ibytes = total_qbrc;
2386         stats->opackets = hw_stats->gptc;
2387         stats->obytes = hw_stats->gotc;
2388
2389         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2390                 stats->q_ipackets[i] = hw_stats->qprc[i];
2391                 stats->q_opackets[i] = hw_stats->qptc[i];
2392                 stats->q_ibytes[i] = hw_stats->qbrc[i];
2393                 stats->q_obytes[i] = hw_stats->qbtc[i];
2394                 stats->q_errors[i] = hw_stats->qprdc[i];
2395         }
2396
2397         /* Rx Errors */
2398         stats->ierrors  = hw_stats->crcerrs +
2399                           hw_stats->mspdc +
2400                           hw_stats->rlec +
2401                           hw_stats->ruc +
2402                           hw_stats->roc +
2403                           total_missed_rx +
2404                           hw_stats->illerrc +
2405                           hw_stats->errbc +
2406                           hw_stats->xec +
2407                           hw_stats->mlfc +
2408                           hw_stats->mrfc +
2409                           hw_stats->rfc +
2410                           hw_stats->fccrc +
2411                           hw_stats->fclast;
2412
2413         /* Tx Errors */
2414         stats->oerrors  = 0;
2415 }
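/*
 * Typical application use of the accessor above (sketch; port_id is a
 * placeholder and error handling is omitted):
 *
 *     struct rte_eth_stats stats;
 *     rte_eth_stats_get(port_id, &stats);
 *     printf("rx %" PRIu64 " pkts, %" PRIu64 " bytes, %" PRIu64 " errors\n",
 *            stats.ipackets, stats.ibytes, stats.ierrors);
 */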
2416
2417 static void
2418 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
2419 {
2420         struct ixgbe_hw_stats *stats =
2421                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2422
2423         /* HW registers are cleared on read */
2424         ixgbe_dev_stats_get(dev, NULL);
2425
2426         /* Reset software totals */
2427         memset(stats, 0, sizeof(*stats));
2428 }
2429
2430 static int
2431 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
2432                                          unsigned n)
2433 {
2434         struct ixgbe_hw *hw =
2435                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2436         struct ixgbe_hw_stats *hw_stats =
2437                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2438         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2439         unsigned i, count = IXGBE_NB_XSTATS;
2440
2441         if (n < count)
2442                 return count;
2443
2444         total_missed_rx = 0;
2445         total_qbrc = 0;
2446         total_qprc = 0;
2447         total_qprdc = 0;
2448
2449         ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
2450                                                            &total_qprc, &total_qprdc);
2451
2452         /* If this is a reset, xstats is NULL and we have cleared the
2453          * registers by reading them.
2454          */
2455         if (!xstats)
2456                 return 0;
2457
2458         /* Extended stats */
2459         for (i = 0; i < IXGBE_NB_XSTATS; i++) {
2460                 snprintf(xstats[i].name, sizeof(xstats[i].name),
2461                                 "%s", rte_ixgbe_stats_strings[i].name);
2462                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
2463                                                         rte_ixgbe_stats_strings[i].offset);
2464         }
2465
2466         return count;
2467 }
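/*
 * The "n < count" early return supports the usual two-call pattern on the
 * application side (sketch; assumes a NULL table with n = 0 may be passed
 * just to query the required size, since the table is not touched then):
 *
 *     int len = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstats *xs = malloc(len * sizeof(*xs));
 *     rte_eth_xstats_get(port_id, xs, len);
 */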
2468
2469 static void
2470 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2471 {
2472         struct ixgbe_hw_stats *stats =
2473                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2474
2475         /* HW registers are cleared on read */
2476         ixgbe_dev_xstats_get(dev, NULL, IXGBE_NB_XSTATS);
2477
2478         /* Reset software totals */
2479         memset(stats, 0, sizeof(*stats));
2480 }
2481
2482 static void
2483 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2484 {
2485         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2486         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
2487                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2488
2489         /* Good Rx packet, include VF loopback */
2490         UPDATE_VF_STAT(IXGBE_VFGPRC,
2491             hw_stats->last_vfgprc, hw_stats->vfgprc);
2492
2493         /* Good Rx octets, include VF loopback */
2494         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2495             hw_stats->last_vfgorc, hw_stats->vfgorc);
2496
2497         /* Good Tx packet, include VF loopback */
2498         UPDATE_VF_STAT(IXGBE_VFGPTC,
2499             hw_stats->last_vfgptc, hw_stats->vfgptc);
2500
2501         /* Good Tx octets, include VF loopback */
2502         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2503             hw_stats->last_vfgotc, hw_stats->vfgotc);
2504
2505         /* Rx Multicast Packet */
2506         UPDATE_VF_STAT(IXGBE_VFMPRC,
2507             hw_stats->last_vfmprc, hw_stats->vfmprc);
2508
2509         if (stats == NULL)
2510                 return;
2511
2512         stats->ipackets = hw_stats->vfgprc;
2513         stats->ibytes = hw_stats->vfgorc;
2514         stats->opackets = hw_stats->vfgptc;
2515         stats->obytes = hw_stats->vfgotc;
2516         stats->imcasts = hw_stats->vfmprc;
2517         /* stats->imcasts should be removed as imcasts is deprecated */
2518 }
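/*
 * The UPDATE_VF_STAT/UPDATE_VF_STAT_36BIT macros used above (defined in
 * ixgbe_ethdev.h) exist because the VF counters are not cleared on read:
 * each macro keeps the previously read value and adds only the wrap-corrected
 * delta to the running software total, e.g. for the 32-bit case
 *
 *     delta = (cur_reading - last_reading) & UINT32_MAX;
 *
 * so a register that wrapped from 0xFFFFFFF0 to 0x00000010 contributes 0x20.
 * The 36-bit variant additionally stitches a 4-bit MSB register on top of
 * the 32-bit LSB register.  This describes the macros, not extra hardware
 * behaviour.
 */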
2519
2520 static void
2521 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
2522 {
2523         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
2524                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2525
2526         /* Sync HW register to the last stats */
2527         ixgbevf_dev_stats_get(dev, NULL);
2528
2529         /* reset HW current stats */
2530         hw_stats->vfgprc = 0;
2531         hw_stats->vfgorc = 0;
2532         hw_stats->vfgptc = 0;
2533         hw_stats->vfgotc = 0;
2534         hw_stats->vfmprc = 0;
2535
2536 }
2537
2538 static void
2539 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2540 {
2541         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2542
2543         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2544         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2545         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
2546         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
2547         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2548         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2549         dev_info->max_vfs = dev->pci_dev->max_vfs;
2550         if (hw->mac.type == ixgbe_mac_82598EB)
2551                 dev_info->max_vmdq_pools = ETH_16_POOLS;
2552         else
2553                 dev_info->max_vmdq_pools = ETH_64_POOLS;
2554         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2555         dev_info->rx_offload_capa =
2556                 DEV_RX_OFFLOAD_VLAN_STRIP |
2557                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2558                 DEV_RX_OFFLOAD_UDP_CKSUM  |
2559                 DEV_RX_OFFLOAD_TCP_CKSUM;
2560
2561         /*
2562          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
2563          * mode.
2564          */
2565         if ((hw->mac.type == ixgbe_mac_82599EB ||
2566              hw->mac.type == ixgbe_mac_X540) &&
2567             !RTE_ETH_DEV_SRIOV(dev).active)
2568                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
2569
2570         dev_info->tx_offload_capa =
2571                 DEV_TX_OFFLOAD_VLAN_INSERT |
2572                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2573                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2574                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2575                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2576                 DEV_TX_OFFLOAD_TCP_TSO;
2577
2578         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2579                 .rx_thresh = {
2580                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2581                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2582                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2583                 },
2584                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2585                 .rx_drop_en = 0,
2586         };
2587
2588         dev_info->default_txconf = (struct rte_eth_txconf) {
2589                 .tx_thresh = {
2590                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2591                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2592                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2593                 },
2594                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2595                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2596                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2597                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2598         };
2599
2600         dev_info->rx_desc_lim = rx_desc_lim;
2601         dev_info->tx_desc_lim = tx_desc_lim;
2602
2603         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2604         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
2605         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
2606 }
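/*
 * With rx_desc_lim/tx_desc_lim now exported, an application can validate its
 * ring sizes before queue setup, along the lines of (sketch; nb_rxd,
 * socket_id and mbuf_pool are placeholders):
 *
 *     struct rte_eth_dev_info info;
 *     rte_eth_dev_info_get(port_id, &info);
 *     if (nb_rxd > info.rx_desc_lim.nb_max ||
 *         nb_rxd % info.rx_desc_lim.nb_align != 0)
 *             nb_rxd = info.rx_desc_lim.nb_max;
 *     rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, NULL, mbuf_pool);
 */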
2607
2608 static void
2609 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
2610                      struct rte_eth_dev_info *dev_info)
2611 {
2612         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2613
2614         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2615         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2616         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
2617         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
2618         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2619         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2620         dev_info->max_vfs = dev->pci_dev->max_vfs;
2621         if (hw->mac.type == ixgbe_mac_82598EB)
2622                 dev_info->max_vmdq_pools = ETH_16_POOLS;
2623         else
2624                 dev_info->max_vmdq_pools = ETH_64_POOLS;
2625         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
2626                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2627                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
2628                                 DEV_RX_OFFLOAD_TCP_CKSUM;
2629         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
2630                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2631                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2632                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2633                                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2634                                 DEV_TX_OFFLOAD_TCP_TSO;
2635
2636         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2637                 .rx_thresh = {
2638                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2639                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2640                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2641                 },
2642                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2643                 .rx_drop_en = 0,
2644         };
2645
2646         dev_info->default_txconf = (struct rte_eth_txconf) {
2647                 .tx_thresh = {
2648                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2649                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2650                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2651                 },
2652                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2653                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2654                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2655                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2656         };
2657
2658         dev_info->rx_desc_lim = rx_desc_lim;
2659         dev_info->tx_desc_lim = tx_desc_lim;
2660 }
2661
2662 /* return 0 if the link status changed, -1 if it did not change */
2663 static int
2664 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2665 {
2666         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2667         struct rte_eth_link link, old;
2668         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
2669         int link_up;
2670         int diag;
2671
2672         link.link_status = 0;
2673         link.link_speed = 0;
2674         link.link_duplex = 0;
2675         memset(&old, 0, sizeof(old));
2676         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
2677
2678         hw->mac.get_link_status = true;
2679
2680         /* do a no-wait link check when no wait was requested or the LSC interrupt is enabled */
2681         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2682                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
2683         else
2684                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
2685
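             /* the link check failed: report a conservative default state
              * (down, 100 Mb/s half-duplex) so callers still see defined values */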
2686         if (diag != 0) {
2687                 link.link_speed = ETH_LINK_SPEED_100;
2688                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2689                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2690                 if (link.link_status == old.link_status)
2691                         return -1;
2692                 return 0;
2693         }
2694
2695         if (link_up == 0) {
2696                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2697                 if (link.link_status == old.link_status)
2698                         return -1;
2699                 return 0;
2700         }
2701         link.link_status = 1;
2702         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2703
2704         switch (link_speed) {
2705         default:
2706         case IXGBE_LINK_SPEED_UNKNOWN:
2707                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2708                 link.link_speed = ETH_LINK_SPEED_100;
2709                 break;
2710
2711         case IXGBE_LINK_SPEED_100_FULL:
2712                 link.link_speed = ETH_LINK_SPEED_100;
2713                 break;
2714
2715         case IXGBE_LINK_SPEED_1GB_FULL:
2716                 link.link_speed = ETH_LINK_SPEED_1000;
2717                 break;
2718
2719         case IXGBE_LINK_SPEED_10GB_FULL:
2720                 link.link_speed = ETH_LINK_SPEED_10000;
2721                 break;
2722         }
2723         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2724
2725         if (link.link_status == old.link_status)
2726                 return -1;
2727
2728         return 0;
2729 }
2730
2731 static void
2732 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2733 {
2734         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2735         uint32_t fctrl;
2736
2737         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2738         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2739         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2740 }
2741
2742 static void
2743 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2744 {
2745         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2746         uint32_t fctrl;
2747
2748         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2749         fctrl &= (~IXGBE_FCTRL_UPE);
2750         if (dev->data->all_multicast == 1)
2751                 fctrl |= IXGBE_FCTRL_MPE;
2752         else
2753                 fctrl &= (~IXGBE_FCTRL_MPE);
2754         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2755 }
2756
2757 static void
2758 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2759 {
2760         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2761         uint32_t fctrl;
2762
2763         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2764         fctrl |= IXGBE_FCTRL_MPE;
2765         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2766 }
2767
2768 static void
2769 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2770 {
2771         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2772         uint32_t fctrl;
2773
2774         if (dev->data->promiscuous == 1)
2775                 return; /* must remain in all_multicast mode */
2776
2777         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2778         fctrl &= (~IXGBE_FCTRL_MPE);
2779         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2780 }
2781
2782 /**
2783  * It clears the interrupt causes and enables the interrupt.
2784  * It is called only once during NIC initialization.
2785  *
2786  * @param dev
2787  *  Pointer to struct rte_eth_dev.
2788  *
2789  * @return
2790  *  - On success, zero.
2791  *  - On failure, a negative value.
2792  */
2793 static int
2794 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
2795 {
2796         struct ixgbe_interrupt *intr =
2797                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2798
2799         ixgbe_dev_link_status_print(dev);
2800         intr->mask |= IXGBE_EICR_LSC;
2801
2802         return 0;
2803 }
2804
2805 /**
2806  * It clears the interrupt causes and enables the interrupt.
2807  * It is called only once during NIC initialization.
2808  *
2809  * @param dev
2810  *  Pointer to struct rte_eth_dev.
2811  *
2812  * @return
2813  *  - On success, zero.
2814  *  - On failure, a negative value.
2815  */
2816 static int
2817 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2818 {
2819         struct ixgbe_interrupt *intr =
2820                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2821
2822         intr->mask |= IXGBE_EICR_RTX_QUEUE;
2823
2824         return 0;
2825 }
2826
2827 /*
2828  * It reads the EICR register and, if IXGBE_EICR_LSC is set, flags the need for a link update.
2829  *
2830  * @param dev
2831  *  Pointer to struct rte_eth_dev.
2832  *
2833  * @return
2834  *  - On success, zero.
2835  *  - On failure, a negative value.
2836  */
2837 static int
2838 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2839 {
2840         uint32_t eicr;
2841         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2842         struct ixgbe_interrupt *intr =
2843                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2844
2845         /* disable interrupts by masking all causes */
2846         ixgbe_disable_intr(hw);
2847
2848         /* read-on-clear nic registers here */
2849         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2850         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2851
2852         intr->flags = 0;
2853
2854         /* set flag for async link update */
2855         if (eicr & IXGBE_EICR_LSC)
2856                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2857
2858         if (eicr & IXGBE_EICR_MAILBOX)
2859                 intr->flags |= IXGBE_FLAG_MAILBOX;
2860
2861         return 0;
2862 }
2863
2864 static int
2865 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
2866 {
2867         uint32_t eicr;
2868         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2869         struct ixgbe_interrupt *intr =
2870                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2871
2872         /* disable interrupts by masking all causes */
2873         ixgbevf_intr_disable(hw);
2874
2875         /* read-on-clear nic registers here */
2876         eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
2877         PMD_DRV_LOG(INFO, "eicr %x", eicr);
2878
2879         intr->flags = 0;
2880
2881         /* set flag for async link update */
2882         if (eicr & IXGBE_EICR_LSC)
2883                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2884
2885         return 0;
2886 }
2887
2888 /**
2889  * It gets and then prints the link status.
2890  *
2891  * @param dev
2892  *  Pointer to struct rte_eth_dev.
2893  *
2894  * @return
2895  *  void
2897  */
2898 static void
2899 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
2900 {
2901         struct rte_eth_link link;
2902
2903         memset(&link, 0, sizeof(link));
2904         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
2905         if (link.link_status) {
2906                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2907                                         (int)(dev->data->port_id),
2908                                         (unsigned)link.link_speed,
2909                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2910                                         "full-duplex" : "half-duplex");
2911         } else {
2912                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2913                                 (int)(dev->data->port_id));
2914         }
2915         PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
2916                                 dev->pci_dev->addr.domain,
2917                                 dev->pci_dev->addr.bus,
2918                                 dev->pci_dev->addr.devid,
2919                                 dev->pci_dev->addr.function);
2920 }
2921
2922 /*
2923  * It executes link_update after an interrupt has occurred.
2924  *
2925  * @param dev
2926  *  Pointer to struct rte_eth_dev.
2927  *
2928  * @return
2929  *  - On success, zero.
2930  *  - On failure, a negative value.
2931  */
2932 static int
2933 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
2934 {
2935         struct ixgbe_interrupt *intr =
2936                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2937         int64_t timeout;
2938         struct rte_eth_link link;
2939         int intr_enable_delay = false;
2940
2941         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2942
2943         if (intr->flags & IXGBE_FLAG_MAILBOX) {
2944                 ixgbe_pf_mbx_process(dev);
2945                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
2946         }
2947
2948         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
2949                 /* get the link status before the update so the new state can be predicted */
2950                 memset(&link, 0, sizeof(link));
2951                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
2952
2953                 ixgbe_dev_link_update(dev, 0);
2954
2955                 /* the link was down, so it is likely coming up */
2956                 if (!link.link_status)
2957                         /* handle it 1 sec later, waiting for it to stabilize */
2958                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
2959                 /* the link was up, so it is likely going down */
2960                 else
2961                         /* handle it 4 sec later, waiting for it to stabilize */
2962                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
2963
2964                 ixgbe_dev_link_status_print(dev);
2965
2966                 intr_enable_delay = true;
2967         }
2968
2969         if (intr_enable_delay) {
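                     /* the timeout constants are in milliseconds;
                      * rte_eal_alarm_set() expects microseconds, hence the * 1000 */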
2970                 if (rte_eal_alarm_set(timeout * 1000,
2971                                       ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
2972                         PMD_DRV_LOG(ERR, "Error setting alarm");
2973         } else {
2974                 PMD_DRV_LOG(DEBUG, "enable intr immediately");
2975                 ixgbe_enable_intr(dev);
2976                 rte_intr_enable(&(dev->pci_dev->intr_handle));
2977         }
2978
2979
2980         return 0;
2981 }
2982
2983 static int
2984 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
2985 {
2986         struct ixgbe_hw *hw =
2987                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2988
2989         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2990         ixgbevf_intr_enable(hw);
2991         rte_intr_enable(&dev->pci_dev->intr_handle);
2992         return 0;
2993 }
2994
2995 /**
2996  * Interrupt handler registered as an alarm callback for delayed handling of
2997  * a specific interrupt, waiting for the NIC state to become stable. The
2998  * ixgbe interrupt state is not stable right after the link goes down, so it
2999  * needs to wait 4 seconds to get a stable status.
3000  *
3001  * @param handle
3002  *  Pointer to interrupt handle.
3003  * @param param
3004  *  The address of the parameter (struct rte_eth_dev *) registered before.
3005  *
3006  * @return
3007  *  void
3008  */
3009 static void
3010 ixgbe_dev_interrupt_delayed_handler(void *param)
3011 {
3012         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3013         struct ixgbe_interrupt *intr =
3014                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3015         struct ixgbe_hw *hw =
3016                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3017         uint32_t eicr;
3018
3019         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3020         if (eicr & IXGBE_EICR_MAILBOX)
3021                 ixgbe_pf_mbx_process(dev);
3022
3023         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3024                 ixgbe_dev_link_update(dev, 0);
3025                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3026                 ixgbe_dev_link_status_print(dev);
3027                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3028         }
3029
3030         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3031         ixgbe_enable_intr(dev);
3032         rte_intr_enable(&(dev->pci_dev->intr_handle));
3033 }
3034
3035 /**
3036  * Interrupt handler triggered by the NIC for handling a
3037  * specific interrupt.
3038  *
3039  * @param handle
3040  *  Pointer to interrupt handle.
3041  * @param param
3042  *  The address of the parameter (struct rte_eth_dev *) registered before.
3043  *
3044  * @return
3045  *  void
3046  */
3047 static void
3048 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3049                             void *param)
3050 {
3051         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3052
3053         ixgbe_dev_interrupt_get_status(dev);
3054         ixgbe_dev_interrupt_action(dev);
3055 }
3056
3057 static void
3058 ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3059                               void *param)
3060 {
3061         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3062
3063         ixgbevf_dev_interrupt_get_status(dev);
3064         ixgbevf_dev_interrupt_action(dev);
3065 }
3066
3067 static int
3068 ixgbe_dev_led_on(struct rte_eth_dev *dev)
3069 {
3070         struct ixgbe_hw *hw;
3071
3072         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3073         return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
3074 }
3075
3076 static int
3077 ixgbe_dev_led_off(struct rte_eth_dev *dev)
3078 {
3079         struct ixgbe_hw *hw;
3080
3081         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3082         return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
3083 }
3084
3085 static int
3086 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3087 {
3088         struct ixgbe_hw *hw;
3089         uint32_t mflcn_reg;
3090         uint32_t fccfg_reg;
3091         int rx_pause;
3092         int tx_pause;
3093
3094         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3095
3096         fc_conf->pause_time = hw->fc.pause_time;
3097         fc_conf->high_water = hw->fc.high_water[0];
3098         fc_conf->low_water = hw->fc.low_water[0];
3099         fc_conf->send_xon = hw->fc.send_xon;
3100         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3101
3102         /*
3103          * Return rx_pause status according to actual setting of
3104          * MFLCN register.
3105          */
3106         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3107         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
3108                 rx_pause = 1;
3109         else
3110                 rx_pause = 0;
3111
3112         /*
3113          * Return tx_pause status according to actual setting of
3114          * FCCFG register.
3115          */
3116         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3117         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
3118                 tx_pause = 1;
3119         else
3120                 tx_pause = 0;
3121
3122         if (rx_pause && tx_pause)
3123                 fc_conf->mode = RTE_FC_FULL;
3124         else if (rx_pause)
3125                 fc_conf->mode = RTE_FC_RX_PAUSE;
3126         else if (tx_pause)
3127                 fc_conf->mode = RTE_FC_TX_PAUSE;
3128         else
3129                 fc_conf->mode = RTE_FC_NONE;
3130
3131         return 0;
3132 }
3133
3134 static int
3135 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3136 {
3137         struct ixgbe_hw *hw;
3138         int err;
3139         uint32_t rx_buf_size;
3140         uint32_t max_high_water;
3141         uint32_t mflcn;
3142         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
3143                 ixgbe_fc_none,
3144                 ixgbe_fc_rx_pause,
3145                 ixgbe_fc_tx_pause,
3146                 ixgbe_fc_full
3147         };
3148
3149         PMD_INIT_FUNC_TRACE();
3150
3151         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3152         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
3153         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3154
3155         /*
3156          * Reserve at least one full Ethernet frame for the watermarks;
3157          * high_water/low_water are expressed in kilobytes for ixgbe.
3158          */
3159         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
3160         if ((fc_conf->high_water > max_high_water) ||
3161                 (fc_conf->high_water < fc_conf->low_water)) {
3162                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3163                 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3164                 return (-EINVAL);
3165         }
3166
3167         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
3168         hw->fc.pause_time     = fc_conf->pause_time;
3169         hw->fc.high_water[0]  = fc_conf->high_water;
3170         hw->fc.low_water[0]   = fc_conf->low_water;
3171         hw->fc.send_xon       = fc_conf->send_xon;
3172         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3173
3174         err = ixgbe_fc_enable(hw);
3175
3176         /* Not negotiated is not an error case */
3177         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
3178
3179                 /* check if we want to forward MAC frames - driver doesn't have native
3180                  * capability to do that, so we'll write the registers ourselves */
3181
3182                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3183
3184                 /* set or clear MFLCN.PMCF bit depending on configuration */
3185                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3186                         mflcn |= IXGBE_MFLCN_PMCF;
3187                 else
3188                         mflcn &= ~IXGBE_MFLCN_PMCF;
3189
3190                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
3191                 IXGBE_WRITE_FLUSH(hw);
3192
3193                 return 0;
3194         }
3195
3196         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
3197         return -EIO;
3198 }
3199
3200 /**
3201  *  ixgbe_dcb_pfc_enable_generic - Enable priority flow control
3202  *  @hw: pointer to hardware structure
3203  *  @tc_num: traffic class number
3204  *  Enable flow control according to the current settings.
3205  */
3206 static int
3207 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
3208 {
3209         int ret_val = 0;
3210         uint32_t mflcn_reg, fccfg_reg;
3211         uint32_t reg;
3212         uint32_t fcrtl, fcrth;
3213         uint8_t i;
3214         uint8_t nb_rx_en;
3215
3216         /* Validate the water mark configuration */
3217         if (!hw->fc.pause_time) {
3218                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3219                 goto out;
3220         }
3221
3222         /* Low water mark of zero causes XOFF floods */
3223         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
3224                  /* High/Low water marks cannot be 0 */
3225                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
3226                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3227                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3228                         goto out;
3229                 }
3230
3231                 if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
3232                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3233                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3234                         goto out;
3235                 }
3236         }
3237         /* Negotiate the fc mode to use */
3238         ixgbe_fc_autoneg(hw);
3239
3240         /* Disable any previous flow control settings */
3241         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3242         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
3243
3244         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3245         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
3246
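             /* program Rx pause via MFLCN and Tx pause via FCCFG
              * according to the negotiated flow control mode */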
3247         switch (hw->fc.current_mode) {
3248         case ixgbe_fc_none:
3249                 /*
3250                  * If more than one RX priority flow control entry is enabled,
3251                  * TX pause cannot be disabled.
3252                  */
3253                 nb_rx_en = 0;
3254                 for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3255                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3256                         if (reg & IXGBE_FCRTH_FCEN)
3257                                 nb_rx_en++;
3258                 }
3259                 if (nb_rx_en > 1)
3260                         fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
3261                 break;
3262         case ixgbe_fc_rx_pause:
3263                 /*
3264                  * Rx Flow control is enabled and Tx Flow control is
3265                  * disabled by software override. Since there really
3266                  * isn't a way to advertise that we are capable of RX
3267                  * Pause ONLY, we will advertise that we support both
3268                  * symmetric and asymmetric Rx PAUSE.  Later, we will
3269                  * disable the adapter's ability to send PAUSE frames.
3270                  */
3271                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
3272                 /*
3273                  * If more than one RX priority flow control entry is enabled,
3274                  * TX pause cannot be disabled.
3275                  */
3276                 nb_rx_en = 0;
3277                 for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3278                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3279                         if (reg & IXGBE_FCRTH_FCEN)
3280                                 nb_rx_en++;
3281                 }
3282                 if (nb_rx_en > 1)
3283                         fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
3284                 break;
3285         case ixgbe_fc_tx_pause:
3286                 /*
3287                  * Tx Flow control is enabled, and Rx Flow control is
3288                  * disabled by software override.
3289                  */
3290                 fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
3291                 break;
3292         case ixgbe_fc_full:
3293                 /* Flow control (both Rx and Tx) is enabled by SW override. */
3294                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
3295                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3296                 break;
3297         default:
3298                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
3299                 ret_val = IXGBE_ERR_CONFIG;
3300                 goto out;
3301                 break;
3302         }
3303
3304         /* Set 802.3x based flow control settings. */
3305         mflcn_reg |= IXGBE_MFLCN_DPF;
3306         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
3307         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
3308
3309         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
3310         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
3311                 hw->fc.high_water[tc_num]) {
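                 /* high_water/low_water are kept in KB; shifting left by 10
                  * converts them to the byte-based values FCRTL/FCRTH expect */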
3312                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
3313                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
3314                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
3315         } else {
3316                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
3317                 /*
3318                  * In order to prevent Tx hangs when the internal Tx
3319                  * switch is enabled we must set the high water mark
3320                  * to the maximum FCRTH value.  This allows the Tx
3321                  * switch to function even under heavy Rx workloads.
3322                  */
3323                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
3324         }
3325         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
3326
3327         /* Configure pause time (2 TCs per register) */
3328         reg = hw->fc.pause_time * 0x00010001;
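             /* multiplying by 0x00010001 replicates the 16-bit pause time into
              * both halves of each 32-bit FCTTV register */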
3329         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
3330                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
3331
3332         /* Configure flow control refresh threshold value */
3333         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
3334
3335 out:
3336         return ret_val;
3337 }
3338
3339 static int
3340 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
3341 {
3342         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3343         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
3344
3345         if(hw->mac.type != ixgbe_mac_82598EB) {
3346                 ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
3347         }
3348         return ret_val;
3349 }
3350
3351 static int
3352 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
3353 {
3354         int err;
3355         uint32_t rx_buf_size;
3356         uint32_t max_high_water;
3357         uint8_t tc_num;
3358         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
3359         struct ixgbe_hw *hw =
3360                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3361         struct ixgbe_dcb_config *dcb_config =
3362                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3363
3364         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
3365                 ixgbe_fc_none,
3366                 ixgbe_fc_rx_pause,
3367                 ixgbe_fc_tx_pause,
3368                 ixgbe_fc_full
3369         };
3370
3371         PMD_INIT_FUNC_TRACE();
3372
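             /* map the requested user priority to its traffic class so the
              * per-TC packet buffer and watermark registers are used below */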
3373         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3374         tc_num = map[pfc_conf->priority];
3375         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
3376         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3377         /*
3378          * Reserve at least one full Ethernet frame for the watermarks;
3379          * high_water/low_water are expressed in kilobytes for ixgbe.
3380          */
3381         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
3382         if ((pfc_conf->fc.high_water > max_high_water) ||
3383             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
3384                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3385                 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3386                 return (-EINVAL);
3387         }
3388
3389         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
3390         hw->fc.pause_time = pfc_conf->fc.pause_time;
3391         hw->fc.send_xon = pfc_conf->fc.send_xon;
3392         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
3393         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3394
3395         err = ixgbe_dcb_pfc_enable(dev,tc_num);
3396
3397         /* Not negotiated is not an error case */
3398         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
3399                 return 0;
3400
3401         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
3402         return -EIO;
3403 }
3404
3405 static int
3406 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3407                           struct rte_eth_rss_reta_entry64 *reta_conf,
3408                           uint16_t reta_size)
3409 {
3410         uint8_t i, j, mask;
3411         uint32_t reta, r;
3412         uint16_t idx, shift;
3413         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3414         uint16_t sp_reta_size;
3415         uint32_t reta_reg;
3416
3417         PMD_INIT_FUNC_TRACE();
3418
3419         if (!ixgbe_rss_update_sp(hw->mac.type)) {
3420                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3421                         "NIC.");
3422                 return -ENOTSUP;
3423         }
3424
3425         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3426         if (reta_size != sp_reta_size) {
3427                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3428                         "(%d) doesn't match the number hardware can supported "
3429                         "(%d)\n", reta_size, sp_reta_size);
3430                 return -EINVAL;
3431         }
3432
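             /* the RETA is written 4 entries at a time: each 32-bit register
              * holds four 8-bit queue indexes, and 'mask' selects which of the
              * four entries in the current register may be modified */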
3433         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
3434                 idx = i / RTE_RETA_GROUP_SIZE;
3435                 shift = i % RTE_RETA_GROUP_SIZE;
3436                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3437                                                 IXGBE_4_BIT_MASK);
3438                 if (!mask)
3439                         continue;
3440                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3441                 if (mask == IXGBE_4_BIT_MASK)
3442                         r = 0;
3443                 else
3444                         r = IXGBE_READ_REG(hw, reta_reg);
3445                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
3446                         if (mask & (0x1 << j))
3447                                 reta |= reta_conf[idx].reta[shift + j] <<
3448                                                         (CHAR_BIT * j);
3449                         else
3450                                 reta |= r & (IXGBE_8_BIT_MASK <<
3451                                                 (CHAR_BIT * j));
3452                 }
3453                 IXGBE_WRITE_REG(hw, reta_reg, reta);
3454         }
3455
3456         return 0;
3457 }
3458
3459 static int
3460 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3461                          struct rte_eth_rss_reta_entry64 *reta_conf,
3462                          uint16_t reta_size)
3463 {
3464         uint8_t i, j, mask;
3465         uint32_t reta;
3466         uint16_t idx, shift;
3467         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3468         uint16_t sp_reta_size;
3469         uint32_t reta_reg;
3470
3471         PMD_INIT_FUNC_TRACE();
3472         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3473         if (reta_size != sp_reta_size) {
3474                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3475                         "(%d) doesn't match the number hardware can supported "
3476                         "(%d)\n", reta_size, sp_reta_size);
3477                 return -EINVAL;
3478         }
3479
3480         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
3481                 idx = i / RTE_RETA_GROUP_SIZE;
3482                 shift = i % RTE_RETA_GROUP_SIZE;
3483                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3484                                                 IXGBE_4_BIT_MASK);
3485                 if (!mask)
3486                         continue;
3487
3488                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3489                 reta = IXGBE_READ_REG(hw, reta_reg);
3490                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
3491                         if (mask & (0x1 << j))
3492                                 reta_conf[idx].reta[shift + j] =
3493                                         ((reta >> (CHAR_BIT * j)) &
3494                                                 IXGBE_8_BIT_MASK);
3495                 }
3496         }
3497
3498         return 0;
3499 }
3500
3501 static void
3502 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
3503                                 uint32_t index, uint32_t pool)
3504 {
3505         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3506         uint32_t enable_addr = 1;
3507
3508         ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
3509 }
3510
3511 static void
3512 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3513 {
3514         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3515
3516         ixgbe_clear_rar(hw, index);
3517 }
3518
3519 static void
3520 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
3521 {
3522         ixgbe_remove_rar(dev, 0);
3523
3524         ixgbe_add_rar(dev, addr, 0, 0);
3525 }
3526
3527 static int
3528 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3529 {
3530         uint32_t hlreg0;
3531         uint32_t maxfrs;
3532         struct ixgbe_hw *hw;
3533         struct rte_eth_dev_info dev_info;
3534         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3535
3536         ixgbe_dev_info_get(dev, &dev_info);
3537
3538         /* check that mtu is within the allowed range */
3539         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
3540                 return -EINVAL;
3541
3542         /* refuse an MTU that requires scattered-packet support when that
3543          * feature has not been enabled beforehand. */
3544         if (!dev->data->scattered_rx &&
3545             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
3546              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
3547                 return -EINVAL;
3548
3549         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3550         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3551
3552         /* switch to jumbo mode if needed */
3553         if (frame_size > ETHER_MAX_LEN) {
3554                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3555                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3556         } else {
3557                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3558                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
3559         }
3560         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3561
3562         /* update max frame size */
3563         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3564
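             /* MAXFRS keeps the maximum frame size in its upper 16 bits;
              * preserve the lower bits and update only that field */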
3565         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
3566         maxfrs &= 0x0000FFFF;
3567         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
3568         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
3569
3570         return 0;
3571 }
3572
3573 /*
3574  * Virtual Function operations
3575  */
3576 static void
3577 ixgbevf_intr_disable(struct ixgbe_hw *hw)
3578 {
3579         PMD_INIT_FUNC_TRACE();
3580
3581         /* Clear the interrupt mask to stop interrupts from being generated */
3582         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
3583
3584         IXGBE_WRITE_FLUSH(hw);
3585 }
3586
3587 static void
3588 ixgbevf_intr_enable(struct ixgbe_hw *hw)
3589 {
3590         PMD_INIT_FUNC_TRACE();
3591
3592         /* Enable VF interrupt auto-mask and auto-clear, then unmask all causes */
3593         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
3594         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
3595         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
3596
3597         IXGBE_WRITE_FLUSH(hw);
3598 }
3599
3600 static int
3601 ixgbevf_dev_configure(struct rte_eth_dev *dev)
3602 {
3603         struct rte_eth_conf* conf = &dev->data->dev_conf;
3604         struct ixgbe_adapter *adapter =
3605                         (struct ixgbe_adapter *)dev->data->dev_private;
3606
3607         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3608                      dev->data->port_id);
3609
3610         /*
3611          * The VF has no ability to enable/disable HW CRC stripping;
3612          * keep the behavior consistent with the host PF.
3613          */
3614 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
3615         if (!conf->rxmode.hw_strip_crc) {
3616                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
3617                 conf->rxmode.hw_strip_crc = 1;
3618         }
3619 #else
3620         if (conf->rxmode.hw_strip_crc) {
3621                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
3622                 conf->rxmode.hw_strip_crc = 0;
3623         }
3624 #endif
3625
3626         /*
3627          * Initialize to TRUE. If any Rx queue does not meet the bulk
3628          * allocation or vector Rx preconditions, this will be reset.
3629          */
3630         adapter->rx_bulk_alloc_allowed = true;
3631         adapter->rx_vec_allowed = true;
3632
3633         return 0;
3634 }
3635
3636 static int
3637 ixgbevf_dev_start(struct rte_eth_dev *dev)
3638 {
3639         struct ixgbe_hw *hw =
3640                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3641         uint32_t intr_vector = 0;
3642         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3643
3644         int err, mask = 0;
3645
3646         PMD_INIT_FUNC_TRACE();
3647
3648         hw->mac.ops.reset_hw(hw);
3649         hw->mac.get_link_status = true;
3650
3651         /* negotiate mailbox API version to use with the PF. */
3652         ixgbevf_negotiate_api(hw);
3653
3654         ixgbevf_dev_tx_init(dev);
3655
3656         /* This can fail when allocating mbufs for descriptor rings */
3657         err = ixgbevf_dev_rx_init(dev);
3658         if (err) {
3659                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
3660                 ixgbe_dev_clear_queues(dev);
3661                 return err;
3662         }
3663
3664         /* Set vfta */
3665         ixgbevf_set_vfta_all(dev,1);
3666
3667         /* Set HW strip */
3668         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
3669                 ETH_VLAN_EXTEND_MASK;
3670         ixgbevf_vlan_offload_set(dev, mask);
3671
3672         ixgbevf_dev_rxtx_start(dev);
3673
3674         /* check and configure queue intr-vector mapping */
3675         if (dev->data->dev_conf.intr_conf.rxq != 0)
3676                 intr_vector = dev->data->nb_rx_queues;
3677
3678         if (rte_intr_efd_enable(intr_handle, intr_vector))
3679                 return -1;
3680
3681         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3682                 intr_handle->intr_vec =
3683                         rte_zmalloc("intr_vec",
3684                                     dev->data->nb_rx_queues * sizeof(int), 0);
3685                 if (intr_handle->intr_vec == NULL) {
3686                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
3687                                      " intr_vec\n", dev->data->nb_rx_queues);
3688                         return -ENOMEM;
3689                 }
3690         }
3691         ixgbevf_configure_msix(dev);
3692
3693         if (dev->data->dev_conf.intr_conf.lsc != 0) {
3694                 if (rte_intr_allow_others(intr_handle))
3695                         rte_intr_callback_register(intr_handle,
3696                                         ixgbevf_dev_interrupt_handler,
3697                                         (void *)dev);
3698                 else
3699                         PMD_INIT_LOG(INFO, "lsc won't enable because of"
3700                                      " no intr multiplex\n");
3701         }
3702
3703         rte_intr_enable(intr_handle);
3704
3705         /* Re-enable interrupt for VF */
3706         ixgbevf_intr_enable(hw);
3707
3708         return 0;
3709 }
3710
3711 static void
3712 ixgbevf_dev_stop(struct rte_eth_dev *dev)
3713 {
3714         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3715         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3716
3717         PMD_INIT_FUNC_TRACE();
3718
3719         hw->adapter_stopped = 1;
3720         ixgbe_stop_adapter(hw);
3721
3722         /*
3723          * Clear what we set, but keep shadow_vfta so it can be
3724          * restored after the device starts again.
3725          */
3726         ixgbevf_set_vfta_all(dev,0);
3727
3728         /* Clear stored conf */
3729         dev->data->scattered_rx = 0;
3730
3731         ixgbe_dev_clear_queues(dev);
3732
3733         /* disable intr eventfd mapping */
3734         rte_intr_disable(intr_handle);
3735
3736         /* Clean datapath event and queue/vec mapping */
3737         rte_intr_efd_disable(intr_handle);
3738         if (intr_handle->intr_vec != NULL) {
3739                 rte_free(intr_handle->intr_vec);
3740                 intr_handle->intr_vec = NULL;
3741         }
3742 }
3743
3744 static void
3745 ixgbevf_dev_close(struct rte_eth_dev *dev)
3746 {
3747         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3748         struct rte_pci_device *pci_dev;
3749
3750         PMD_INIT_FUNC_TRACE();
3751
3752         ixgbe_reset_hw(hw);
3753
3754         ixgbevf_dev_stop(dev);
3755
3756         ixgbe_dev_free_queues(dev);
3757
3758         /* reprogram the RAR[0] in case user changed it. */
3759         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
3760
3761         pci_dev = dev->pci_dev;
3762         if (pci_dev->intr_handle.intr_vec) {
3763                 rte_free(pci_dev->intr_handle.intr_vec);
3764                 pci_dev->intr_handle.intr_vec = NULL;
3765         }
3766 }
3767
3768 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3769 {
3770         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3771         struct ixgbe_vfta * shadow_vfta =
3772                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3773         int i = 0, j = 0, vfta = 0, mask = 1;
3774
3775         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3776                 vfta = shadow_vfta->vfta[i];
3777                 if (vfta) {
3778                         mask = 1;
3779                         for (j = 0; j < 32; j++) {
3780                                 if (vfta & mask)
3781                                         ixgbe_set_vfta(hw, (i << 5) + j, 0, on);
3782                                 mask <<= 1;
3783                         }
3784                 }
3785         }
3786
3787 }
3788
3789 static int
3790 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3791 {
3792         struct ixgbe_hw *hw =
3793                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3794         struct ixgbe_vfta * shadow_vfta =
3795                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3796         uint32_t vid_idx = 0;
3797         uint32_t vid_bit = 0;
3798         int ret = 0;
3799
3800         PMD_INIT_FUNC_TRACE();
3801
3802         /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
3803         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
3804         if(ret){
3805                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3806                 return ret;
3807         }
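             /* each 32-bit word of the shadow VFTA covers 32 VLAN IDs:
              * bits [11:5] of the VLAN ID select the word, bits [4:0] the bit */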
3808         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3809         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3810
3811         /* Save what we set and restore it after device reset */
3812         if (on)
3813                 shadow_vfta->vfta[vid_idx] |= vid_bit;
3814         else
3815                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
3816
3817         return 0;
3818 }
3819
3820 static void
3821 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
3822 {
3823         struct ixgbe_hw *hw =
3824                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3825         uint32_t ctrl;
3826
3827         PMD_INIT_FUNC_TRACE();
3828
3829         if(queue >= hw->mac.max_rx_queues)
3830                 return;
3831
3832         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
3833         if(on)
3834                 ctrl |= IXGBE_RXDCTL_VME;
3835         else
3836                 ctrl &= ~IXGBE_RXDCTL_VME;
3837         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
3838
3839         ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
3840 }
3841
3842 static void
3843 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3844 {
3845         struct ixgbe_hw *hw =
3846                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3847         uint16_t i;
3848         int on = 0;
3849
3850         /* The VF only supports the HW VLAN strip feature; other offloads are not supported */
3851         if (mask & ETH_VLAN_STRIP_MASK) {
3852                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
3853
3854                 for (i = 0; i < hw->mac.max_rx_queues; i++)
3855                         ixgbevf_vlan_strip_queue_set(dev, i, on);
3856         }
3857 }
3858
3859 static int
3860 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
3861 {
3862         uint32_t reg_val;
3863
3864         /* these per-pool settings require VMDq to be enabled, so verify it */
3865         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3866         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
3867                 PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
3868                 return (-1);
3869         }
3870
3871         return 0;
3872 }
3873
3874 static uint32_t
3875 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
3876 {
3877         uint32_t vector = 0;
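             /* build a 12-bit hash from the upper bytes of the MAC address;
              * mc_filter_type selects which bit window of the address is used */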
3878         switch (hw->mac.mc_filter_type) {
3879         case 0:   /* use bits [47:36] of the address */
3880                 vector = ((uc_addr->addr_bytes[4] >> 4) |
3881                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
3882                 break;
3883         case 1:   /* use bits [46:35] of the address */
3884                 vector = ((uc_addr->addr_bytes[4] >> 3) |
3885                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
3886                 break;
3887         case 2:   /* use bits [45:34] of the address */
3888                 vector = ((uc_addr->addr_bytes[4] >> 2) |
3889                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
3890                 break;
3891         case 3:   /* use bits [43:32] of the address */
3892                 vector = ((uc_addr->addr_bytes[4]) |
3893                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
3894                 break;
3895         default:  /* Invalid mc_filter_type */
3896                 break;
3897         }
3898
3899         /* vector can only be 12-bits or boundary will be exceeded */
3900         vector &= 0xFFF;
3901         return vector;
3902 }
3903
3904 static int
3905 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
3906                                uint8_t on)
3907 {
3908         uint32_t vector;
3909         uint32_t uta_idx;
3910         uint32_t reg_val;
3911         uint32_t uta_shift;
3912         uint32_t rc;
3913         const uint32_t ixgbe_uta_idx_mask = 0x7F;
3914         const uint32_t ixgbe_uta_bit_shift = 5;
3915         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
3916         const uint32_t bit1 = 0x1;
3917
3918         struct ixgbe_hw *hw =
3919                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3920         struct ixgbe_uta_info *uta_info =
3921                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
3922
3923         /* The UTA table only exists on 82599 hardware and newer */
3924         if (hw->mac.type < ixgbe_mac_82599EB)
3925                 return (-ENOTSUP);
3926
3927         vector = ixgbe_uta_vector(hw,mac_addr);
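             /* split the 12-bit vector into a UTA register index (upper 7 bits)
              * and a bit position within that 32-bit register (lower 5 bits) */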
3928         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
3929         uta_shift = vector & ixgbe_uta_bit_mask;
3930
3931         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
3932         if(rc == on)
3933                 return 0;
3934
3935         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
3936         if (on) {
3937                 uta_info->uta_in_use++;
3938                 reg_val |= (bit1 << uta_shift);
3939                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
3940         } else {
3941                 uta_info->uta_in_use--;
3942                 reg_val &= ~(bit1 << uta_shift);
3943                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
3944         }
3945
3946         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
3947
3948         if (uta_info->uta_in_use > 0)
3949                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
3950                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
3951         else
3952                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
3953
3954         return 0;
3955 }
3956
3957 static int
3958 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3959 {
3960         int i;
3961         struct ixgbe_hw *hw =
3962                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3963         struct ixgbe_uta_info *uta_info =
3964                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
3965
3966         /* The UTA table only exists on 82599 hardware and newer */
3967         if (hw->mac.type < ixgbe_mac_82599EB)
3968                 return (-ENOTSUP);
3969
3970         if(on) {
3971                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3972                         uta_info->uta_shadow[i] = ~0;
3973                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
3974                 }
3975         } else {
3976                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3977                         uta_info->uta_shadow[i] = 0;
3978                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3979                 }
3980         }
3981         return 0;
3982
3983 }
3984
3985 uint32_t
3986 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3987 {
3988         uint32_t new_val = orig_val;
3989
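             /* translate ETH_VMDQ_ACCEPT_* flags into the corresponding VMOLR
              * bits, keeping any bits already set in orig_val */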
3990         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3991                 new_val |= IXGBE_VMOLR_AUPE;
3992         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3993                 new_val |= IXGBE_VMOLR_ROMPE;
3994         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3995                 new_val |= IXGBE_VMOLR_ROPE;
3996         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3997                 new_val |= IXGBE_VMOLR_BAM;
3998         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3999                 new_val |= IXGBE_VMOLR_MPE;
4000
4001         return new_val;
4002 }
4003
4004 static int
4005 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
4006                                uint16_t rx_mask, uint8_t on)
4007 {
4008         int val = 0;
4009
4010         struct ixgbe_hw *hw =
4011                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4012         uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4013
4014         if (hw->mac.type == ixgbe_mac_82598EB) {
4015                 PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
4016                              " on 82599 hardware and newer");
4017                 return (-ENOTSUP);
4018         }
4019         if (ixgbe_vmdq_mode_check(hw) < 0)
4020                 return (-ENOTSUP);
4021
4022         val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
4023
4024         if (on)
4025                 vmolr |= val;
4026         else
4027                 vmolr &= ~val;
4028
4029         IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4030
4031         return 0;
4032 }
4033
4034 static int
4035 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4036 {
4037         uint32_t reg,addr;
4038         uint32_t val;
4039         const uint8_t bit1 = 0x1;
4040
4041         struct ixgbe_hw *hw =
4042                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4043
4044         if (ixgbe_vmdq_mode_check(hw) < 0)
4045                 return (-ENOTSUP);
4046
4047         addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
4048         reg = IXGBE_READ_REG(hw, addr);
4049         val = bit1 << (pool & 0x1F); /* each VFRE register covers 32 pools */
4050
4051         if (on)
4052                 reg |= val;
4053         else
4054                 reg &= ~val;
4055
4056         IXGBE_WRITE_REG(hw, addr,reg);
4057
4058         return 0;
4059 }
4060
4061 static int
4062 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4063 {
4064         uint32_t reg,addr;
4065         uint32_t val;
4066         const uint8_t bit1 = 0x1;
4067
4068         struct ixgbe_hw *hw =
4069                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4070
4071         if (ixgbe_vmdq_mode_check(hw) < 0)
4072                 return (-ENOTSUP);
4073
4074         addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
4075         reg = IXGBE_READ_REG(hw, addr);
4076         val = bit1 << (pool & 0x1F); /* each VFTE register covers 32 pools */
4077
4078         if (on)
4079                 reg |= val;
4080         else
4081                 reg &= ~val;
4082
4083         IXGBE_WRITE_REG(hw, addr,reg);
4084
4085         return 0;
4086 }
4087
4088 static int
4089 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
4090                         uint64_t pool_mask, uint8_t vlan_on)
4091 {
4092         int ret = 0;
4093         uint16_t pool_idx;
4094         struct ixgbe_hw *hw =
4095                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4096
4097         if (ixgbe_vmdq_mode_check(hw) < 0)
4098                 return (-ENOTSUP);
4099         for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
4100                 if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
4101                         ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
4102                         if (ret < 0)
4103                                 return ret;
                }
4104         }
4105
4106         return ret;
4107 }
4108
4109 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
4110 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
4111 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
4112 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
4113 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
4114         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
4115         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
4116
4117 static int
4118 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
4119                         struct rte_eth_mirror_conf *mirror_conf,
4120                         uint8_t rule_id, uint8_t on)
4121 {
4122         uint32_t mr_ctl, vlvf;
4123         uint32_t mp_lsb = 0;
4124         uint32_t mv_msb = 0;
4125         uint32_t mv_lsb = 0;
4126         uint32_t mp_msb = 0;
4127         uint8_t i = 0;
4128         int reg_index = 0;
4129         uint64_t vlan_mask = 0;
4130
4131         const uint8_t pool_mask_offset = 32;
4132         const uint8_t vlan_mask_offset = 32;
4133         const uint8_t dst_pool_offset = 8;
4134         const uint8_t rule_mr_offset  = 4;
4135         const uint8_t mirror_rule_mask = 0x0F;
4136
4137         struct ixgbe_mirror_info *mr_info =
4138                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
4139         struct ixgbe_hw *hw =
4140                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4141         uint8_t mirror_type = 0;
4142
4143         if (ixgbe_vmdq_mode_check(hw) < 0)
4144                 return -ENOTSUP;
4145
4146         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
4147                 return -EINVAL;
4148
4149         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
4150                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
4151                         mirror_conf->rule_type);
4152                 return -EINVAL;
4153         }
4154
4155         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
4156                 mirror_type |= IXGBE_MRCTL_VLME;
4157                 /* Check if the VLAN id is valid and find the corresponding VLAN ID index in VLVF */
4158                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
4159                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
4160                                 /* search vlan id related pool vlan filter index */
4161                                 reg_index = ixgbe_find_vlvf_slot(hw,
4162                                                 mirror_conf->vlan.vlan_id[i]);
4163                                 if (reg_index < 0)
4164                                         return -EINVAL;
4165                                 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
4166                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
4167                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
4168                                       mirror_conf->vlan.vlan_id[i]))
4169                                         vlan_mask |= (1ULL << reg_index);
4170                                 else
4171                                         return -EINVAL;
4172                         }
4173                 }
4174
4175                 if (on) {
4176                         mv_lsb = vlan_mask & 0xFFFFFFFF;
4177                         mv_msb = vlan_mask >> vlan_mask_offset;
4178
4179                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
4180                                                 mirror_conf->vlan.vlan_mask;
4181                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
4182                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
4183                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
4184                                                 mirror_conf->vlan.vlan_id[i];
4185                         }
4186                 } else {
4187                         mv_lsb = 0;
4188                         mv_msb = 0;
4189                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
4190                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
4191                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
4192                 }
4193         }
4194
4195         /*
4196          * If pool mirroring is enabled, write the related pool mask register;
4197          * if it is disabled, clear the PFMRVM register.
4198          */
4199         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
4200                 mirror_type |= IXGBE_MRCTL_VPME;
4201                 if (on) {
4202                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
4203                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
4204                         mr_info->mr_conf[rule_id].pool_mask =
4205                                         mirror_conf->pool_mask;
4206
4207                 } else {
4208                         mp_lsb = 0;
4209                         mp_msb = 0;
4210                         mr_info->mr_conf[rule_id].pool_mask = 0;
4211                 }
4212         }
4213         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
4214                 mirror_type |= IXGBE_MRCTL_UPME;
4215         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
4216                 mirror_type |= IXGBE_MRCTL_DPME;
4217
4218         /* read mirror control register and recalculate it */
4219         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
4220
4221         if (on) {
4222                 mr_ctl |= mirror_type;
4223                 mr_ctl &= mirror_rule_mask;
4224                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
4225         } else
4226                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
4227
4228         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
4229         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
4230
4231         /* write mirror control register */
4232         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
4233
4234         /* write pool mirror control register */
4235         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
4236                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
4237                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
4238                                 mp_msb);
4239         }
4240         /* write VLAN mirror control register */
4241         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
4242                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
4243                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
4244                                 mv_msb);
4245         }
4246
4247         return 0;
4248 }
4249
4250 static int
4251 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
4252 {
4253         int mr_ctl = 0;
4254         uint32_t lsb_val = 0;
4255         uint32_t msb_val = 0;
4256         const uint8_t rule_mr_offset = 4;
4257
4258         struct ixgbe_hw *hw =
4259                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4260         struct ixgbe_mirror_info *mr_info =
4261                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
4262
4263         if (ixgbe_vmdq_mode_check(hw) < 0)
4264                 return (-ENOTSUP);
4265
4266         memset(&mr_info->mr_conf[rule_id], 0,
4267                 sizeof(struct rte_eth_mirror_conf));
4268
4269         /* clear PFVMCTL register */
4270         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
4271
4272         /* clear pool mask register */
4273         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
4274         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
4275
4276         /* clear vlan mask register */
4277         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
4278         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
4279
4280         return 0;
4281 }
4282
4283 static int
4284 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4285 {
4286         uint32_t mask;
4287         struct ixgbe_hw *hw =
4288                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4289
4290         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
4291         mask |= (1 << queue_id);
4292         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
4293
4294         rte_intr_enable(&dev->pci_dev->intr_handle);
4295
4296         return 0;
4297 }
4298
4299 static int
4300 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4301 {
4302         uint32_t mask;
4303         struct ixgbe_hw *hw =
4304                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4305
4306         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
4307         mask &= ~(1 << queue_id);
4308         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
4309
4310         return 0;
4311 }
4312
4313 static int
4314 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4315 {
4316         uint32_t mask;
4317         struct ixgbe_hw *hw =
4318                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4319         struct ixgbe_interrupt *intr =
4320                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4321
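             /*
              * Queue interrupts are spread across three mask registers:
              * queues 0-15 via the EIMS shadow in intr->mask, queues 16-31
              * via EIMS_EX(0), and queues 32-63 via EIMS_EX(1).
              */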
4322         if (queue_id < 16) {
4323                 ixgbe_disable_intr(hw);
4324                 intr->mask |= (1 << queue_id);
4325                 ixgbe_enable_intr(dev);
4326         } else if (queue_id < 32) {
4327                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
4328                 mask |= (1 << queue_id);
4329                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4330         } else if (queue_id < 64) {
4331                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
4332                 mask |= (1 << (queue_id - 32));
4333                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4334         }
4335         rte_intr_enable(&dev->pci_dev->intr_handle);
4336
4337         return 0;
4338 }
4339
4340 static int
4341 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4342 {
4343         uint32_t mask;
4344         struct ixgbe_hw *hw =
4345                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4346         struct ixgbe_interrupt *intr =
4347                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4348
4349         if (queue_id < 16) {
4350                 ixgbe_disable_intr(hw);
4351                 intr->mask &= ~(1 << queue_id);
4352                 ixgbe_enable_intr(dev);
4353         } else if (queue_id < 32) {
4354                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
4355                 mask &= ~(1 << queue_id);
4356                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4357         } else if (queue_id < 64) {
4358                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
4359                 mask &= ~(1 << (queue_id - 32));
4360                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4361         }
4362
4363         return 0;
4364 }
4365
4366 static void
4367 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
4368                      uint8_t queue, uint8_t msix_vector)
4369 {
4370         uint32_t tmp, idx;
4371
4372         if (direction == -1) {
4373                 /* other causes */
4374                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4375                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
4376                 tmp &= ~0xFF;
4377                 tmp |= msix_vector;
4378                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
4379         } else {
4380                 /* rx or tx cause */
4381                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
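                     /*
                      * Each VTIVAR register packs the Rx and Tx vector bytes
                      * for a pair of queues; idx selects the byte to update.
                      */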
4382                 idx = ((16 * (queue & 1)) + (8 * direction));
4383                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
4384                 tmp &= ~(0xFF << idx);
4385                 tmp |= (msix_vector << idx);
4386                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
4387         }
4388 }
4389
4390 /**
4391  * set the IVAR registers, mapping interrupt causes to vectors
4392  * @param hw
4393  *  pointer to ixgbe_hw struct
4394  * @param direction
4395  *  0 for Rx, 1 for Tx, -1 for other causes
4396  * @param queue
4397  *  queue to map the corresponding interrupt to
4398  * @param msix_vector
4399  *  the vector to map to the corresponding queue
4400  */
4401 static void
4402 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
4403                    uint8_t queue, uint8_t msix_vector)
4404 {
4405         uint32_t tmp, idx;
4406
4407         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4408         if (hw->mac.type == ixgbe_mac_82598EB) {
4409                 if (direction == -1)
4410                         direction = 0;
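                     /*
                      * On 82598 each IVAR register holds four 8-bit entries;
                      * idx selects the register and the shift below selects
                      * the byte for this queue.
                      */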
4411                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
4412                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
4413                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
4414                 tmp |= (msix_vector << (8 * (queue & 0x3)));
4415                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
4416         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
4417                         (hw->mac.type == ixgbe_mac_X540)) {
4418                 if (direction == -1) {
4419                         /* other causes */
4420                         idx = ((queue & 1) * 8);
4421                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4422                         tmp &= ~(0xFF << idx);
4423                         tmp |= (msix_vector << idx);
4424                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
4425                 } else {
4426                         /* rx or tx causes */
4427                         idx = ((16 * (queue & 1)) + (8 * direction));
4428                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
4429                         tmp &= ~(0xFF << idx);
4430                         tmp |= (msix_vector << idx);
4431                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
4432                 }
4433         }
4434 }
4435
4436 static void
4437 ixgbevf_configure_msix(struct rte_eth_dev *dev)
4438 {
4439         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4440         struct ixgbe_hw *hw =
4441                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4442         uint32_t q_idx;
4443         uint32_t vector_idx = 0;
4444
4445         /* won't configure msix register if no mapping is done
4446          * between intr vector and event fd.
4447          */
4448         if (!rte_intr_dp_is_en(intr_handle))
4449                 return;
4450
4451         /* Configure all RX queues of VF */
4452         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
4453                 /* Force all queues to use vector 0,
4454                  * as IXGBE_VF_MAXMSIVECOTR = 1
4455                  */
4456                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
4457                 intr_handle->intr_vec[q_idx] = vector_idx;
4458         }
4459
4460         /* Configure VF other cause ivar */
4461         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
4462 }
4463
4464 /**
4465  * Sets up the hardware to properly generate MSI-X interrupts
4466  * @param dev
4467  *  pointer to the rte_eth_dev structure
4468  */
4469 static void
4470 ixgbe_configure_msix(struct rte_eth_dev *dev)
4471 {
4472         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4473         struct ixgbe_hw *hw =
4474                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4475         uint32_t queue_id, vec = 0;
4476         uint32_t mask;
4477         uint32_t gpie;
4478
4479         /* won't configure msix register if no mapping is done
4480          * between intr vector and event fd
4481          */
4482         if (!rte_intr_dp_is_en(intr_handle))
4483                 return;
4484
4485         /* setup GPIE for MSI-x mode */
4486         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4487         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4488                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
4489         /* auto clearing and auto setting corresponding bits in EIMS
4490          * when MSI-X interrupt is triggered
4491          */
4492         if (hw->mac.type == ixgbe_mac_82598EB) {
4493                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4494         } else {
4495                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4496                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4497         }
4498         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4499
4500         /* Populate the IVAR table and set the ITR values to the
4501          * corresponding register.
4502          */
4503         for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
4504              queue_id++) {
4505                 /* by default, 1:1 mapping */
4506                 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
4507                 intr_handle->intr_vec[queue_id] = vec;
4508                 if (vec < intr_handle->nb_efd - 1)
4509                         vec++;
4510         }
4511
4512         switch (hw->mac.type) {
4513         case ixgbe_mac_82598EB:
4514                 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
4515                                    intr_handle->max_intr - 1);
4516                 break;
4517         case ixgbe_mac_82599EB:
4518         case ixgbe_mac_X540:
4519                 ixgbe_set_ivar_map(hw, -1, 1, intr_handle->max_intr - 1);
4520                 break;
4521         default:
4522                 break;
4523         }
4524         IXGBE_WRITE_REG(hw, IXGBE_EITR(queue_id),
4525                         IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
4526
4527         /* set up to autoclear timer, and the vectors */
4528         mask = IXGBE_EIMS_ENABLE_MASK;
4529         mask &= ~(IXGBE_EIMS_OTHER |
4530                   IXGBE_EIMS_MAILBOX |
4531                   IXGBE_EIMS_LSC);
4532
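             /*
              * Exclude the other-cause, mailbox and link-state bits from
              * EIAC so those causes are not auto-cleared by hardware.
              */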
4533         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4534 }
4535
4536 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
4537         uint16_t queue_idx, uint16_t tx_rate)
4538 {
4539         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4540         uint32_t rf_dec, rf_int;
4541         uint32_t bcnrc_val;
4542         uint16_t link_speed = dev->data->dev_link.link_speed;
4543
4544         if (queue_idx >= hw->mac.max_tx_queues)
4545                 return -EINVAL;
4546
4547         if (tx_rate != 0) {
4548                 /* Calculate the rate factor values to set */
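                     /*
                      * RTTBCNRC holds link_speed / tx_rate as a fixed-point
                      * rate factor: rf_int is the integer part and rf_dec the
                      * remainder scaled up by IXGBE_RTTBCNRC_RF_INT_SHIFT bits,
                      * e.g. link_speed = 10000 and tx_rate = 300 give
                      * rf_int = 33 and
                      * rf_dec = (100 << IXGBE_RTTBCNRC_RF_INT_SHIFT) / 300.
                      */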
4549                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
4550                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
4551                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
4552
4553                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
4554                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
4555                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
4556                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
4557         } else {
4558                 bcnrc_val = 0;
4559         }
4560
4561         /*
4562          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
4563          * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
4564          * set as 0x4.
4565          */
4566         if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
4567                 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
4568                                 IXGBE_MAX_JUMBO_FRAME_SIZE))
4569                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
4570                         IXGBE_MMW_SIZE_JUMBO_FRAME);
4571         else
4572                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
4573                         IXGBE_MMW_SIZE_DEFAULT);
4574
4575         /* Set RTTBCNRC of queue X */
4576         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
4577         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
4578         IXGBE_WRITE_FLUSH(hw);
4579
4580         return 0;
4581 }
4582
4583 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
4584         uint16_t tx_rate, uint64_t q_msk)
4585 {
4586         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4587         struct ixgbe_vf_info *vfinfo =
4588                 *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
4589         uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
4590         uint32_t queue_stride =
4591                 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
4592         uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
4593         uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
4594         uint16_t total_rate = 0;
4595
4596         if (queue_end >= hw->mac.max_tx_queues)
4597                 return -EINVAL;
4598
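             /*
              * Sum the rates already configured for all other VFs; the rate
              * requested for this VF is added per queue below, and the total
              * must not exceed the current link speed.
              */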
4599         if (vfinfo != NULL) {
4600                 for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
4601                         if (vf_idx == vf)
4602                                 continue;
4603                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
4604                                 idx++)
4605                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
4606                 }
4607         } else
4608                 return -EINVAL;
4609
4610         /* Store tx_rate for this vf. */
4611         for (idx = 0; idx < nb_q_per_pool; idx++) {
4612                 if (((uint64_t)0x1 << idx) & q_msk) {
4613                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
4614                                 vfinfo[vf].tx_rate[idx] = tx_rate;
4615                         total_rate += tx_rate;
4616                 }
4617         }
4618
4619         if (total_rate > dev->data->dev_link.link_speed) {
4620                 /*
4621                  * Reset the stored TX rates of the VF if they would exceed
4622                  * the link speed.
4623                  */
4624                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
4625                 return -EINVAL;
4626         }
4627
4628         /* Set RTTBCNRC of each queue/pool for vf X  */
4629         for (; queue_idx <= queue_end; queue_idx++) {
4630                 if (0x1 & q_msk)
4631                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
4632                 q_msk = q_msk >> 1;
4633         }
4634
4635         return 0;
4636 }
4637
4638 static void
4639 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4640                      __attribute__((unused)) uint32_t index,
4641                      __attribute__((unused)) uint32_t pool)
4642 {
4643         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4644         int diag;
4645
4646         /*
4647          * On an 82599 VF, re-adding the same MAC address is not an idempotent
4648          * operation. Trap this case to avoid exhausting the [very limited]
4649          * set of PF resources used to store VF MAC addresses.
4650          */
4651         if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
4652                 return;
4653         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
4654         if (diag == 0)
4655                 return;
4656         PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
4657 }
4658
4659 static void
4660 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
4661 {
4662         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4663         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
4664         struct ether_addr *mac_addr;
4665         uint32_t i;
4666         int diag;
4667
4668         /*
4669          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
4670          * not support the deletion of a given MAC address.
4671          * Instead, all MAC addresses must be deleted and then re-added,
4672          * with the exception of the one to be deleted.
4673          */
4674         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
4675
4676         /*
4677          * Add again all MAC addresses, with the exception of the deleted one
4678          * and of the permanent MAC address.
4679          */
4680         for (i = 0, mac_addr = dev->data->mac_addrs;
4681              i < hw->mac.num_rar_entries; i++, mac_addr++) {
4682                 /* Skip the deleted MAC address */
4683                 if (i == index)
4684                         continue;
4685                 /* Skip NULL MAC addresses */
4686                 if (is_zero_ether_addr(mac_addr))
4687                         continue;
4688                 /* Skip the permanent MAC address */
4689                 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
4690                         continue;
4691                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
4692                 if (diag != 0)
4693                         PMD_DRV_LOG(ERR,
4694                                     "Adding again MAC address "
4695                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
4696                                     "diag=%d",
4697                                     mac_addr->addr_bytes[0],
4698                                     mac_addr->addr_bytes[1],
4699                                     mac_addr->addr_bytes[2],
4700                                     mac_addr->addr_bytes[3],
4701                                     mac_addr->addr_bytes[4],
4702                                     mac_addr->addr_bytes[5],
4703                                     diag);
4704         }
4705 }
4706
4707 static void
4708 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4709 {
4710         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4711
4712         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
4713 }
4714
4715 #define MAC_TYPE_FILTER_SUP(type)    do {\
4716         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
4717                 (type) != ixgbe_mac_X550)\
4718                 return -ENOTSUP;\
4719 } while (0)
4720
4721 static int
4722 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
4723                         struct rte_eth_syn_filter *filter,
4724                         bool add)
4725 {
4726         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4727         uint32_t synqf;
4728
4729         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
4730                 return -EINVAL;
4731
4732         synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
4733
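             /*
              * SYNQF is a single global register, so at most one TCP SYN
              * filter can be programmed at a time.
              */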
4734         if (add) {
4735                 if (synqf & IXGBE_SYN_FILTER_ENABLE)
4736                         return -EINVAL;
4737                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
4738                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
4739
4740                 if (filter->hig_pri)
4741                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
4742                 else
4743                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
4744         } else {
4745                 if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
4746                         return -ENOENT;
4747                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
4748         }
4749         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
4750         IXGBE_WRITE_FLUSH(hw);
4751         return 0;
4752 }
4753
4754 static int
4755 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
4756                         struct rte_eth_syn_filter *filter)
4757 {
4758         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4759         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
4760
4761         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
4762                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
4763                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
4764                 return 0;
4765         }
4766         return -ENOENT;
4767 }
4768
4769 static int
4770 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
4771                         enum rte_filter_op filter_op,
4772                         void *arg)
4773 {
4774         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4775         int ret;
4776
4777         MAC_TYPE_FILTER_SUP(hw->mac.type);
4778
4779         if (filter_op == RTE_ETH_FILTER_NOP)
4780                 return 0;
4781
4782         if (arg == NULL) {
4783                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
4784                             filter_op);
4785                 return -EINVAL;
4786         }
4787
4788         switch (filter_op) {
4789         case RTE_ETH_FILTER_ADD:
4790                 ret = ixgbe_syn_filter_set(dev,
4791                                 (struct rte_eth_syn_filter *)arg,
4792                                 TRUE);
4793                 break;
4794         case RTE_ETH_FILTER_DELETE:
4795                 ret = ixgbe_syn_filter_set(dev,
4796                                 (struct rte_eth_syn_filter *)arg,
4797                                 FALSE);
4798                 break;
4799         case RTE_ETH_FILTER_GET:
4800                 ret = ixgbe_syn_filter_get(dev,
4801                                 (struct rte_eth_syn_filter *)arg);
4802                 break;
4803         default:
4804                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
4805                 ret = -EINVAL;
4806                 break;
4807         }
4808
4809         return ret;
4810 }
4811
4812
4813 static inline enum ixgbe_5tuple_protocol
4814 convert_protocol_type(uint8_t protocol_value)
4815 {
4816         if (protocol_value == IPPROTO_TCP)
4817                 return IXGBE_FILTER_PROTOCOL_TCP;
4818         else if (protocol_value == IPPROTO_UDP)
4819                 return IXGBE_FILTER_PROTOCOL_UDP;
4820         else if (protocol_value == IPPROTO_SCTP)
4821                 return IXGBE_FILTER_PROTOCOL_SCTP;
4822         else
4823                 return IXGBE_FILTER_PROTOCOL_NONE;
4824 }
4825
4826 /*
4827  * add a 5tuple filter
4828  *
4829  * @param
4830  * dev: Pointer to struct rte_eth_dev.
4831  * filter: pointer to the filter that will be added; its queue field is
4832  * the Rx queue id that matching packets are steered to.
4834  *
4835  * @return
4836  *    - On success, zero.
4837  *    - On failure, a negative value.
4838  */
4839 static int
4840 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
4841                         struct ixgbe_5tuple_filter *filter)
4842 {
4843         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4844         struct ixgbe_filter_info *filter_info =
4845                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4846         int i, idx, shift;
4847         uint32_t ftqf, sdpqf;
4848         uint32_t l34timir = 0;
4849         uint8_t mask = 0xff;
4850
4851         /*
4852          * look for an unused 5tuple filter index,
4853          * and insert the filter into the list.
4854          */
4855         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
4856                 idx = i / (sizeof(uint32_t) * NBBY);
4857                 shift = i % (sizeof(uint32_t) * NBBY);
4858                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
4859                         filter_info->fivetuple_mask[idx] |= 1 << shift;
4860                         filter->index = i;
4861                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
4862                                           filter,
4863                                           entries);
4864                         break;
4865                 }
4866         }
4867         if (i >= IXGBE_MAX_FTQF_FILTERS) {
4868                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
4869                 return -ENOSYS;
4870         }
4871
4872         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
4873                                 IXGBE_SDPQF_DSTPORT_SHIFT);
4874         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
4875
4876         ftqf = (uint32_t)(filter->filter_info.proto &
4877                 IXGBE_FTQF_PROTOCOL_MASK);
4878         ftqf |= (uint32_t)((filter->filter_info.priority &
4879                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
4880         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
4881                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
4882         if (filter->filter_info.dst_ip_mask == 0)
4883                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
4884         if (filter->filter_info.src_port_mask == 0)
4885                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
4886         if (filter->filter_info.dst_port_mask == 0)
4887                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
4888         if (filter->filter_info.proto_mask == 0)
4889                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
4890         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
4891         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
4892         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
4893
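             /*
              * Program the per-filter address, port and protocol registers,
              * then steer matching packets to the requested queue.
              */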
4894         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
4895         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
4896         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
4897         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
4898
4899         l34timir |= IXGBE_L34T_IMIR_RESERVE;
4900         l34timir |= (uint32_t)(filter->queue <<
4901                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
4902         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
4903         return 0;
4904 }
4905
4906 /*
4907  * remove a 5tuple filter
4908  *
4909  * @param
4910  * dev: Pointer to struct rte_eth_dev.
4911  * filter: pointer to the filter to be removed.
4912  */
4913 static void
4914 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
4915                         struct ixgbe_5tuple_filter *filter)
4916 {
4917         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4918         struct ixgbe_filter_info *filter_info =
4919                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4920         uint16_t index = filter->index;
4921
4922         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
4923                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
4924         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
4925         rte_free(filter);
4926
4927         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
4928         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
4929         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
4930         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
4931         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
4932 }
4933
4934 static int
4935 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
4936 {
4937         struct ixgbe_hw *hw;
4938         uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4939
4940         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4941
4942         if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
4943                 return -EINVAL;
4944
4945         /* refuse an MTU that requires scattered packet support when that
4946          * feature has not already been enabled. */
4947         if (!dev->data->scattered_rx &&
4948             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
4949              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
4950                 return -EINVAL;
4951
4952         /*
4953          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
4954          * request of the version 2.0 of the mailbox API.
4955          * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
4956          * of the mailbox API.
4957          * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
4958          * prior to 3.11.33 which contains the following change:
4959          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
4960          */
4961         ixgbevf_rlpml_set_vf(hw, max_frame);
4962
4963         /* update max frame size */
4964         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
4965         return 0;
4966 }
4967
4968 #define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
4969         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
4970                 return -ENOTSUP;\
4971 } while (0)
4972
4973 static inline struct ixgbe_5tuple_filter *
4974 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
4975                         struct ixgbe_5tuple_filter_info *key)
4976 {
4977         struct ixgbe_5tuple_filter *it;
4978
4979         TAILQ_FOREACH(it, filter_list, entries) {
4980                 if (memcmp(key, &it->filter_info,
4981                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
4982                         return it;
4983                 }
4984         }
4985         return NULL;
4986 }
4987
4988 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
4989 static inline int
4990 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
4991                         struct ixgbe_5tuple_filter_info *filter_info)
4992 {
4993         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
4994                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
4995                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
4996                 return -EINVAL;
4997
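             /*
              * Note the inverted mask convention: the ethdev ntuple API uses an
              * all-ones mask to mean "compare this field", while the FTQF mask
              * bits programmed by ixgbe_add_5tuple_filter() use 0 to mean
              * compare, so the *_mask fields below are stored inverted.
              */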
4998         switch (filter->dst_ip_mask) {
4999         case UINT32_MAX:
5000                 filter_info->dst_ip_mask = 0;
5001                 filter_info->dst_ip = filter->dst_ip;
5002                 break;
5003         case 0:
5004                 filter_info->dst_ip_mask = 1;
5005                 break;
5006         default:
5007                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
5008                 return -EINVAL;
5009         }
5010
5011         switch (filter->src_ip_mask) {
5012         case UINT32_MAX:
5013                 filter_info->src_ip_mask = 0;
5014                 filter_info->src_ip = filter->src_ip;
5015                 break;
5016         case 0:
5017                 filter_info->src_ip_mask = 1;
5018                 break;
5019         default:
5020                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
5021                 return -EINVAL;
5022         }
5023
5024         switch (filter->dst_port_mask) {
5025         case UINT16_MAX:
5026                 filter_info->dst_port_mask = 0;
5027                 filter_info->dst_port = filter->dst_port;
5028                 break;
5029         case 0:
5030                 filter_info->dst_port_mask = 1;
5031                 break;
5032         default:
5033                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
5034                 return -EINVAL;
5035         }
5036
5037         switch (filter->src_port_mask) {
5038         case UINT16_MAX:
5039                 filter_info->src_port_mask = 0;
5040                 filter_info->src_port = filter->src_port;
5041                 break;
5042         case 0:
5043                 filter_info->src_port_mask = 1;
5044                 break;
5045         default:
5046                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
5047                 return -EINVAL;
5048         }
5049
5050         switch (filter->proto_mask) {
5051         case UINT8_MAX:
5052                 filter_info->proto_mask = 0;
5053                 filter_info->proto =
5054                         convert_protocol_type(filter->proto);
5055                 break;
5056         case 0:
5057                 filter_info->proto_mask = 1;
5058                 break;
5059         default:
5060                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
5061                 return -EINVAL;
5062         }
5063
5064         filter_info->priority = (uint8_t)filter->priority;
5065         return 0;
5066 }
5067
5068 /*
5069  * add or delete a ntuple filter
5070  *
5071  * @param
5072  * dev: Pointer to struct rte_eth_dev.
5073  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
5074  * add: if true, add filter, if false, remove filter
5075  *
5076  * @return
5077  *    - On success, zero.
5078  *    - On failure, a negative value.
5079  */
5080 static int
5081 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
5082                         struct rte_eth_ntuple_filter *ntuple_filter,
5083                         bool add)
5084 {
5085         struct ixgbe_filter_info *filter_info =
5086                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5087         struct ixgbe_5tuple_filter_info filter_5tuple;
5088         struct ixgbe_5tuple_filter *filter;
5089         int ret;
5090
5091         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
5092                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
5093                 return -EINVAL;
5094         }
5095
5096         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
5097         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
5098         if (ret < 0)
5099                 return ret;
5100
5101         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
5102                                          &filter_5tuple);
5103         if (filter != NULL && add) {
5104                 PMD_DRV_LOG(ERR, "filter exists.");
5105                 return -EEXIST;
5106         }
5107         if (filter == NULL && !add) {
5108                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
5109                 return -ENOENT;
5110         }
5111
5112         if (add) {
5113                 filter = rte_zmalloc("ixgbe_5tuple_filter",
5114                                 sizeof(struct ixgbe_5tuple_filter), 0);
5115                 if (filter == NULL)
5116                         return -ENOMEM;
5117                 (void)rte_memcpy(&filter->filter_info,
5118                                  &filter_5tuple,
5119                                  sizeof(struct ixgbe_5tuple_filter_info));
5120                 filter->queue = ntuple_filter->queue;
5121                 ret = ixgbe_add_5tuple_filter(dev, filter);
5122                 if (ret < 0) {
5123                         rte_free(filter);
5124                         return ret;
5125                 }
5126         } else
5127                 ixgbe_remove_5tuple_filter(dev, filter);
5128
5129         return 0;
5130 }
5131
5132 /*
5133  * get a ntuple filter
5134  *
5135  * @param
5136  * dev: Pointer to struct rte_eth_dev.
5137  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
5138  *
5139  * @return
5140  *    - On success, zero.
5141  *    - On failure, a negative value.
5142  */
5143 static int
5144 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
5145                         struct rte_eth_ntuple_filter *ntuple_filter)
5146 {
5147         struct ixgbe_filter_info *filter_info =
5148                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5149         struct ixgbe_5tuple_filter_info filter_5tuple;
5150         struct ixgbe_5tuple_filter *filter;
5151         int ret;
5152
5153         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
5154                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
5155                 return -EINVAL;
5156         }
5157
5158         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
5159         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
5160         if (ret < 0)
5161                 return ret;
5162
5163         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
5164                                          &filter_5tuple);
5165         if (filter == NULL) {
5166                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
5167                 return -ENOENT;
5168         }
5169         ntuple_filter->queue = filter->queue;
5170         return 0;
5171 }
5172
5173 /*
5174  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
5175  * @dev: pointer to rte_eth_dev structure
5176  * @filter_op: operation to be taken.
5177  * @arg: a pointer to specific structure corresponding to the filter_op
5178  *
5179  * @return
5180  *    - On success, zero.
5181  *    - On failure, a negative value.
5182  */
5183 static int
5184 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
5185                                 enum rte_filter_op filter_op,
5186                                 void *arg)
5187 {
5188         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5189         int ret;
5190
5191         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
5192
5193         if (filter_op == RTE_ETH_FILTER_NOP)
5194                 return 0;
5195
5196         if (arg == NULL) {
5197                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
5198                             filter_op);
5199                 return -EINVAL;
5200         }
5201
5202         switch (filter_op) {
5203         case RTE_ETH_FILTER_ADD:
5204                 ret = ixgbe_add_del_ntuple_filter(dev,
5205                         (struct rte_eth_ntuple_filter *)arg,
5206                         TRUE);
5207                 break;
5208         case RTE_ETH_FILTER_DELETE:
5209                 ret = ixgbe_add_del_ntuple_filter(dev,
5210                         (struct rte_eth_ntuple_filter *)arg,
5211                         FALSE);
5212                 break;
5213         case RTE_ETH_FILTER_GET:
5214                 ret = ixgbe_get_ntuple_filter(dev,
5215                         (struct rte_eth_ntuple_filter *)arg);
5216                 break;
5217         default:
5218                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
5219                 ret = -EINVAL;
5220                 break;
5221         }
5222         return ret;
5223 }
5224
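/*
 * Ethertype filters are tracked in filter_info: ethertype_mask is a bitmap
 * of the ETQF entries in use and ethertype_filters[] holds the ethertype
 * programmed in each slot.
 */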
5225 static inline int
5226 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
5227                         uint16_t ethertype)
5228 {
5229         int i;
5230
5231         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
5232                 if (filter_info->ethertype_filters[i] == ethertype &&
5233                     (filter_info->ethertype_mask & (1 << i)))
5234                         return i;
5235         }
5236         return -1;
5237 }
5238
5239 static inline int
5240 ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
5241                         uint16_t ethertype)
5242 {
5243         int i;
5244
5245         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
5246                 if (!(filter_info->ethertype_mask & (1 << i))) {
5247                         filter_info->ethertype_mask |= 1 << i;
5248                         filter_info->ethertype_filters[i] = ethertype;
5249                         return i;
5250                 }
5251         }
5252         return -1;
5253 }
5254
5255 static inline int
5256 ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
5257                         uint8_t idx)
5258 {
5259         if (idx >= IXGBE_MAX_ETQF_FILTERS)
5260                 return -1;
5261         filter_info->ethertype_mask &= ~(1 << idx);
5262         filter_info->ethertype_filters[idx] = 0;
5263         return idx;
5264 }
5265
5266 static int
5267 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
5268                         struct rte_eth_ethertype_filter *filter,
5269                         bool add)
5270 {
5271         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5272         struct ixgbe_filter_info *filter_info =
5273                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5274         uint32_t etqf = 0;
5275         uint32_t etqs = 0;
5276         int ret;
5277
5278         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
5279                 return -EINVAL;
5280
5281         if (filter->ether_type == ETHER_TYPE_IPv4 ||
5282                 filter->ether_type == ETHER_TYPE_IPv6) {
5283                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
5284                         " ethertype filter.", filter->ether_type);
5285                 return -EINVAL;
5286         }
5287
5288         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
5289                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
5290                 return -EINVAL;
5291         }
5292         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
5293                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
5294                 return -EINVAL;
5295         }
5296
5297         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
5298         if (ret >= 0 && add) {
5299                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
5300                             filter->ether_type);
5301                 return -EEXIST;
5302         }
5303         if (ret < 0 && !add) {
5304                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
5305                             filter->ether_type);
5306                 return -ENOENT;
5307         }
5308
5309         if (add) {
5310                 ret = ixgbe_ethertype_filter_insert(filter_info,
5311                         filter->ether_type);
5312                 if (ret < 0) {
5313                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
5314                         return -ENOSYS;
5315                 }
5316                 etqf = IXGBE_ETQF_FILTER_EN;
5317                 etqf |= (uint32_t)filter->ether_type;
5318                 etqs |= (uint32_t)((filter->queue <<
5319                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
5320                                     IXGBE_ETQS_RX_QUEUE);
5321                 etqs |= IXGBE_ETQS_QUEUE_EN;
5322         } else {
5323                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
5324                 if (ret < 0)
5325                         return -ENOSYS;
5326         }
5327         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
5328         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
5329         IXGBE_WRITE_FLUSH(hw);
5330
5331         return 0;
5332 }
5333
5334 static int
5335 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
5336                         struct rte_eth_ethertype_filter *filter)
5337 {
5338         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5339         struct ixgbe_filter_info *filter_info =
5340                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5341         uint32_t etqf, etqs;
5342         int ret;
5343
5344         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
5345         if (ret < 0) {
5346                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
5347                             filter->ether_type);
5348                 return -ENOENT;
5349         }
5350
5351         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
5352         if (etqf & IXGBE_ETQF_FILTER_EN) {
5353                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
5354                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
5355                 filter->flags = 0;
5356                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
5357                                IXGBE_ETQS_RX_QUEUE_SHIFT;
5358                 return 0;
5359         }
5360         return -ENOENT;
5361 }
5362
5363 /*
5364  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
5365  * @dev: pointer to rte_eth_dev structure
5366  * @filter_op: operation to be taken.
5367  * @arg: a pointer to specific structure corresponding to the filter_op
5368  */
5369 static int
5370 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
5371                                 enum rte_filter_op filter_op,
5372                                 void *arg)
5373 {
5374         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5375         int ret;
5376
5377         MAC_TYPE_FILTER_SUP(hw->mac.type);
5378
5379         if (filter_op == RTE_ETH_FILTER_NOP)
5380                 return 0;
5381
5382         if (arg == NULL) {
5383                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
5384                             filter_op);
5385                 return -EINVAL;
5386         }
5387
5388         switch (filter_op) {
5389         case RTE_ETH_FILTER_ADD:
5390                 ret = ixgbe_add_del_ethertype_filter(dev,
5391                         (struct rte_eth_ethertype_filter *)arg,
5392                         TRUE);
5393                 break;
5394         case RTE_ETH_FILTER_DELETE:
5395                 ret = ixgbe_add_del_ethertype_filter(dev,
5396                         (struct rte_eth_ethertype_filter *)arg,
5397                         FALSE);
5398                 break;
5399         case RTE_ETH_FILTER_GET:
5400                 ret = ixgbe_get_ethertype_filter(dev,
5401                         (struct rte_eth_ethertype_filter *)arg);
5402                 break;
5403         default:
5404                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
5405                 ret = -EINVAL;
5406                 break;
5407         }
5408         return ret;
5409 }
5410
5411 static int
5412 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
5413                      enum rte_filter_type filter_type,
5414                      enum rte_filter_op filter_op,
5415                      void *arg)
5416 {
5417         int ret = -EINVAL;
5418
5419         switch (filter_type) {
5420         case RTE_ETH_FILTER_NTUPLE:
5421                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
5422                 break;
5423         case RTE_ETH_FILTER_ETHERTYPE:
5424                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
5425                 break;
5426         case RTE_ETH_FILTER_SYN:
5427                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
5428                 break;
5429         case RTE_ETH_FILTER_FDIR:
5430                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
5431                 break;
5432         default:
5433                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5434                                                         filter_type);
5435                 break;
5436         }
5437
5438         return ret;
5439 }
5440
5441 static u8 *
5442 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
5443                         u8 **mc_addr_ptr, u32 *vmdq)
5444 {
5445         u8 *mc_addr;
5446
5447         *vmdq = 0;
5448         mc_addr = *mc_addr_ptr;
5449         *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
5450         return mc_addr;
5451 }
5452
5453 static int
5454 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
5455                           struct ether_addr *mc_addr_set,
5456                           uint32_t nb_mc_addr)
5457 {
5458         struct ixgbe_hw *hw;
5459         u8 *mc_addr_list;
5460
5461         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5462         mc_addr_list = (u8 *)mc_addr_set;
5463         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
5464                                          ixgbe_dev_addr_list_itr, TRUE);
5465 }
5466
5467 static int
5468 ixgbe_timesync_enable(struct rte_eth_dev *dev)
5469 {
5470         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5471         uint32_t tsync_ctl;
5472         uint32_t tsauxc;
5473
5474         /* Enable system time for platforms where it isn't on by default. */
5475         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
5476         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
5477         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
5478
5479         /* Start incrementing the register used to timestamp PTP packets. */
5480         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, IXGBE_TIMINCA_INIT);
5481
5482         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5483         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
5484                         (ETHER_TYPE_1588 |
5485                          IXGBE_ETQF_FILTER_EN |
5486                          IXGBE_ETQF_1588));
5487
5488         /* Enable timestamping of received PTP packets. */
5489         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5490         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
5491         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
5492
5493         /* Enable timestamping of transmitted PTP packets. */
5494         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5495         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
5496         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
5497
5498         return 0;
5499 }
5500
5501 static int
5502 ixgbe_timesync_disable(struct rte_eth_dev *dev)
5503 {
5504         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5505         uint32_t tsync_ctl;
5506
5507         /* Disable timestamping of transmitted PTP packets. */
5508         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5509         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
5510         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
5511
5512         /* Disable timestamping of received PTP packets. */
5513         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5514         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
5515         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
5516
5517         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5518         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
5519
5520         /* Stop incrementing the System Time registers. */
5521         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
5522
5523         return 0;
5524 }
5525
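/*
 * Read the latched RX PTP timestamp. Returns -EINVAL when no valid
 * timestamp is pending. The raw 64-bit SYSTIM-based value is placed in
 * tv_sec with tv_nsec left at zero; conversion to wall-clock time is up
 * to the caller.
 */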
5526 static int
5527 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
5528                                  struct timespec *timestamp,
5529                                  uint32_t flags __rte_unused)
5530 {
5531         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5532         uint32_t tsync_rxctl;
5533         uint32_t rx_stmpl;
5534         uint32_t rx_stmph;
5535
5536         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5537         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
5538                 return -EINVAL;
5539
5540         rx_stmpl = IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
5541         rx_stmph = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
5542
5543         timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
5544         timestamp->tv_nsec = 0;
5545
5546         return 0;
5547 }
5548
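/*
 * Read the latched TX PTP timestamp. As with the RX path, the raw 64-bit
 * counter value is returned in tv_sec and tv_nsec is left at zero.
 */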
5549 static int
5550 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
5551                                  struct timespec *timestamp)
5552 {
5553         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5554         uint32_t tsync_txctl;
5555         uint32_t tx_stmpl;
5556         uint32_t tx_stmph;
5557
5558         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5559         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
5560                 return -EINVAL;
5561
5562         tx_stmpl = IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
5563         tx_stmph = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
5564
5565         timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
5566         timestamp->tv_nsec = 0;
5567
5568         return 0;
5569 }
5570
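/*
 * Number of 32-bit registers contained in a full register dump; the
 * register set depends on whether the MAC is an 82598EB or a later device.
 * The VF counterpart below counts the VF register set instead.
 */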
5571 static int
5572 ixgbe_get_reg_length(struct rte_eth_dev *dev)
5573 {
5574         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5575         int count = 0;
5576         int g_ind = 0;
5577         const struct reg_info *reg_group;
5578         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
5579                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
5580
5581         while ((reg_group = reg_set[g_ind++]))
5582                 count += ixgbe_regs_group_count(reg_group);
5583
5584         return count;
5585 }
5586
5587 static int
5588 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
5589 {
5590         int count = 0;
5591         int g_ind = 0;
5592         const struct reg_info *reg_group;
5593
5594         while ((reg_group = ixgbevf_regs[g_ind++]))
5595                 count += ixgbe_regs_group_count(reg_group);
5596
5597         return count;
5598 }
5599
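/*
 * Fill regs->data with a complete register dump. Only a full dump is
 * supported: a non-zero regs->length that does not match
 * ixgbe_get_reg_length() is rejected with -ENOTSUP. The version field
 * encodes the MAC type, revision and device id.
 */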
5600 static int
5601 ixgbe_get_regs(struct rte_eth_dev *dev,
5602               struct rte_dev_reg_info *regs)
5603 {
5604         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5605         uint32_t *data = regs->data;
5606         int g_ind = 0;
5607         int count = 0;
5608         const struct reg_info *reg_group;
5609         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
5610                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
5611
5612         /* Support only full register dump */
5613         if ((regs->length == 0) ||
5614             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
5615                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
5616                         hw->device_id;
5617                 while ((reg_group = reg_set[g_ind++]))
5618                         count += ixgbe_read_regs_group(dev, &data[count],
5619                                 reg_group);
5620                 return 0;
5621         }
5622
5623         return -ENOTSUP;
5624 }
5625
5626 static int
5627 ixgbevf_get_regs(struct rte_eth_dev *dev,
5628                 struct rte_dev_reg_info *regs)
5629 {
5630         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5631         uint32_t *data = regs->data;
5632         int g_ind = 0;
5633         int count = 0;
5634         const struct reg_info *reg_group;
5635
5636         /* Support only full register dump */
5637         if ((regs->length == 0) ||
5638             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
5639                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
5640                         hw->device_id;
5641                 while ((reg_group = ixgbevf_regs[g_ind++]))
5642                         count += ixgbe_read_regs_group(dev, &data[count],
5643                                                       reg_group);
5644                 return 0;
5645         }
5646
5647         return -ENOTSUP;
5648 }
5649
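/* EEPROM size in bytes; the base code tracks it in 16-bit words. */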
5650 static int
5651 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
5652 {
5653         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5654
5655         /* Return unit is byte count */
5656         return hw->eeprom.word_size * 2;
5657 }
5658
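/*
 * Read EEPROM contents into in_eeprom->data. Offset and length are given
 * in bytes and converted to 16-bit word units; requests beyond the EEPROM
 * size return -EINVAL. ixgbe_set_eeprom() below mirrors this for writes.
 */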
5659 static int
5660 ixgbe_get_eeprom(struct rte_eth_dev *dev,
5661                 struct rte_dev_eeprom_info *in_eeprom)
5662 {
5663         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5664         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
5665         uint16_t *data = in_eeprom->data;
5666         int first, length;
5667
5668         first = in_eeprom->offset >> 1;
5669         length = in_eeprom->length >> 1;
5670         if ((first > hw->eeprom.word_size) ||
5671             ((first + length) > hw->eeprom.word_size))
5672                 return -EINVAL;
5673
5674         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
5675
5676         return eeprom->ops.read_buffer(hw, first, length, data);
5677 }
5678
5679 static int
5680 ixgbe_set_eeprom(struct rte_eth_dev *dev,
5681                 struct rte_dev_eeprom_info *in_eeprom)
5682 {
5683         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5684         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
5685         uint16_t *data = in_eeprom->data;
5686         int first, length;
5687
5688         first = in_eeprom->offset >> 1;
5689         length = in_eeprom->length >> 1;
5690         if ((first > hw->eeprom.word_size) ||
5691             ((first + length) > hw->eeprom.word_size))
5692                 return -EINVAL;
5693
5694         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
5695
5696         return eeprom->ops.write_buffer(hw, first, length, data);
5697 }
5698
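/*
 * RSS redirection table size for the given MAC type: 512 entries on
 * X550/X550EM_x, 64 on their VFs, 128 everywhere else.
 */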
5699 uint16_t
5700 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type)
{
5701         switch (mac_type) {
5702         case ixgbe_mac_X550:
5703         case ixgbe_mac_X550EM_x:
5704                 return ETH_RSS_RETA_SIZE_512;
5705         case ixgbe_mac_X550_vf:
5706         case ixgbe_mac_X550EM_x_vf:
5707                 return ETH_RSS_RETA_SIZE_64;
5708         default:
5709                 return ETH_RSS_RETA_SIZE_128;
5710         }
5711 }
5712
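/*
 * Register address holding a given RETA entry. Each 32-bit register packs
 * four entries; X550 devices use the extended ERETA registers for entries
 * beyond the first 128 and the VFs use VFRETA.
 */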
5713 uint32_t
5714 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx)
{
5715         switch (mac_type) {
5716         case ixgbe_mac_X550:
5717         case ixgbe_mac_X550EM_x:
5718                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
5719                         return IXGBE_RETA(reta_idx >> 2);
5720                 else
5721                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
5722         case ixgbe_mac_X550_vf:
5723         case ixgbe_mac_X550EM_x_vf:
5724                 return IXGBE_VFRETA(reta_idx >> 2);
5725         default:
5726                 return IXGBE_RETA(reta_idx >> 2);
5727         }
5728 }
5729
5730 uint32_t
5731 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type)
{
5732         switch (mac_type) {
5733         case ixgbe_mac_X550_vf:
5734         case ixgbe_mac_X550EM_x_vf:
5735                 return IXGBE_VFMRQC;
5736         default:
5737                 return IXGBE_MRQC;
5738         }
5739 }
5740
5741 uint32_t
5742 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i)
{
5743         switch (mac_type) {
5744         case ixgbe_mac_X550_vf:
5745         case ixgbe_mac_X550EM_x_vf:
5746                 return IXGBE_VFRSSRK(i);
5747         default:
5748                 return IXGBE_RSSRK(i);
5749         }
5750 }
5751
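/*
 * Whether RSS configuration updates are supported for this MAC type; the
 * 82599 and X540 VFs do not support them.
 */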
5752 bool
5753 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type)
{
5754         switch (mac_type) {
5755         case ixgbe_mac_82599_vf:
5756         case ixgbe_mac_X540_vf:
5757                 return 0;
5758         default:
5759                 return 1;
5760         }
5761 }
5762
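/*
 * Report the current DCB configuration to the application: number of TCs,
 * priority-to-TC mapping, the per-TC RX/TX queue layout (which differs
 * between VT and non-VT mode) and each TC's TX bandwidth percentage.
 */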
5763 static int
5764 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
5765                         struct rte_eth_dcb_info *dcb_info)
5766 {
5767         struct ixgbe_dcb_config *dcb_config =
5768                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
5769         struct ixgbe_dcb_tc_config *tc;
5770         uint8_t i, j;
5771
5772         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
5773                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
5774         else
5775                 dcb_info->nb_tcs = 1;
5776
5777         if (dcb_config->vt_mode) { /* vt is enabled */
5778                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
5779                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
5780                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
5781                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
5782                 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
5783                         for (j = 0; j < dcb_info->nb_tcs; j++) {
5784                                 dcb_info->tc_queue.tc_rxq[i][j].base =
5785                                                 i * dcb_info->nb_tcs + j;
5786                                 dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
5787                                 dcb_info->tc_queue.tc_txq[i][j].base =
5788                                                 i * dcb_info->nb_tcs + j;
5789                                 dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
5790                         }
5791                 }
5792         } else { /* vt is disabled */
5793                 struct rte_eth_dcb_rx_conf *rx_conf =
5794                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
5795                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
5796                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
5797                 if (dcb_info->nb_tcs == ETH_4_TCS) {
5798                         for (i = 0; i < dcb_info->nb_tcs; i++) {
5799                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
5800                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
5801                         }
5802                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
5803                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
5804                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
5805                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
5806                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
5807                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
5808                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
5809                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
5810                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
5811                         for (i = 0; i < dcb_info->nb_tcs; i++) {
5812                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
5813                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
5814                         }
5815                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
5816                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
5817                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
5818                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
5819                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
5820                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
5821                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
5822                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
5823                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
5824                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
5825                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
5826                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
5827                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
5828                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
5829                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
5830                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
5831                 }
5832         }
5833         for (i = 0; i < dcb_info->nb_tcs; i++) {
5834                 tc = &dcb_config->tc_config[i];
5835                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
5836         }
5837         return 0;
5838 }
5839
5840 static struct rte_driver rte_ixgbe_driver = {
5841         .type = PMD_PDEV,
5842         .init = rte_ixgbe_pmd_init,
5843 };
5844
5845 static struct rte_driver rte_ixgbevf_driver = {
5846         .type = PMD_PDEV,
5847         .init = rte_ixgbevf_pmd_init,
5848 };
5849
5850 PMD_REGISTER_DRIVER(rte_ixgbe_driver);
5851 PMD_REGISTER_DRIVER(rte_ixgbevf_driver);