net/ixgbe: add firmware version get
[dpdk.git] / drivers / net / ixgbe / ixgbe_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63
64 #include "ixgbe_logs.h"
65 #include "base/ixgbe_api.h"
66 #include "base/ixgbe_vf.h"
67 #include "base/ixgbe_common.h"
68 #include "ixgbe_ethdev.h"
69 #include "ixgbe_bypass.h"
70 #include "ixgbe_rxtx.h"
71 #include "base/ixgbe_type.h"
72 #include "base/ixgbe_phy.h"
73 #include "ixgbe_regs.h"
74
75 #include "rte_pmd_ixgbe.h"
76
77 /*
78  * High threshold controlling when to start sending XOFF frames. Must be at
79  * least 8 bytes less than receive packet buffer size. This value is in units
80  * of 1024 bytes.
81  */
82 #define IXGBE_FC_HI    0x80
83
84 /*
85  * Low threshold controlling when to start sending XON frames. This value is
86  * in units of 1024 bytes.
87  */
88 #define IXGBE_FC_LO    0x40
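/*
 * Illustrative sketch (not part of the driver, field names assume the fc
 * structure layout used by the shared base code): the thresholds above are
 * in 1 KB units, so the defaults correspond to XOFF at 0x80 * 1024 = 128 KB
 * and XON at 0x40 * 1024 = 64 KB of used receive packet buffer.  They are
 * typically copied into the shared-code flow control state before
 * ixgbe_fc_enable() programs the watermark registers, roughly:
 *
 *	hw->fc.pause_time = IXGBE_FC_PAUSE;
 *	hw->fc.send_xon = 1;
 *	hw->fc.high_water[0] = IXGBE_FC_HI;
 *	hw->fc.low_water[0] = IXGBE_FC_LO;
 */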
89
90 /* Default minimum inter-interrupt interval for EITR configuration */
91 #define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E
92
93 /* Timer value included in XOFF frames. */
94 #define IXGBE_FC_PAUSE 0x680
95
96 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
97 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
98 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
99
100 #define IXGBE_MMW_SIZE_DEFAULT        0x4
101 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
102 #define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
103
104 /*
105  *  Default values for RX/TX configuration
106  */
107 #define IXGBE_DEFAULT_RX_FREE_THRESH  32
108 #define IXGBE_DEFAULT_RX_PTHRESH      8
109 #define IXGBE_DEFAULT_RX_HTHRESH      8
110 #define IXGBE_DEFAULT_RX_WTHRESH      0
111
112 #define IXGBE_DEFAULT_TX_FREE_THRESH  32
113 #define IXGBE_DEFAULT_TX_PTHRESH      32
114 #define IXGBE_DEFAULT_TX_HTHRESH      0
115 #define IXGBE_DEFAULT_TX_WTHRESH      0
116 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
117
118 /* Bit shift and mask */
119 #define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
120 #define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
121 #define IXGBE_8_BIT_WIDTH  CHAR_BIT
122 #define IXGBE_8_BIT_MASK   UINT8_MAX
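/*
 * Worked example: RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) builds a mask of
 * the IXGBE_4_BIT_WIDTH = CHAR_BIT / 2 = 4 low bits, so IXGBE_4_BIT_MASK
 * evaluates to 0x0f, while IXGBE_8_BIT_MASK is simply UINT8_MAX (0xff).
 */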
123
124 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
125
126 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
127
128 #define IXGBE_HKEY_MAX_INDEX 10
129
130 /* Additional timesync values. */
131 #define NSEC_PER_SEC             1000000000L
132 #define IXGBE_INCVAL_10GB        0x66666666
133 #define IXGBE_INCVAL_1GB         0x40000000
134 #define IXGBE_INCVAL_100         0x50000000
135 #define IXGBE_INCVAL_SHIFT_10GB  28
136 #define IXGBE_INCVAL_SHIFT_1GB   24
137 #define IXGBE_INCVAL_SHIFT_100   21
138 #define IXGBE_INCVAL_SHIFT_82599 7
139 #define IXGBE_INCPER_SHIFT_82599 24
140
141 #define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
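/*
 * Worked example: the increment values above are fixed-point
 * nanoseconds-per-SYSTIM-tick.  At 10 Gb/s, IXGBE_INCVAL_10GB / 2^28 =
 * 0x66666666 / 268435456 is roughly 6.4 ns per tick; at 1 Gb/s,
 * IXGBE_INCVAL_1GB / 2^24 = 64 ns; at 100 Mb/s, IXGBE_INCVAL_100 / 2^21 =
 * 640 ns.  The shift constants give the matching fixed-point scaling used
 * when these values are programmed into the time increment register.
 */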
142
143 #define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
144 #define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
145 #define DEFAULT_ETAG_ETYPE                     0x893f
146 #define IXGBE_ETAG_ETYPE                       0x00005084
147 #define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
148 #define IXGBE_ETAG_ETYPE_VALID                 0x80000000
149 #define IXGBE_RAH_ADTYPE                       0x40000000
150 #define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
151 #define IXGBE_VMVIR_TAGA_MASK                  0x18000000
152 #define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
153 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
154 #define IXGBE_QDE_STRIP_TAG                    0x00000004
155 #define IXGBE_VTEICR_MASK                      0x07
156
157 enum ixgbevf_xcast_modes {
158         IXGBEVF_XCAST_MODE_NONE = 0,
159         IXGBEVF_XCAST_MODE_MULTI,
160         IXGBEVF_XCAST_MODE_ALLMULTI,
161 };
162
163 #define IXGBE_EXVET_VET_EXT_SHIFT              16
164 #define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
165
166 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
167 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
168 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
169 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
170 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
171 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
172 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
173 static void ixgbe_dev_close(struct rte_eth_dev *dev);
174 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
175 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
176 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
177 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
178 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
179                                 int wait_to_complete);
180 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
181                                 struct rte_eth_stats *stats);
182 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
183                                 struct rte_eth_xstat *xstats, unsigned n);
184 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
185                                   struct rte_eth_xstat *xstats, unsigned n);
186 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
187 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
188 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
189         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
190 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
191         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
192 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
193                                              uint16_t queue_id,
194                                              uint8_t stat_idx,
195                                              uint8_t is_rx);
196 static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
197                                  size_t fw_size);
198 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
199                                struct rte_eth_dev_info *dev_info);
200 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
201 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
202                                  struct rte_eth_dev_info *dev_info);
203 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
204
205 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
206                 uint16_t vlan_id, int on);
207 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
208                                enum rte_vlan_type vlan_type,
209                                uint16_t tpid_id);
210 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
211                 uint16_t queue, bool on);
212 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
213                 int on);
214 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
215 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
216 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
217 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
218 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
219
220 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
221 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
222 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
223                                struct rte_eth_fc_conf *fc_conf);
224 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
225                                struct rte_eth_fc_conf *fc_conf);
226 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
227                 struct rte_eth_pfc_conf *pfc_conf);
228 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
229                         struct rte_eth_rss_reta_entry64 *reta_conf,
230                         uint16_t reta_size);
231 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
232                         struct rte_eth_rss_reta_entry64 *reta_conf,
233                         uint16_t reta_size);
234 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
235 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
236 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
237 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
238 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
239 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
240                                       struct rte_intr_handle *handle);
241 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
242                 void *param);
243 static void ixgbe_dev_interrupt_delayed_handler(void *param);
244 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
245                 uint32_t index, uint32_t pool);
246 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
247 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
248                                            struct ether_addr *mac_addr);
249 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
250
251 /* For Virtual Function support */
252 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
253 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
254 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
255 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
256 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
257 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
258 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
259 static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
260 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
261                 struct rte_eth_stats *stats);
262 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
263 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
264                 uint16_t vlan_id, int on);
265 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
266                 uint16_t queue, int on);
267 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
268 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
269 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
270                                             uint16_t queue_id);
271 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
272                                              uint16_t queue_id);
273 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
274                                  uint8_t queue, uint8_t msix_vector);
275 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
276 static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
277 static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
278
279 /* For Eth VMDQ APIs support */
280 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
281                 struct ether_addr *mac_addr, uint8_t on);
282 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
283 static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,  uint16_t pool,
284                 uint16_t rx_mask, uint8_t on);
285 static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
286 static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
287 static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
288                 uint64_t pool_mask, uint8_t vlan_on);
289 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
290                 struct rte_eth_mirror_conf *mirror_conf,
291                 uint8_t rule_id, uint8_t on);
292 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
293                 uint8_t rule_id);
294 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
295                                           uint16_t queue_id);
296 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
297                                            uint16_t queue_id);
298 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
299                                uint8_t queue, uint8_t msix_vector);
300 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
301
302 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
303                 uint16_t queue_idx, uint16_t tx_rate);
304 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
305                 uint16_t tx_rate, uint64_t q_msk);
306
307 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
308                                  struct ether_addr *mac_addr,
309                                  uint32_t index, uint32_t pool);
310 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
311 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
312                                              struct ether_addr *mac_addr);
313 static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
314                         struct rte_eth_syn_filter *filter,
315                         bool add);
316 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
317                         struct rte_eth_syn_filter *filter);
318 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
319                         enum rte_filter_op filter_op,
320                         void *arg);
321 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
322                         struct ixgbe_5tuple_filter *filter);
323 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
324                         struct ixgbe_5tuple_filter *filter);
325 static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
326                         struct rte_eth_ntuple_filter *filter,
327                         bool add);
328 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
329                                 enum rte_filter_op filter_op,
330                                 void *arg);
331 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
332                         struct rte_eth_ntuple_filter *filter);
333 static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
334                         struct rte_eth_ethertype_filter *filter,
335                         bool add);
336 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
337                                 enum rte_filter_op filter_op,
338                                 void *arg);
339 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
340                         struct rte_eth_ethertype_filter *filter);
341 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
342                      enum rte_filter_type filter_type,
343                      enum rte_filter_op filter_op,
344                      void *arg);
345 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
346
347 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
348                                       struct ether_addr *mc_addr_set,
349                                       uint32_t nb_mc_addr);
350 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
351                                    struct rte_eth_dcb_info *dcb_info);
352
353 static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
354 static int ixgbe_get_regs(struct rte_eth_dev *dev,
355                             struct rte_dev_reg_info *regs);
356 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
357 static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
358                                 struct rte_dev_eeprom_info *eeprom);
359 static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
360                                 struct rte_dev_eeprom_info *eeprom);
361
362 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
363 static int ixgbevf_get_regs(struct rte_eth_dev *dev,
364                                 struct rte_dev_reg_info *regs);
365
366 static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
367 static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
368 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
369                                             struct timespec *timestamp,
370                                             uint32_t flags);
371 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
372                                             struct timespec *timestamp);
373 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
374 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
375                                    struct timespec *timestamp);
376 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
377                                    const struct timespec *timestamp);
378 static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
379                                           void *param);
380
381 static int ixgbe_dev_l2_tunnel_eth_type_conf
382         (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
383 static int ixgbe_dev_l2_tunnel_offload_set
384         (struct rte_eth_dev *dev,
385          struct rte_eth_l2_tunnel_conf *l2_tunnel,
386          uint32_t mask,
387          uint8_t en);
388 static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
389                                              enum rte_filter_op filter_op,
390                                              void *arg);
391
392 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
393                                          struct rte_eth_udp_tunnel *udp_tunnel);
394 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
395                                          struct rte_eth_udp_tunnel *udp_tunnel);
396
397 /*
398  * Define VF stats macros for registers that are not "cleared on read"
399  */
400 #define UPDATE_VF_STAT(reg, last, cur)                          \
401 {                                                               \
402         uint32_t latest = IXGBE_READ_REG(hw, reg);              \
403         cur += (latest - last) & UINT_MAX;                      \
404         last = latest;                                          \
405 }
406
407 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
408 {                                                                \
409         u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
410         u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
411         u64 latest = ((new_msb << 32) | new_lsb);                \
412         cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
413         last = latest;                                           \
414 }
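/*
 * Worked example: both macros rely on unsigned modular arithmetic to survive
 * counter wrap-around.  For UPDATE_VF_STAT, if last = 0xFFFFFFF0 and the
 * register now reads latest = 0x10, then (latest - last) & UINT_MAX = 0x20,
 * so only the 0x20 packets counted since the last read are added.  Likewise
 * for the 36-bit variant: with last = 0xFFFFFFFF0 and latest = 0x10,
 * (0x1000000000 + 0x10 - 0xFFFFFFFF0) & 0xFFFFFFFFF = 0x20.
 */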
415
416 #define IXGBE_SET_HWSTRIP(h, q) do {\
417                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
418                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
419                 (h)->bitmap[idx] |= 1 << bit;\
420         } while (0)
421
422 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\
423                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
424                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
425                 (h)->bitmap[idx] &= ~(1 << bit);\
426         } while (0)
427
428 #define IXGBE_GET_HWSTRIP(h, q, r) do {\
429                 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
430                 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
431                 (r) = (h)->bitmap[idx] >> bit & 1;\
432         } while (0)
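/*
 * Worked example (assuming bitmap[] entries are 32-bit words and NBBY is
 * 8 bits per byte): queue 37 maps to idx = 37 / 32 = 1 and bit = 37 % 32 = 5,
 * i.e. IXGBE_SET_HWSTRIP(h, 37) sets bit 5 of (h)->bitmap[1].
 */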
433
434 /*
435  * The set of PCI devices this driver supports
436  */
437 static const struct rte_pci_id pci_id_ixgbe_map[] = {
438         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
439         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
440         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
441         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
442         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
443         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
444         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
445         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
446         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
447         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
448         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
449         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
450         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
451         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
452         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
453         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
454         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
455         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
456         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
457         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
458         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
459         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
460         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
461         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
462         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
463         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
464         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
465         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
466         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
467         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
468         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
469         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
470         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
471         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
472         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
473         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
474         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
475         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
476         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
477         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
478         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
479         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
480         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
481         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
482         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
483         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
484         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
485         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
486         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
487         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
488         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
489         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
490         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
491 #ifdef RTE_NIC_BYPASS
492         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
493 #endif
494         { .vendor_id = 0, /* sentinel */ },
495 };
496
497 /*
498  * The set of PCI devices this driver supports (for 82599 VF)
499  */
500 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
501         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
502         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
503         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
504         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
505         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
506         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
507         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
508         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
509         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
510         { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
511         { .vendor_id = 0, /* sentinel */ },
512 };
513
514 static const struct rte_eth_desc_lim rx_desc_lim = {
515         .nb_max = IXGBE_MAX_RING_DESC,
516         .nb_min = IXGBE_MIN_RING_DESC,
517         .nb_align = IXGBE_RXD_ALIGN,
518 };
519
520 static const struct rte_eth_desc_lim tx_desc_lim = {
521         .nb_max = IXGBE_MAX_RING_DESC,
522         .nb_min = IXGBE_MIN_RING_DESC,
523         .nb_align = IXGBE_TXD_ALIGN,
524         .nb_seg_max = IXGBE_TX_MAX_SEG,
525         .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
526 };
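/*
 * Illustrative usage sketch (not part of the driver): these limits are
 * reported through the dev_infos_get path, so an application can validate a
 * requested ring size against them before queue setup, e.g.:
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t nb_rxd = 512;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	if (nb_rxd > info.rx_desc_lim.nb_max ||
 *	    nb_rxd < info.rx_desc_lim.nb_min ||
 *	    nb_rxd % info.rx_desc_lim.nb_align != 0)
 *		nb_rxd = info.rx_desc_lim.nb_max;
 */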
527
528 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
529         .dev_configure        = ixgbe_dev_configure,
530         .dev_start            = ixgbe_dev_start,
531         .dev_stop             = ixgbe_dev_stop,
532         .dev_set_link_up    = ixgbe_dev_set_link_up,
533         .dev_set_link_down  = ixgbe_dev_set_link_down,
534         .dev_close            = ixgbe_dev_close,
535         .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
536         .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
537         .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
538         .allmulticast_disable = ixgbe_dev_allmulticast_disable,
539         .link_update          = ixgbe_dev_link_update,
540         .stats_get            = ixgbe_dev_stats_get,
541         .xstats_get           = ixgbe_dev_xstats_get,
542         .stats_reset          = ixgbe_dev_stats_reset,
543         .xstats_reset         = ixgbe_dev_xstats_reset,
544         .xstats_get_names     = ixgbe_dev_xstats_get_names,
545         .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
546         .fw_version_get       = ixgbe_fw_version_get,
547         .dev_infos_get        = ixgbe_dev_info_get,
548         .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
549         .mtu_set              = ixgbe_dev_mtu_set,
550         .vlan_filter_set      = ixgbe_vlan_filter_set,
551         .vlan_tpid_set        = ixgbe_vlan_tpid_set,
552         .vlan_offload_set     = ixgbe_vlan_offload_set,
553         .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
554         .rx_queue_start       = ixgbe_dev_rx_queue_start,
555         .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
556         .tx_queue_start       = ixgbe_dev_tx_queue_start,
557         .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
558         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
559         .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
560         .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
561         .rx_queue_release     = ixgbe_dev_rx_queue_release,
562         .rx_queue_count       = ixgbe_dev_rx_queue_count,
563         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
564         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
565         .tx_queue_release     = ixgbe_dev_tx_queue_release,
566         .dev_led_on           = ixgbe_dev_led_on,
567         .dev_led_off          = ixgbe_dev_led_off,
568         .flow_ctrl_get        = ixgbe_flow_ctrl_get,
569         .flow_ctrl_set        = ixgbe_flow_ctrl_set,
570         .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
571         .mac_addr_add         = ixgbe_add_rar,
572         .mac_addr_remove      = ixgbe_remove_rar,
573         .mac_addr_set         = ixgbe_set_default_mac_addr,
574         .uc_hash_table_set    = ixgbe_uc_hash_table_set,
575         .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
576         .mirror_rule_set      = ixgbe_mirror_rule_set,
577         .mirror_rule_reset    = ixgbe_mirror_rule_reset,
578         .set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
579         .set_vf_rx            = ixgbe_set_pool_rx,
580         .set_vf_tx            = ixgbe_set_pool_tx,
581         .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
582         .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
583         .set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
584         .reta_update          = ixgbe_dev_rss_reta_update,
585         .reta_query           = ixgbe_dev_rss_reta_query,
586 #ifdef RTE_NIC_BYPASS
587         .bypass_init          = ixgbe_bypass_init,
588         .bypass_state_set     = ixgbe_bypass_state_store,
589         .bypass_state_show    = ixgbe_bypass_state_show,
590         .bypass_event_set     = ixgbe_bypass_event_store,
591         .bypass_event_show    = ixgbe_bypass_event_show,
592         .bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
593         .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
594         .bypass_ver_show      = ixgbe_bypass_ver_show,
595         .bypass_wd_reset      = ixgbe_bypass_wd_reset,
596 #endif /* RTE_NIC_BYPASS */
597         .rss_hash_update      = ixgbe_dev_rss_hash_update,
598         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
599         .filter_ctrl          = ixgbe_dev_filter_ctrl,
600         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
601         .rxq_info_get         = ixgbe_rxq_info_get,
602         .txq_info_get         = ixgbe_txq_info_get,
603         .timesync_enable      = ixgbe_timesync_enable,
604         .timesync_disable     = ixgbe_timesync_disable,
605         .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
606         .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
607         .get_reg              = ixgbe_get_regs,
608         .get_eeprom_length    = ixgbe_get_eeprom_length,
609         .get_eeprom           = ixgbe_get_eeprom,
610         .set_eeprom           = ixgbe_set_eeprom,
611         .get_dcb_info         = ixgbe_dev_get_dcb_info,
612         .timesync_adjust_time = ixgbe_timesync_adjust_time,
613         .timesync_read_time   = ixgbe_timesync_read_time,
614         .timesync_write_time  = ixgbe_timesync_write_time,
615         .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
616         .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
617         .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
618         .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
619 };
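/*
 * Usage sketch for the newly wired .fw_version_get op (illustrative only;
 * exact return semantics follow the generic ethdev API documentation): an
 * application retrieves the firmware version string through the ethdev
 * layer, e.g.:
 *
 *	char fw_version[32];
 *	int ret;
 *
 *	ret = rte_eth_dev_fw_version_get(port_id, fw_version,
 *					 sizeof(fw_version));
 *	if (ret == 0)
 *		printf("firmware: %s\n", fw_version);
 *	else if (ret > 0)
 *		printf("buffer too small, %d bytes needed\n", ret);
 */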
620
621 /*
622  * dev_ops for the virtual function; only the bare necessities for basic
623  * VF operation have been implemented
624  */
625 static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
626         .dev_configure        = ixgbevf_dev_configure,
627         .dev_start            = ixgbevf_dev_start,
628         .dev_stop             = ixgbevf_dev_stop,
629         .link_update          = ixgbe_dev_link_update,
630         .stats_get            = ixgbevf_dev_stats_get,
631         .xstats_get           = ixgbevf_dev_xstats_get,
632         .stats_reset          = ixgbevf_dev_stats_reset,
633         .xstats_reset         = ixgbevf_dev_stats_reset,
634         .xstats_get_names     = ixgbevf_dev_xstats_get_names,
635         .dev_close            = ixgbevf_dev_close,
636         .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
637         .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
638         .dev_infos_get        = ixgbevf_dev_info_get,
639         .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
640         .mtu_set              = ixgbevf_dev_set_mtu,
641         .vlan_filter_set      = ixgbevf_vlan_filter_set,
642         .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
643         .vlan_offload_set     = ixgbevf_vlan_offload_set,
644         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
645         .rx_queue_release     = ixgbe_dev_rx_queue_release,
646         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
647         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
648         .tx_queue_release     = ixgbe_dev_tx_queue_release,
649         .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
650         .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
651         .mac_addr_add         = ixgbevf_add_mac_addr,
652         .mac_addr_remove      = ixgbevf_remove_mac_addr,
653         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
654         .rxq_info_get         = ixgbe_rxq_info_get,
655         .txq_info_get         = ixgbe_txq_info_get,
656         .mac_addr_set         = ixgbevf_set_default_mac_addr,
657         .get_reg              = ixgbevf_get_regs,
658         .reta_update          = ixgbe_dev_rss_reta_update,
659         .reta_query           = ixgbe_dev_rss_reta_query,
660         .rss_hash_update      = ixgbe_dev_rss_hash_update,
661         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
662 };
663
664 /* store statistics names and their offsets in the stats structure */
665 struct rte_ixgbe_xstats_name_off {
666         char name[RTE_ETH_XSTATS_NAME_SIZE];
667         unsigned offset;
668 };
669
670 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
671         {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
672         {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
673         {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
674         {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
675         {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
676         {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
677         {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
678         {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
679         {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
680         {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
681         {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
682         {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
683         {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
684         {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
685         {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
686                 prc1023)},
687         {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
688                 prc1522)},
689         {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
690         {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
691         {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
692         {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
693         {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
694         {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
695         {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
696         {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
697         {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
698         {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
699         {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
700         {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
701         {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
702         {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
703         {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
704         {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
705         {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
706                 ptc1023)},
707         {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
708                 ptc1522)},
709         {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
710         {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
711         {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
712         {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
713
714         {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
715                 fdirustat_add)},
716         {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
717                 fdirustat_remove)},
718         {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
719                 fdirfstat_fadd)},
720         {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
721                 fdirfstat_fremove)},
722         {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
723                 fdirmatch)},
724         {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
725                 fdirmiss)},
726
727         {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
728         {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
729         {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
730                 fclast)},
731         {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
732         {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
733         {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
734         {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
735         {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
736                 fcoe_noddp)},
737         {"rx_fcoe_no_direct_data_placement_ext_buff",
738                 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
739
740         {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
741                 lxontxc)},
742         {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
743                 lxonrxc)},
744         {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
745                 lxofftxc)},
746         {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
747                 lxoffrxc)},
748         {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
749 };
750
751 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
752                            sizeof(rte_ixgbe_stats_strings[0]))
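/*
 * Illustrative sketch: the name/offset pairs above let the xstats callbacks
 * walk the hardware statistics structure generically; each value is fetched
 * by adding the recorded offset to the base of the per-port ixgbe_hw_stats
 * copy, roughly:
 *
 *	for (i = 0; i < IXGBE_NB_HW_STATS; i++)
 *		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
 *				rte_ixgbe_stats_strings[i].offset);
 */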
753
754 /* MACsec statistics */
755 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
756         {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
757                 out_pkts_untagged)},
758         {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
759                 out_pkts_encrypted)},
760         {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
761                 out_pkts_protected)},
762         {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
763                 out_octets_encrypted)},
764         {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
765                 out_octets_protected)},
766         {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
767                 in_pkts_untagged)},
768         {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
769                 in_pkts_badtag)},
770         {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
771                 in_pkts_nosci)},
772         {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
773                 in_pkts_unknownsci)},
774         {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
775                 in_octets_decrypted)},
776         {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
777                 in_octets_validated)},
778         {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
779                 in_pkts_unchecked)},
780         {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
781                 in_pkts_delayed)},
782         {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
783                 in_pkts_late)},
784         {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
785                 in_pkts_ok)},
786         {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
787                 in_pkts_invalid)},
788         {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
789                 in_pkts_notvalid)},
790         {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
791                 in_pkts_unusedsa)},
792         {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
793                 in_pkts_notusingsa)},
794 };
795
796 #define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
797                            sizeof(rte_ixgbe_macsec_strings[0]))
798
799 /* Per-queue statistics */
800 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
801         {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
802         {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
803         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
804         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
805 };
806
807 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
808                            sizeof(rte_ixgbe_rxq_strings[0]))
809 #define IXGBE_NB_RXQ_PRIO_VALUES 8
810
811 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
812         {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
813         {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
814         {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
815                 pxon2offc)},
816 };
817
818 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
819                            sizeof(rte_ixgbe_txq_strings[0]))
820 #define IXGBE_NB_TXQ_PRIO_VALUES 8
821
822 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
823         {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
824 };
825
826 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
827                 sizeof(rte_ixgbevf_stats_strings[0]))
828
829 /**
830  * Atomically reads the link status information from global
831  * structure rte_eth_dev.
832  *
833  * @param dev
834  *   Pointer to the structure rte_eth_dev to read from.
835  * @param link
836  *   Pointer to the buffer in which to save the link status.
837  * @return
838  *   - On success, zero.
839  *   - On failure, negative value.
840  */
841 static inline int
842 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
843                                 struct rte_eth_link *link)
844 {
845         struct rte_eth_link *dst = link;
846         struct rte_eth_link *src = &(dev->data->dev_link);
847
848         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
849                                         *(uint64_t *)src) == 0)
850                 return -1;
851
852         return 0;
853 }
854
855 /**
856  * Atomically writes the link status information into global
857  * structure rte_eth_dev.
858  *
859  * @param dev
860  *   Pointer to the structure rte_eth_dev to write to.
861  * @param link
862  *   Pointer to the buffer holding the link status to write.
863  * @return
864  *   - On success, zero.
865  *   - On failure, negative value.
866  */
867 static inline int
868 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
869                                 struct rte_eth_link *link)
870 {
871         struct rte_eth_link *dst = &(dev->data->dev_link);
872         struct rte_eth_link *src = link;
873
874         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
875                                         *(uint64_t *)src) == 0)
876                 return -1;
877
878         return 0;
879 }
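/*
 * Usage sketch (illustrative only): a link_update implementation typically
 * reads the previously published state, fills in a new rte_eth_link from the
 * hardware, and publishes it atomically, e.g.:
 *
 *	struct rte_eth_link link, old;
 *
 *	memset(&link, 0, sizeof(link));
 *	rte_ixgbe_dev_atomic_read_link_status(dev, &old);
 *	... query the hardware and fill in link ...
 *	rte_ixgbe_dev_atomic_write_link_status(dev, &link);
 *	return (old.link_status == link.link_status) ? -1 : 0;
 */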
880
881 /*
882  * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
883  */
884 static inline int
885 ixgbe_is_sfp(struct ixgbe_hw *hw)
886 {
887         switch (hw->phy.type) {
888         case ixgbe_phy_sfp_avago:
889         case ixgbe_phy_sfp_ftl:
890         case ixgbe_phy_sfp_intel:
891         case ixgbe_phy_sfp_unknown:
892         case ixgbe_phy_sfp_passive_tyco:
893         case ixgbe_phy_sfp_passive_unknown:
894                 return 1;
895         default:
896                 return 0;
897         }
898 }
899
900 static inline int32_t
901 ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
902 {
903         uint32_t ctrl_ext;
904         int32_t status;
905
906         status = ixgbe_reset_hw(hw);
907
908         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
909         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
910         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
911         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
912         IXGBE_WRITE_FLUSH(hw);
913
914         return status;
915 }
916
917 static inline void
918 ixgbe_enable_intr(struct rte_eth_dev *dev)
919 {
920         struct ixgbe_interrupt *intr =
921                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
922         struct ixgbe_hw *hw =
923                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
924
925         IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
926         IXGBE_WRITE_FLUSH(hw);
927 }
928
929 /*
930  * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
931  */
932 static void
933 ixgbe_disable_intr(struct ixgbe_hw *hw)
934 {
935         PMD_INIT_FUNC_TRACE();
936
937         if (hw->mac.type == ixgbe_mac_82598EB) {
938                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
939         } else {
940                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
941                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
942                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
943         }
944         IXGBE_WRITE_FLUSH(hw);
945 }
946
947 /*
948  * This function resets queue statistics mapping registers.
949  * From Niantic datasheet, Initialization of Statistics section:
950  * "...if software requires the queue counters, the RQSMR and TQSM registers
951  * must be re-programmed following a device reset."
952  */
953 static void
954 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
955 {
956         uint32_t i;
957
958         for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
959                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
960                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
961         }
962 }
963
964
965 static int
966 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
967                                   uint16_t queue_id,
968                                   uint8_t stat_idx,
969                                   uint8_t is_rx)
970 {
971 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
972 #define NB_QMAP_FIELDS_PER_QSM_REG 4
973 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
974
975         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
976         struct ixgbe_stat_mapping_registers *stat_mappings =
977                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
978         uint32_t qsmr_mask = 0;
979         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
980         uint32_t q_map;
981         uint8_t n, offset;
982
983         if ((hw->mac.type != ixgbe_mac_82599EB) &&
984                 (hw->mac.type != ixgbe_mac_X540) &&
985                 (hw->mac.type != ixgbe_mac_X550) &&
986                 (hw->mac.type != ixgbe_mac_X550EM_x) &&
987                 (hw->mac.type != ixgbe_mac_X550EM_a))
988                 return -ENOSYS;
989
990         PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
991                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
992                      queue_id, stat_idx);
993
994         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
995         if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
996                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
997                 return -EIO;
998         }
999         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
1000
1001         /* Now clear any previous stat_idx set */
1002         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
1003         if (!is_rx)
1004                 stat_mappings->tqsm[n] &= ~clearing_mask;
1005         else
1006                 stat_mappings->rqsmr[n] &= ~clearing_mask;
1007
1008         q_map = (uint32_t)stat_idx;
1009         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
1010         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
1011         if (!is_rx)
1012                 stat_mappings->tqsm[n] |= qsmr_mask;
1013         else
1014                 stat_mappings->rqsmr[n] |= qsmr_mask;
1015
1016         PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
1017                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
1018                      queue_id, stat_idx);
1019         PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
1020                      is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
1021
1022         /* Now write the mapping in the appropriate register */
1023         if (is_rx) {
1024                 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
1025                              stat_mappings->rqsmr[n], n);
1026                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
1027         } else {
1028                 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
1029                              stat_mappings->tqsm[n], n);
1030                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
1031         }
1032         return 0;
1033 }
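/*
 * Worked example: each RQSMR/TQSM register packs four 8-bit
 * queue-to-stat-counter fields.  Mapping queue_id = 5 to stat_idx = 3 gives
 * n = 5 / 4 = 1 and offset = 5 % 4 = 1, so bits 15:8 of RQSMR[1] (or TQSM[1]
 * for TX) are cleared and then set to 0x03.
 */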
1034
1035 static void
1036 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
1037 {
1038         struct ixgbe_stat_mapping_registers *stat_mappings =
1039                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
1040         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1041         int i;
1042
1043         /* write whatever was in stat mapping table to the NIC */
1044         for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
1045                 /* rx */
1046                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
1047
1048                 /* tx */
1049                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
1050         }
1051 }
1052
1053 static void
1054 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
1055 {
1056         uint8_t i;
1057         struct ixgbe_dcb_tc_config *tc;
1058         uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
1059
1060         dcb_config->num_tcs.pg_tcs = dcb_max_tc;
1061         dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
1062         for (i = 0; i < dcb_max_tc; i++) {
1063                 tc = &dcb_config->tc_config[i];
1064                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
1065                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
1066                                  (uint8_t)(100/dcb_max_tc + (i & 1));
1067                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
1068                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
1069                                  (uint8_t)(100/dcb_max_tc + (i & 1));
1070                 tc->pfc = ixgbe_dcb_pfc_disabled;
1071         }
1072
1073         /* Initialize default user to priority mapping, UPx->TC0 */
1074         tc = &dcb_config->tc_config[0];
1075         tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
1076         tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
1077         for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
1078                 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
1079                 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
1080         }
1081         dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
1082         dcb_config->pfc_mode_enable = false;
1083         dcb_config->vt_mode = true;
1084         dcb_config->round_robin_enable = false;
1085         /* support all DCB capabilities in 82599 */
1086         dcb_config->support.capabilities = 0xFF;
1087
1088         /* we only support 4 TCs for X540, X550 */
1089         if (hw->mac.type == ixgbe_mac_X540 ||
1090                 hw->mac.type == ixgbe_mac_X550 ||
1091                 hw->mac.type == ixgbe_mac_X550EM_x ||
1092                 hw->mac.type == ixgbe_mac_X550EM_a) {
1093                 dcb_config->num_tcs.pg_tcs = 4;
1094                 dcb_config->num_tcs.pfc_tcs = 4;
1095         }
1096 }
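/*
 * Worked example: with the full 8 traffic classes the bandwidth split above
 * is 100 / 8 = 12 %, plus 1 % for every odd TC, i.e. four TCs at 12 % and
 * four at 13 %, which sums to exactly 100 %.
 */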
1097
1098 /*
1099  * Ensure that all locks are released before first NVM or PHY access
1100  */
1101 static void
1102 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
1103 {
1104         uint16_t mask;
1105
1106         /*
1107          * Phy lock should not fail in this early stage. If it does, it is
1108          * due to an improper exit of the application.
1109          * So force the release of the faulty lock. Release of common lock
1110          * is done automatically by swfw_sync function.
1111          */
1112         mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
1113         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1114                 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
1115         }
1116         ixgbe_release_swfw_semaphore(hw, mask);
1117
1118         /*
1119          * These locks are trickier since they are common to all ports; but the
1120          * swfw_sync retries last long enough (1s) to be almost sure that, if a
1121          * lock cannot be taken, it is because the semaphore was left improperly
1122          * held.
1123          */
1124         mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
1125         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1126                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
1127         }
1128         ixgbe_release_swfw_semaphore(hw, mask);
1129 }
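/*
 * ixgbe_swfw_lock_reset() is called from both eth_ixgbe_dev_init() and
 * eth_ixgbe_dev_uninit(), so semaphores left held by a crashed or improperly
 * exited application are released before the next NVM/PHY access.
 */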
1130
1131 /*
1132  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
1133  * It returns 0 on success.
1134  */
1135 static int
1136 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
1137 {
1138         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
1139         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1140         struct ixgbe_hw *hw =
1141                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1142         struct ixgbe_vfta *shadow_vfta =
1143                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1144         struct ixgbe_hwstrip *hwstrip =
1145                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1146         struct ixgbe_dcb_config *dcb_config =
1147                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
1148         struct ixgbe_filter_info *filter_info =
1149                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1150         uint32_t ctrl_ext;
1151         uint16_t csum;
1152         int diag, i;
1153
1154         PMD_INIT_FUNC_TRACE();
1155
1156         eth_dev->dev_ops = &ixgbe_eth_dev_ops;
1157         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1158         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1159         eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
1160
1161         /*
1162          * For secondary processes, we don't initialise any further as primary
1163          * has already done this work. Only check we don't need a different
1164          * RX and TX function.
1165          */
1166         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1167                 struct ixgbe_tx_queue *txq;
1168                 /* TX queue function in primary, set by last queue initialized;
1169                  * Tx queues may not have been initialized by the primary process
1170                  */
1171                 if (eth_dev->data->tx_queues) {
1172                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1173                         ixgbe_set_tx_function(eth_dev, txq);
1174                 } else {
1175                         /* Use default TX function if we get here */
1176                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1177                                      "Using default TX function.");
1178                 }
1179
1180                 ixgbe_set_rx_function(eth_dev);
1181
1182                 return 0;
1183         }
1184
1185         rte_eth_copy_pci_info(eth_dev, pci_dev);
1186         eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
1187
1188         /* Vendor and Device ID need to be set before init of shared code */
1189         hw->device_id = pci_dev->id.device_id;
1190         hw->vendor_id = pci_dev->id.vendor_id;
1191         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1192         hw->allow_unsupported_sfp = 1;
1193
1194         /* Initialize the shared code (base driver) */
1195 #ifdef RTE_NIC_BYPASS
1196         diag = ixgbe_bypass_init_shared_code(hw);
1197 #else
1198         diag = ixgbe_init_shared_code(hw);
1199 #endif /* RTE_NIC_BYPASS */
1200
1201         if (diag != IXGBE_SUCCESS) {
1202                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1203                 return -EIO;
1204         }
1205
1206         /* pick up the PCI bus settings for reporting later */
1207         ixgbe_get_bus_info(hw);
1208
1209         /* Unlock any pending hardware semaphore */
1210         ixgbe_swfw_lock_reset(hw);
1211
1212         /* Initialize DCB configuration*/
1213         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1214         ixgbe_dcb_init(hw, dcb_config);
1215         /* Get Hardware Flow Control setting */
1216         hw->fc.requested_mode = ixgbe_fc_full;
1217         hw->fc.current_mode = ixgbe_fc_full;
1218         hw->fc.pause_time = IXGBE_FC_PAUSE;
1219         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1220                 hw->fc.low_water[i] = IXGBE_FC_LO;
1221                 hw->fc.high_water[i] = IXGBE_FC_HI;
1222         }
1223         hw->fc.send_xon = 1;
1224
1225         /* Make sure we have a good EEPROM before we read from it */
1226         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1227         if (diag != IXGBE_SUCCESS) {
1228                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1229                 return -EIO;
1230         }
1231
1232 #ifdef RTE_NIC_BYPASS
1233         diag = ixgbe_bypass_init_hw(hw);
1234 #else
1235         diag = ixgbe_init_hw(hw);
1236 #endif /* RTE_NIC_BYPASS */
1237
1238         /*
1239          * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1240          * is called too soon after the kernel driver unbinding/binding occurs.
1241          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1242          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1243          * also called. See ixgbe_identify_phy_82599(). The reason for the
1244          * failure is not known, and it only occurs when virtualisation features
1245          * are disabled in the BIOS. A delay of 100ms was found to be enough by
1246          * trial-and-error, and is doubled to be safe.
1247          */
1248         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1249                 rte_delay_ms(200);
1250                 diag = ixgbe_init_hw(hw);
1251         }
1252
1253         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1254                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1255                              "LOM.  Please be aware there may be issues associated "
1256                              "with your hardware.");
1257                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1258                              "please contact your Intel or hardware representative "
1259                              "who provided you with this hardware.");
1260         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1261                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1262         if (diag) {
1263                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1264                 return -EIO;
1265         }
1266
1267         /* Reset the hw statistics */
1268         ixgbe_dev_stats_reset(eth_dev);
1269
1270         /* disable interrupt */
1271         ixgbe_disable_intr(hw);
1272
1273         /* reset mappings for queue statistics hw counters*/
1274         ixgbe_reset_qstat_mappings(hw);
1275
1276         /* Allocate memory for storing MAC addresses */
1277         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1278                                                hw->mac.num_rar_entries, 0);
1279         if (eth_dev->data->mac_addrs == NULL) {
1280                 PMD_INIT_LOG(ERR,
1281                              "Failed to allocate %u bytes needed to store "
1282                              "MAC addresses",
1283                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1284                 return -ENOMEM;
1285         }
1286         /* Copy the permanent MAC address */
1287         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1288                         &eth_dev->data->mac_addrs[0]);
1289
1290         /* Allocate memory for storing hash filter MAC addresses */
1291         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1292                                                     IXGBE_VMDQ_NUM_UC_MAC, 0);
1293         if (eth_dev->data->hash_mac_addrs == NULL) {
1294                 PMD_INIT_LOG(ERR,
1295                              "Failed to allocate %d bytes needed to store MAC addresses",
1296                              ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1297                 return -ENOMEM;
1298         }
1299
1300         /* initialize the vfta */
1301         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1302
1303         /* initialize the hw strip bitmap*/
1304         memset(hwstrip, 0, sizeof(*hwstrip));
1305
1306         /* initialize PF if max_vfs not zero */
1307         ixgbe_pf_host_init(eth_dev);
1308
1309         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1310         /* let hardware know driver is loaded */
1311         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1312         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1313         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1314         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1315         IXGBE_WRITE_FLUSH(hw);
1316
1317         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1318                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1319                              (int) hw->mac.type, (int) hw->phy.type,
1320                              (int) hw->phy.sfp_type);
1321         else
1322                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1323                              (int) hw->mac.type, (int) hw->phy.type);
1324
1325         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1326                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1327                      pci_dev->id.device_id);
1328
1329         rte_intr_callback_register(intr_handle,
1330                                    ixgbe_dev_interrupt_handler, eth_dev);
1331
1332         /* enable uio/vfio intr/eventfd mapping */
1333         rte_intr_enable(intr_handle);
1334
1335         /* enable support intr */
1336         ixgbe_enable_intr(eth_dev);
1337
1338         /* initialize 5tuple filter list */
1339         TAILQ_INIT(&filter_info->fivetuple_list);
1340         memset(filter_info->fivetuple_mask, 0,
1341                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1342
1343         return 0;
1344 }
1345
1346 static int
1347 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1348 {
1349         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
1350         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1351         struct ixgbe_hw *hw;
1352
1353         PMD_INIT_FUNC_TRACE();
1354
1355         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1356                 return -EPERM;
1357
1358         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1359
1360         if (hw->adapter_stopped == 0)
1361                 ixgbe_dev_close(eth_dev);
1362
1363         eth_dev->dev_ops = NULL;
1364         eth_dev->rx_pkt_burst = NULL;
1365         eth_dev->tx_pkt_burst = NULL;
1366
1367         /* Unlock any pending hardware semaphore */
1368         ixgbe_swfw_lock_reset(hw);
1369
1370         /* disable uio intr before callback unregister */
1371         rte_intr_disable(intr_handle);
1372         rte_intr_callback_unregister(intr_handle,
1373                                      ixgbe_dev_interrupt_handler, eth_dev);
1374
1375         /* uninitialize PF if max_vfs not zero */
1376         ixgbe_pf_host_uninit(eth_dev);
1377
1378         rte_free(eth_dev->data->mac_addrs);
1379         eth_dev->data->mac_addrs = NULL;
1380
1381         rte_free(eth_dev->data->hash_mac_addrs);
1382         eth_dev->data->hash_mac_addrs = NULL;
1383
1384         return 0;
1385 }
1386
1387 /*
1388  * Negotiate mailbox API version with the PF.
1389  * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
1390  * Then we try to negotiate starting with the most recent one.
1391  * If all negotiation attempts fail, then we will proceed with
1392  * the default one (ixgbe_mbox_api_10).
1393  */
1394 static void
1395 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1396 {
1397         int32_t i;
1398
1399         /* start with highest supported, proceed down */
1400         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1401                 ixgbe_mbox_api_12,
1402                 ixgbe_mbox_api_11,
1403                 ixgbe_mbox_api_10,
1404         };
1405
1406         for (i = 0;
1407                         i != RTE_DIM(sup_ver) &&
1408                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1409                         i++)
1410                 ;
1411 }
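/*
 * The loop above stops at the first mailbox API version the PF accepts:
 * api_12 is tried first, then api_11, then api_10. If every attempt fails
 * the VF simply keeps the post-reset default (ixgbe_mbox_api_10).
 */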
1412
1413 static void
1414 generate_random_mac_addr(struct ether_addr *mac_addr)
1415 {
1416         uint64_t random;
1417
1418         /* Set Organizationally Unique Identifier (OUI) prefix. */
1419         mac_addr->addr_bytes[0] = 0x00;
1420         mac_addr->addr_bytes[1] = 0x09;
1421         mac_addr->addr_bytes[2] = 0xC0;
1422         /* Force indication of locally assigned MAC address. */
1423         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1424         /* Generate the last 3 bytes of the MAC address with a random number. */
1425         random = rte_rand();
1426         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1427 }
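/*
 * The generated address has the form 02:09:C0:xx:xx:xx: the 00:09:C0 OUI
 * with the locally-administered bit set in the first octet and the three
 * low bytes taken from rte_rand().
 */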
1428
1429 /*
1430  * Virtual Function device init
1431  */
1432 static int
1433 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1434 {
1435         int diag;
1436         uint32_t tc, tcs;
1437         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
1438         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1439         struct ixgbe_hw *hw =
1440                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1441         struct ixgbe_vfta *shadow_vfta =
1442                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1443         struct ixgbe_hwstrip *hwstrip =
1444                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1445         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1446
1447         PMD_INIT_FUNC_TRACE();
1448
1449         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1450         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1451         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1452
1453         /* for secondary processes, we don't initialise any further as primary
1454          * has already done this work. Only check we don't need a different
1455          * RX function
1456          */
1457         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1458                 struct ixgbe_tx_queue *txq;
1459                 /* TX queue function in primary, set by last queue initialized;
1460                  * Tx queues may not have been initialized by the primary process
1461                  */
1462                 if (eth_dev->data->tx_queues) {
1463                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1464                         ixgbe_set_tx_function(eth_dev, txq);
1465                 } else {
1466                         /* Use default TX function if we get here */
1467                         PMD_INIT_LOG(NOTICE,
1468                                      "No TX queues configured yet. Using default TX function.");
1469                 }
1470
1471                 ixgbe_set_rx_function(eth_dev);
1472
1473                 return 0;
1474         }
1475
1476         rte_eth_copy_pci_info(eth_dev, pci_dev);
1477         eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
1478
1479         hw->device_id = pci_dev->id.device_id;
1480         hw->vendor_id = pci_dev->id.vendor_id;
1481         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1482
1483         /* initialize the vfta */
1484         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1485
1486         /* initialize the hw strip bitmap*/
1487         memset(hwstrip, 0, sizeof(*hwstrip));
1488
1489         /* Initialize the shared code (base driver) */
1490         diag = ixgbe_init_shared_code(hw);
1491         if (diag != IXGBE_SUCCESS) {
1492                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1493                 return -EIO;
1494         }
1495
1496         /* init_mailbox_params */
1497         hw->mbx.ops.init_params(hw);
1498
1499         /* Reset the hw statistics */
1500         ixgbevf_dev_stats_reset(eth_dev);
1501
1502         /* Disable the interrupts for VF */
1503         ixgbevf_intr_disable(hw);
1504
1505         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1506         diag = hw->mac.ops.reset_hw(hw);
1507
1508         /*
1509          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1510          * the underlying PF driver has not assigned a MAC address to the VF.
1511          * In this case, assign a random MAC address.
1512          */
1513         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1514                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1515                 return diag;
1516         }
1517
1518         /* negotiate mailbox API version to use with the PF. */
1519         ixgbevf_negotiate_api(hw);
1520
1521         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1522         ixgbevf_get_queues(hw, &tcs, &tc);
1523
1524         /* Allocate memory for storing MAC addresses */
1525         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1526                                                hw->mac.num_rar_entries, 0);
1527         if (eth_dev->data->mac_addrs == NULL) {
1528                 PMD_INIT_LOG(ERR,
1529                              "Failed to allocate %u bytes needed to store "
1530                              "MAC addresses",
1531                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1532                 return -ENOMEM;
1533         }
1534
1535         /* Generate a random MAC address, if none was assigned by PF. */
1536         if (is_zero_ether_addr(perm_addr)) {
1537                 generate_random_mac_addr(perm_addr);
1538                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1539                 if (diag) {
1540                         rte_free(eth_dev->data->mac_addrs);
1541                         eth_dev->data->mac_addrs = NULL;
1542                         return diag;
1543                 }
1544                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1545                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1546                              "%02x:%02x:%02x:%02x:%02x:%02x",
1547                              perm_addr->addr_bytes[0],
1548                              perm_addr->addr_bytes[1],
1549                              perm_addr->addr_bytes[2],
1550                              perm_addr->addr_bytes[3],
1551                              perm_addr->addr_bytes[4],
1552                              perm_addr->addr_bytes[5]);
1553         }
1554
1555         /* Copy the permanent MAC address */
1556         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1557
1558         /* reset the hardware with the new settings */
1559         diag = hw->mac.ops.start_hw(hw);
1560         switch (diag) {
1561         case  0:
1562                 break;
1563
1564         default:
1565                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1566                 return -EIO;
1567         }
1568
1569         rte_intr_callback_register(intr_handle,
1570                                    ixgbevf_dev_interrupt_handler, eth_dev);
1571         rte_intr_enable(intr_handle);
1572         ixgbevf_intr_enable(hw);
1573
1574         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1575                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1576                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1577
1578         return 0;
1579 }
1580
1581 /* Virtual Function device uninit */
1582
1583 static int
1584 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1585 {
1586         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
1587         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1588         struct ixgbe_hw *hw;
1589
1590         PMD_INIT_FUNC_TRACE();
1591
1592         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1593                 return -EPERM;
1594
1595         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1596
1597         if (hw->adapter_stopped == 0)
1598                 ixgbevf_dev_close(eth_dev);
1599
1600         eth_dev->dev_ops = NULL;
1601         eth_dev->rx_pkt_burst = NULL;
1602         eth_dev->tx_pkt_burst = NULL;
1603
1604         /* Disable the interrupts for VF */
1605         ixgbevf_intr_disable(hw);
1606
1607         rte_free(eth_dev->data->mac_addrs);
1608         eth_dev->data->mac_addrs = NULL;
1609
1610         rte_intr_disable(intr_handle);
1611         rte_intr_callback_unregister(intr_handle,
1612                                      ixgbevf_dev_interrupt_handler, eth_dev);
1613
1614         return 0;
1615 }
1616
1617 static struct eth_driver rte_ixgbe_pmd = {
1618         .pci_drv = {
1619                 .id_table = pci_id_ixgbe_map,
1620                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1621                 .probe = rte_eth_dev_pci_probe,
1622                 .remove = rte_eth_dev_pci_remove,
1623         },
1624         .eth_dev_init = eth_ixgbe_dev_init,
1625         .eth_dev_uninit = eth_ixgbe_dev_uninit,
1626         .dev_private_size = sizeof(struct ixgbe_adapter),
1627 };
1628
1629 /*
1630  * virtual function driver struct
1631  */
1632 static struct eth_driver rte_ixgbevf_pmd = {
1633         .pci_drv = {
1634                 .id_table = pci_id_ixgbevf_map,
1635                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1636                 .probe = rte_eth_dev_pci_probe,
1637                 .remove = rte_eth_dev_pci_remove,
1638         },
1639         .eth_dev_init = eth_ixgbevf_dev_init,
1640         .eth_dev_uninit = eth_ixgbevf_dev_uninit,
1641         .dev_private_size = sizeof(struct ixgbe_adapter),
1642 };
1643
1644 static int
1645 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1646 {
1647         struct ixgbe_hw *hw =
1648                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1649         struct ixgbe_vfta *shadow_vfta =
1650                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1651         uint32_t vfta;
1652         uint32_t vid_idx;
1653         uint32_t vid_bit;
1654
1655         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1656         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1657         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1658         if (on)
1659                 vfta |= vid_bit;
1660         else
1661                 vfta &= ~vid_bit;
1662         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1663
1664         /* update local VFTA copy */
1665         shadow_vfta->vfta[vid_idx] = vfta;
1666
1667         return 0;
1668 }
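/*
 * The VFTA is addressed as 128 x 32-bit words: bits 5..11 of the VLAN ID
 * select the word and bits 0..4 select the bit within it. For example,
 * vlan_id = 100 gives vid_idx = (100 >> 5) & 0x7F = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4 of VFTA[3].
 */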
1669
1670 static void
1671 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1672 {
1673         if (on)
1674                 ixgbe_vlan_hw_strip_enable(dev, queue);
1675         else
1676                 ixgbe_vlan_hw_strip_disable(dev, queue);
1677 }
1678
1679 static int
1680 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1681                     enum rte_vlan_type vlan_type,
1682                     uint16_t tpid)
1683 {
1684         struct ixgbe_hw *hw =
1685                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1686         int ret = 0;
1687         uint32_t reg;
1688         uint32_t qinq;
1689
1690         qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1691         qinq &= IXGBE_DMATXCTL_GDV;
1692
1693         switch (vlan_type) {
1694         case ETH_VLAN_TYPE_INNER:
1695                 if (qinq) {
1696                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1697                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1698                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1699                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1700                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1701                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1702                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1703                 } else {
1704                         ret = -ENOTSUP;
1705                         PMD_DRV_LOG(ERR, "Inner type is not supported"
1706                                     " by single VLAN");
1707                 }
1708                 break;
1709         case ETH_VLAN_TYPE_OUTER:
1710                 if (qinq) {
1711                         /* Only the high 16 bits are valid */
1712                         IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1713                                         IXGBE_EXVET_VET_EXT_SHIFT);
1714                 } else {
1715                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1716                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1717                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1718                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1719                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1720                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1721                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1722                 }
1723
1724                 break;
1725         default:
1726                 ret = -EINVAL;
1727                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1728                 break;
1729         }
1730
1731         return ret;
1732 }
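/*
 * Summary of ixgbe_vlan_tpid_set() above: the inner TPID can only be changed
 * when double VLAN (QinQ, DMATXCTL.GDV) is enabled; the outer TPID is written
 * to the EXVET register in QinQ mode and to VLNCTRL.VET/DMATXCTL.VT otherwise.
 * Applications typically reach this dev op through
 * rte_eth_dev_set_vlan_ether_type() (mentioned for orientation; see the
 * ethdev API).
 */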
1733
1734 void
1735 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1736 {
1737         struct ixgbe_hw *hw =
1738                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1739         uint32_t vlnctrl;
1740
1741         PMD_INIT_FUNC_TRACE();
1742
1743         /* Filter Table Disable */
1744         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1745         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1746
1747         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1748 }
1749
1750 void
1751 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1752 {
1753         struct ixgbe_hw *hw =
1754                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1755         struct ixgbe_vfta *shadow_vfta =
1756                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1757         uint32_t vlnctrl;
1758         uint16_t i;
1759
1760         PMD_INIT_FUNC_TRACE();
1761
1762         /* Filter Table Enable */
1763         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1764         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1765         vlnctrl |= IXGBE_VLNCTRL_VFE;
1766
1767         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1768
1769         /* write whatever is in local vfta copy */
1770         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1771                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1772 }
1773
1774 static void
1775 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1776 {
1777         struct ixgbe_hwstrip *hwstrip =
1778                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1779         struct ixgbe_rx_queue *rxq;
1780
1781         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1782                 return;
1783
1784         if (on)
1785                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1786         else
1787                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1788
1789         if (queue >= dev->data->nb_rx_queues)
1790                 return;
1791
1792         rxq = dev->data->rx_queues[queue];
1793
1794         if (on)
1795                 rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
1796         else
1797                 rxq->vlan_flags = PKT_RX_VLAN_PKT;
1798 }
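/*
 * rxq->vlan_flags chosen above is what the Rx path reports in mbuf ol_flags
 * for VLAN-tagged packets: PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED when HW
 * stripping is on (the tag is moved into mbuf vlan_tci), PKT_RX_VLAN_PKT
 * alone when it is off (the tag is left in the packet data).
 */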
1799
1800 static void
1801 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1802 {
1803         struct ixgbe_hw *hw =
1804                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1805         uint32_t ctrl;
1806
1807         PMD_INIT_FUNC_TRACE();
1808
1809         if (hw->mac.type == ixgbe_mac_82598EB) {
1810                 /* No queue level support */
1811                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue level hw strip");
1812                 return;
1813         }
1814
1815         /* On other 10G NICs, VLAN stripping can be set up per queue in RXDCTL */
1816         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1817         ctrl &= ~IXGBE_RXDCTL_VME;
1818         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1819
1820         /* record this setting for HW strip per queue */
1821         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1822 }
1823
1824 static void
1825 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1826 {
1827         struct ixgbe_hw *hw =
1828                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1829         uint32_t ctrl;
1830
1831         PMD_INIT_FUNC_TRACE();
1832
1833         if (hw->mac.type == ixgbe_mac_82598EB) {
1834                 /* No queue level support */
1835                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue level hw strip");
1836                 return;
1837         }
1838
1839         /* On other 10G NICs, VLAN stripping can be set up per queue in RXDCTL */
1840         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1841         ctrl |= IXGBE_RXDCTL_VME;
1842         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1843
1844         /* record this setting for HW strip per queue */
1845         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1846 }
1847
1848 void
1849 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
1850 {
1851         struct ixgbe_hw *hw =
1852                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1853         uint32_t ctrl;
1854         uint16_t i;
1855         struct ixgbe_rx_queue *rxq;
1856
1857         PMD_INIT_FUNC_TRACE();
1858
1859         if (hw->mac.type == ixgbe_mac_82598EB) {
1860                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1861                 ctrl &= ~IXGBE_VLNCTRL_VME;
1862                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1863         } else {
1864                 /* On other 10G NICs, VLAN stripping can be set up per queue in RXDCTL */
1865                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1866                         rxq = dev->data->rx_queues[i];
1867                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
1868                         ctrl &= ~IXGBE_RXDCTL_VME;
1869                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
1870
1871                         /* record this setting for HW strip per queue */
1872                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
1873                 }
1874         }
1875 }
1876
1877 void
1878 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
1879 {
1880         struct ixgbe_hw *hw =
1881                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1882         uint32_t ctrl;
1883         uint16_t i;
1884         struct ixgbe_rx_queue *rxq;
1885
1886         PMD_INIT_FUNC_TRACE();
1887
1888         if (hw->mac.type == ixgbe_mac_82598EB) {
1889                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1890                 ctrl |= IXGBE_VLNCTRL_VME;
1891                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1892         } else {
1893                 /* On other 10G NICs, VLAN stripping can be set up per queue in RXDCTL */
1894                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1895                         rxq = dev->data->rx_queues[i];
1896                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
1897                         ctrl |= IXGBE_RXDCTL_VME;
1898                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
1899
1900                         /* record this setting for HW strip per queue */
1901                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
1902                 }
1903         }
1904 }
1905
1906 static void
1907 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1908 {
1909         struct ixgbe_hw *hw =
1910                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1911         uint32_t ctrl;
1912
1913         PMD_INIT_FUNC_TRACE();
1914
1915         /* DMATXCTL: Generic Double VLAN Disable */
1916         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1917         ctrl &= ~IXGBE_DMATXCTL_GDV;
1918         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1919
1920         /* CTRL_EXT: Global Double VLAN Disable */
1921         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1922         ctrl &= ~IXGBE_EXTENDED_VLAN;
1923         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1924
1925 }
1926
1927 static void
1928 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1929 {
1930         struct ixgbe_hw *hw =
1931                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1932         uint32_t ctrl;
1933
1934         PMD_INIT_FUNC_TRACE();
1935
1936         /* DMATXCTL: Generic Double VLAN Enable */
1937         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1938         ctrl |= IXGBE_DMATXCTL_GDV;
1939         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1940
1941         /* CTRL_EXT: Global Double VLAN Enable */
1942         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1943         ctrl |= IXGBE_EXTENDED_VLAN;
1944         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1945
1946         /* Clear pooling mode of PFVTCTL. It's required by X550. */
1947         if (hw->mac.type == ixgbe_mac_X550 ||
1948             hw->mac.type == ixgbe_mac_X550EM_x ||
1949             hw->mac.type == ixgbe_mac_X550EM_a) {
1950                 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
1951                 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
1952                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
1953         }
1954
1955         /*
1956          * The VET EXT field in the EXVET register is 0x8100 by default, so no
1957          * need to change it. The same applies to the VT field of DMATXCTL.
1958          */
1959 }
1960
1961 static void
1962 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1963 {
1964         if (mask & ETH_VLAN_STRIP_MASK) {
1965                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1966                         ixgbe_vlan_hw_strip_enable_all(dev);
1967                 else
1968                         ixgbe_vlan_hw_strip_disable_all(dev);
1969         }
1970
1971         if (mask & ETH_VLAN_FILTER_MASK) {
1972                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1973                         ixgbe_vlan_hw_filter_enable(dev);
1974                 else
1975                         ixgbe_vlan_hw_filter_disable(dev);
1976         }
1977
1978         if (mask & ETH_VLAN_EXTEND_MASK) {
1979                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1980                         ixgbe_vlan_hw_extend_enable(dev);
1981                 else
1982                         ixgbe_vlan_hw_extend_disable(dev);
1983         }
1984 }
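/*
 * ixgbe_dev_start() below calls ixgbe_vlan_offload_set() with
 * ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK, so the
 * hw_vlan_strip/hw_vlan_filter/hw_vlan_extend bits of the configured rxmode
 * are all applied to the hardware at start time. A minimal sketch of the
 * application-side configuration (illustrative only; port_id, nb_rxq and
 * nb_txq are assumed):
 *
 *     struct rte_eth_conf conf = {
 *             .rxmode = {
 *                     .hw_vlan_strip  = 1,
 *                     .hw_vlan_filter = 1,
 *             },
 *     };
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */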
1985
1986 static void
1987 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1988 {
1989         struct ixgbe_hw *hw =
1990                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1991         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
1992         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1993
1994         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
1995         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
1996 }
1997
1998 static int
1999 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2000 {
2001         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
2002
2003         switch (nb_rx_q) {
2004         case 1:
2005         case 2:
2006                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2007                 break;
2008         case 4:
2009                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2010                 break;
2011         default:
2012                 return -EINVAL;
2013         }
2014
2015         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
2016         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
2017
2018         return 0;
2019 }
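/*
 * Worked example for ixgbe_check_vf_rss_rxq_num() above (values assumed for
 * illustration): with max_vfs = 16 and nb_rx_q = 4 the device is put in
 * ETH_32_POOLS mode, nb_q_per_pool becomes 4 and the PF's default pool
 * starts at queue index 16 * 4 = 64. Any nb_rx_q other than 1, 2 or 4 is
 * rejected with -EINVAL.
 */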
2020
2021 static int
2022 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2023 {
2024         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2025         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2026         uint16_t nb_rx_q = dev->data->nb_rx_queues;
2027         uint16_t nb_tx_q = dev->data->nb_tx_queues;
2028
2029         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2030                 /* check multi-queue mode */
2031                 switch (dev_conf->rxmode.mq_mode) {
2032                 case ETH_MQ_RX_VMDQ_DCB:
2033                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2034                         break;
2035                 case ETH_MQ_RX_VMDQ_DCB_RSS:
2036                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
2037                         PMD_INIT_LOG(ERR, "SRIOV active,"
2038                                         " unsupported mq_mode rx %d.",
2039                                         dev_conf->rxmode.mq_mode);
2040                         return -EINVAL;
2041                 case ETH_MQ_RX_RSS:
2042                 case ETH_MQ_RX_VMDQ_RSS:
2043                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2044                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2045                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2046                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2047                                                 " invalid queue number"
2048                                                 " for VMDQ RSS, allowed"
2049                                                 " values are 1, 2 or 4.");
2050                                         return -EINVAL;
2051                                 }
2052                         break;
2053                 case ETH_MQ_RX_VMDQ_ONLY:
2054                 case ETH_MQ_RX_NONE:
2055                         /* if no mq mode is configured, use the default scheme */
2056                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2057                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
2058                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
2059                         break;
2060                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
2061                         /* SRIOV only works in VMDq enable mode */
2062                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2063                                         " wrong mq_mode rx %d.",
2064                                         dev_conf->rxmode.mq_mode);
2065                         return -EINVAL;
2066                 }
2067
2068                 switch (dev_conf->txmode.mq_mode) {
2069                 case ETH_MQ_TX_VMDQ_DCB:
2070                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2071                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2072                         break;
2073                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2074                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2075                         break;
2076                 }
2077
2078                 /* check valid queue number */
2079                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2080                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2081                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2082                                         " nb_rx_q=%d nb_tx_q=%d queue number"
2083                                         " must be less than or equal to %d.",
2084                                         nb_rx_q, nb_tx_q,
2085                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2086                         return -EINVAL;
2087                 }
2088         } else {
2089                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2090                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2091                                           " not supported.");
2092                         return -EINVAL;
2093                 }
2094                 /* check configuration for vmdq+dcb mode */
2095                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2096                         const struct rte_eth_vmdq_dcb_conf *conf;
2097
2098                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2099                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2100                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
2101                                 return -EINVAL;
2102                         }
2103                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2104                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2105                                conf->nb_queue_pools == ETH_32_POOLS)) {
2106                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2107                                                 " nb_queue_pools must be %d or %d.",
2108                                                 ETH_16_POOLS, ETH_32_POOLS);
2109                                 return -EINVAL;
2110                         }
2111                 }
2112                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2113                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
2114
2115                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2116                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2117                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
2118                                 return -EINVAL;
2119                         }
2120                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2121                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2122                                conf->nb_queue_pools == ETH_32_POOLS)) {
2123                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2124                                                 " nb_queue_pools != %d and"
2125                                                 " nb_queue_pools != %d.",
2126                                                 ETH_16_POOLS, ETH_32_POOLS);
2127                                 return -EINVAL;
2128                         }
2129                 }
2130
2131                 /* For DCB mode check our configuration before we go further */
2132                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2133                         const struct rte_eth_dcb_rx_conf *conf;
2134
2135                         if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
2136                                 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
2137                                                  IXGBE_DCB_NB_QUEUES);
2138                                 return -EINVAL;
2139                         }
2140                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2141                         if (!(conf->nb_tcs == ETH_4_TCS ||
2142                                conf->nb_tcs == ETH_8_TCS)) {
2143                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2144                                                 " and nb_tcs != %d.",
2145                                                 ETH_4_TCS, ETH_8_TCS);
2146                                 return -EINVAL;
2147                         }
2148                 }
2149
2150                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2151                         const struct rte_eth_dcb_tx_conf *conf;
2152
2153                         if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
2154                                 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
2155                                                  IXGBE_DCB_NB_QUEUES);
2156                                 return -EINVAL;
2157                         }
2158                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2159                         if (!(conf->nb_tcs == ETH_4_TCS ||
2160                                conf->nb_tcs == ETH_8_TCS)) {
2161                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2162                                                 " and nb_tcs != %d.",
2163                                                 ETH_4_TCS, ETH_8_TCS);
2164                                 return -EINVAL;
2165                         }
2166                 }
2167
2168                 /*
2169                  * When DCB/VT is off, maximum number of queues changes,
2170                  * except for 82598EB, which remains constant.
2171                  */
2172                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2173                                 hw->mac.type != ixgbe_mac_82598EB) {
2174                         if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2175                                 PMD_INIT_LOG(ERR,
2176                                              "Neither VT nor DCB are enabled, "
2177                                              "nb_tx_q > %d.",
2178                                              IXGBE_NONE_MODE_TX_NB_QUEUES);
2179                                 return -EINVAL;
2180                         }
2181                 }
2182         }
2183         return 0;
2184 }
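/*
 * In short, ixgbe_check_mq_mode() above enforces two regimes: with SR-IOV
 * active only the VMDq-based Rx/Tx multi-queue modes are accepted and the
 * queue counts are capped at nb_q_per_pool; without SR-IOV, VMDQ+DCB requires
 * exactly IXGBE_VMDQ_DCB_NB_QUEUES queues with 16 or 32 pools, plain DCB
 * requires IXGBE_DCB_NB_QUEUES queues with 4 or 8 TCs, and with neither VT
 * nor DCB the Tx queue count is limited to IXGBE_NONE_MODE_TX_NB_QUEUES
 * (except on 82598EB).
 */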
2185
2186 static int
2187 ixgbe_dev_configure(struct rte_eth_dev *dev)
2188 {
2189         struct ixgbe_interrupt *intr =
2190                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2191         struct ixgbe_adapter *adapter =
2192                 (struct ixgbe_adapter *)dev->data->dev_private;
2193         int ret;
2194
2195         PMD_INIT_FUNC_TRACE();
2196         /* multiple queue mode checking */
2197         ret  = ixgbe_check_mq_mode(dev);
2198         if (ret != 0) {
2199                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2200                             ret);
2201                 return ret;
2202         }
2203
2204         /* set flag to update link status after init */
2205         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2206
2207         /*
2208          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
2209          * allocation or vector Rx preconditions, we will reset it.
2210          */
2211         adapter->rx_bulk_alloc_allowed = true;
2212         adapter->rx_vec_allowed = true;
2213
2214         return 0;
2215 }
2216
2217 static void
2218 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2219 {
2220         struct ixgbe_hw *hw =
2221                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2222         struct ixgbe_interrupt *intr =
2223                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2224         uint32_t gpie;
2225
2226         /* only set up it on X550EM_X */
2227         if (hw->mac.type == ixgbe_mac_X550EM_x) {
2228                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2229                 gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2230                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2231                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2232                         intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2233         }
2234 }
2235
2236 /*
2237  * Configure device link speed and setup link.
2238  * It returns 0 on success.
2239  */
2240 static int
2241 ixgbe_dev_start(struct rte_eth_dev *dev)
2242 {
2243         struct ixgbe_hw *hw =
2244                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2245         struct ixgbe_vf_info *vfinfo =
2246                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2247         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
2248         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2249         uint32_t intr_vector = 0;
2250         int err, link_up = 0, negotiate = 0;
2251         uint32_t speed = 0;
2252         int mask = 0;
2253         int status;
2254         uint16_t vf, idx;
2255         uint32_t *link_speeds;
2256
2257         PMD_INIT_FUNC_TRACE();
2258
2259         /* IXGBE devices don't support:
2260          *    - half duplex (checked afterwards for valid speeds)
2261          *    - fixed speed: TODO implement
2262          */
2263         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2264                 PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fixed speed not supported",
2265                              dev->data->port_id);
2266                 return -EINVAL;
2267         }
2268
2269         /* disable uio/vfio intr/eventfd mapping */
2270         rte_intr_disable(intr_handle);
2271
2272         /* stop adapter */
2273         hw->adapter_stopped = 0;
2274         ixgbe_stop_adapter(hw);
2275
2276         /* reinitialize adapter
2277          * this calls reset and start
2278          */
2279         status = ixgbe_pf_reset_hw(hw);
2280         if (status != 0)
2281                 return -1;
2282         hw->mac.ops.start_hw(hw);
2283         hw->mac.get_link_status = true;
2284
2285         /* configure PF module if SRIOV enabled */
2286         ixgbe_pf_host_configure(dev);
2287
2288         ixgbe_dev_phy_intr_setup(dev);
2289
2290         /* check and configure queue intr-vector mapping */
2291         if ((rte_intr_cap_multiple(intr_handle) ||
2292              !RTE_ETH_DEV_SRIOV(dev).active) &&
2293             dev->data->dev_conf.intr_conf.rxq != 0) {
2294                 intr_vector = dev->data->nb_rx_queues;
2295                 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2296                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2297                                         IXGBE_MAX_INTR_QUEUE_NUM);
2298                         return -ENOTSUP;
2299                 }
2300                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2301                         return -1;
2302         }
2303
2304         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2305                 intr_handle->intr_vec =
2306                         rte_zmalloc("intr_vec",
2307                                     dev->data->nb_rx_queues * sizeof(int), 0);
2308                 if (intr_handle->intr_vec == NULL) {
2309                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2310                                      " intr_vec\n", dev->data->nb_rx_queues);
2311                         return -ENOMEM;
2312                 }
2313         }
2314
2315         /* configure MSI-X for sleep until rx interrupt */
2316         ixgbe_configure_msix(dev);
2317
2318         /* initialize transmission unit */
2319         ixgbe_dev_tx_init(dev);
2320
2321         /* This can fail when allocating mbufs for descriptor rings */
2322         err = ixgbe_dev_rx_init(dev);
2323         if (err) {
2324                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2325                 goto error;
2326         }
2327
2328         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2329                 ETH_VLAN_EXTEND_MASK;
2330         ixgbe_vlan_offload_set(dev, mask);
2331
2332         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2333                 /* Enable vlan filtering for VMDq */
2334                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2335         }
2336
2337         /* Configure DCB hw */
2338         ixgbe_configure_dcb(dev);
2339
2340         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2341                 err = ixgbe_fdir_configure(dev);
2342                 if (err)
2343                         goto error;
2344         }
2345
2346         /* Restore vf rate limit */
2347         if (vfinfo != NULL) {
2348                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
2349                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2350                                 if (vfinfo[vf].tx_rate[idx] != 0)
2351                                         ixgbe_set_vf_rate_limit(dev, vf,
2352                                                 vfinfo[vf].tx_rate[idx],
2353                                                 1 << idx);
2354         }
2355
2356         ixgbe_restore_statistics_mapping(dev);
2357
2358         err = ixgbe_dev_rxtx_start(dev);
2359         if (err < 0) {
2360                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2361                 goto error;
2362         }
2363
2364         /* Skip link setup if loopback mode is enabled for 82599. */
2365         if (hw->mac.type == ixgbe_mac_82599EB &&
2366                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2367                 goto skip_link_setup;
2368
2369         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2370                 err = hw->mac.ops.setup_sfp(hw);
2371                 if (err)
2372                         goto error;
2373         }
2374
2375         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2376                 /* Turn on the copper */
2377                 ixgbe_set_phy_power(hw, true);
2378         } else {
2379                 /* Turn on the laser */
2380                 ixgbe_enable_tx_laser(hw);
2381         }
2382
2383         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2384         if (err)
2385                 goto error;
2386         dev->data->dev_link.link_status = link_up;
2387
2388         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2389         if (err)
2390                 goto error;
2391
2392         link_speeds = &dev->data->dev_conf.link_speeds;
2393         if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2394                         ETH_LINK_SPEED_10G)) {
2395                 PMD_INIT_LOG(ERR, "Invalid link setting");
2396                 goto error;
2397         }
2398
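        /*
         * Map the generic link_speeds bitmap to the IXGBE speed mask used by
         * ixgbe_setup_link(). For example, ETH_LINK_SPEED_1G |
         * ETH_LINK_SPEED_10G becomes IXGBE_LINK_SPEED_1GB_FULL |
         * IXGBE_LINK_SPEED_10GB_FULL, while ETH_LINK_SPEED_AUTONEG selects
         * the MAC-specific autonegotiation mask.
         */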
2399         speed = 0x0;
2400         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2401                 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
2402                                 IXGBE_LINK_SPEED_82599_AUTONEG :
2403                                 IXGBE_LINK_SPEED_82598_AUTONEG;
2404         } else {
2405                 if (*link_speeds & ETH_LINK_SPEED_10G)
2406                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2407                 if (*link_speeds & ETH_LINK_SPEED_1G)
2408                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2409                 if (*link_speeds & ETH_LINK_SPEED_100M)
2410                         speed |= IXGBE_LINK_SPEED_100_FULL;
2411         }
2412
2413         err = ixgbe_setup_link(hw, speed, link_up);
2414         if (err)
2415                 goto error;
2416
2417 skip_link_setup:
2418
2419         if (rte_intr_allow_others(intr_handle)) {
2420                 /* check if lsc interrupt is enabled */
2421                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2422                         ixgbe_dev_lsc_interrupt_setup(dev);
2423                 ixgbe_dev_macsec_interrupt_setup(dev);
2424         } else {
2425                 rte_intr_callback_unregister(intr_handle,
2426                                              ixgbe_dev_interrupt_handler, dev);
2427                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2428                         PMD_INIT_LOG(INFO, "lsc won't be enabled because of"
2429                                      " no intr multiplex\n");
2430         }
2431
2432         /* check if rxq interrupt is enabled */
2433         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2434             rte_intr_dp_is_en(intr_handle))
2435                 ixgbe_dev_rxq_interrupt_setup(dev);
2436
2437         /* enable uio/vfio intr/eventfd mapping */
2438         rte_intr_enable(intr_handle);
2439
2440         /* resume enabled intr since hw reset */
2441         ixgbe_enable_intr(dev);
2442
2443         return 0;
2444
2445 error:
2446         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2447         ixgbe_dev_clear_queues(dev);
2448         return -EIO;
2449 }
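/*
 * Illustrative application-side sequence that ends up in ixgbe_dev_start()
 * (a minimal sketch, not part of the driver; port_id, mbuf_pool and the
 * descriptor counts are assumed for the example):
 *
 *     struct rte_eth_conf conf = { .rxmode = { .mq_mode = ETH_MQ_RX_NONE } };
 *
 *     rte_eth_dev_configure(port_id, 1, 1, &conf);
 *     rte_eth_rx_queue_setup(port_id, 0, 128,
 *                            rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *     rte_eth_tx_queue_setup(port_id, 0, 512,
 *                            rte_eth_dev_socket_id(port_id), NULL);
 *     rte_eth_dev_start(port_id);
 */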
2450
2451 /*
2452  * Stop device: disable rx and tx functions to allow for reconfiguring.
2453  */
2454 static void
2455 ixgbe_dev_stop(struct rte_eth_dev *dev)
2456 {
2457         struct rte_eth_link link;
2458         struct ixgbe_hw *hw =
2459                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2460         struct ixgbe_vf_info *vfinfo =
2461                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2462         struct ixgbe_filter_info *filter_info =
2463                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2464         struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
2465         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
2466         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2467         int vf;
2468
2469         PMD_INIT_FUNC_TRACE();
2470
2471         /* disable interrupts */
2472         ixgbe_disable_intr(hw);
2473
2474         /* reset the NIC */
2475         ixgbe_pf_reset_hw(hw);
2476         hw->adapter_stopped = 0;
2477
2478         /* stop adapter */
2479         ixgbe_stop_adapter(hw);
2480
2481         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2482                 vfinfo[vf].clear_to_send = false;
2483
2484         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2485                 /* Turn off the copper */
2486                 ixgbe_set_phy_power(hw, false);
2487         } else {
2488                 /* Turn off the laser */
2489                 ixgbe_disable_tx_laser(hw);
2490         }
2491
2492         ixgbe_dev_clear_queues(dev);
2493
2494         /* Clear stored conf */
2495         dev->data->scattered_rx = 0;
2496         dev->data->lro = 0;
2497
2498         /* Clear recorded link status */
2499         memset(&link, 0, sizeof(link));
2500         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2501
2502         /* Remove all ntuple filters of the device */
2503         for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
2504              p_5tuple != NULL; p_5tuple = p_5tuple_next) {
2505                 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
2506                 TAILQ_REMOVE(&filter_info->fivetuple_list,
2507                              p_5tuple, entries);
2508                 rte_free(p_5tuple);
2509         }
2510         memset(filter_info->fivetuple_mask, 0,
2511                 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
2512
2513         if (!rte_intr_allow_others(intr_handle))
2514                 /* resume to the default handler */
2515                 rte_intr_callback_register(intr_handle,
2516                                            ixgbe_dev_interrupt_handler,
2517                                            (void *)dev);
2518
2519         /* Clean datapath event and queue/vec mapping */
2520         rte_intr_efd_disable(intr_handle);
2521         if (intr_handle->intr_vec != NULL) {
2522                 rte_free(intr_handle->intr_vec);
2523                 intr_handle->intr_vec = NULL;
2524         }
2525 }
2526
2527 /*
2528  * Set device link up: enable tx.
2529  */
2530 static int
2531 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2532 {
2533         struct ixgbe_hw *hw =
2534                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2535         if (hw->mac.type == ixgbe_mac_82599EB) {
2536 #ifdef RTE_NIC_BYPASS
2537                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2538                         /* Not supported in bypass mode */
2539                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2540                                      "by device id 0x%x", hw->device_id);
2541                         return -ENOTSUP;
2542                 }
2543 #endif
2544         }
2545
2546         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2547                 /* Turn on the copper */
2548                 ixgbe_set_phy_power(hw, true);
2549         } else {
2550                 /* Turn on the laser */
2551                 ixgbe_enable_tx_laser(hw);
2552         }
2553
2554         return 0;
2555 }
2556
2557 /*
2558  * Set device link down: disable tx.
2559  */
2560 static int
2561 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2562 {
2563         struct ixgbe_hw *hw =
2564                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2565         if (hw->mac.type == ixgbe_mac_82599EB) {
2566 #ifdef RTE_NIC_BYPASS
2567                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2568                         /* Not supported in bypass mode */
2569                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2570                                      "by device id 0x%x", hw->device_id);
2571                         return -ENOTSUP;
2572                 }
2573 #endif
2574         }
2575
2576         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2577                 /* Turn off the copper */
2578                 ixgbe_set_phy_power(hw, false);
2579         } else {
2580                 /* Turn off the laser */
2581                 ixgbe_disable_tx_laser(hw);
2582         }
2583
2584         return 0;
2585 }
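
/*
 * Editorial note (not part of the driver): the two callbacks above back the
 * generic rte_eth_dev_set_link_up()/rte_eth_dev_set_link_down() API. A hedged
 * usage sketch, assuming a valid, already started port_id:
 *
 *     if (rte_eth_dev_set_link_down(port_id) == 0) {
 *             ... laser or copper PHY is now powered down ...
 *             rte_eth_dev_set_link_up(port_id);
 *     }
 */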
2586
2587 /*
2588  * Reset and stop device.
2589  */
2590 static void
2591 ixgbe_dev_close(struct rte_eth_dev *dev)
2592 {
2593         struct ixgbe_hw *hw =
2594                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2595
2596         PMD_INIT_FUNC_TRACE();
2597
2598         ixgbe_pf_reset_hw(hw);
2599
2600         ixgbe_dev_stop(dev);
2601         hw->adapter_stopped = 1;
2602
2603         ixgbe_dev_free_queues(dev);
2604
2605         ixgbe_disable_pcie_master(hw);
2606
2607         /* reprogram the RAR[0] in case user changed it. */
2608         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2609 }
2610
2611 static void
2612 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2613                            struct ixgbe_hw_stats *hw_stats,
2614                            struct ixgbe_macsec_stats *macsec_stats,
2615                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
2616                            uint64_t *total_qprc, uint64_t *total_qprdc)
2617 {
2618         uint32_t bprc, lxon, lxoff, total;
2619         uint32_t delta_gprc = 0;
2620         unsigned i;
2621         /* Workaround for RX byte count not including CRC bytes when CRC
2622          * strip is enabled. CRC bytes are removed from counters when crc_strip
2623          * is disabled.
2624          */
2625         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
2626                         IXGBE_HLREG0_RXCRCSTRP);
2627
2628         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2629         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2630         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2631         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2632
2633         for (i = 0; i < 8; i++) {
2634                 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2635
2636                 /* global total per queue */
2637                 hw_stats->mpc[i] += mp;
2638                 /* Running comprehensive total for stats display */
2639                 *total_missed_rx += hw_stats->mpc[i];
2640                 if (hw->mac.type == ixgbe_mac_82598EB) {
2641                         hw_stats->rnbc[i] +=
2642                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2643                         hw_stats->pxonrxc[i] +=
2644                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2645                         hw_stats->pxoffrxc[i] +=
2646                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2647                 } else {
2648                         hw_stats->pxonrxc[i] +=
2649                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2650                         hw_stats->pxoffrxc[i] +=
2651                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2652                         hw_stats->pxon2offc[i] +=
2653                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2654                 }
2655                 hw_stats->pxontxc[i] +=
2656                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2657                 hw_stats->pxofftxc[i] +=
2658                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2659         }
2660         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2661                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2662                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2663                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2664
2665                 delta_gprc += delta_qprc;
2666
2667                 hw_stats->qprc[i] += delta_qprc;
2668                 hw_stats->qptc[i] += delta_qptc;
2669
2670                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2671                 hw_stats->qbrc[i] +=
2672                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2673                 if (crc_strip == 0)
2674                         hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
2675
2676                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2677                 hw_stats->qbtc[i] +=
2678                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2679
2680                 hw_stats->qprdc[i] += delta_qprdc;
2681                 *total_qprdc += hw_stats->qprdc[i];
2682
2683                 *total_qprc += hw_stats->qprc[i];
2684                 *total_qbrc += hw_stats->qbrc[i];
2685         }
2686         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2687         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2688         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2689
2690         /*
2691          * An errata states that gprc actually counts good + missed packets:
2692          * Workaround to set gprc to the summed queue packet receives
2693          */
2694         hw_stats->gprc = *total_qprc;
2695
2696         if (hw->mac.type != ixgbe_mac_82598EB) {
2697                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
2698                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
2699                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
2700                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
2701                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
2702                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
2703                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
2704                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
2705         } else {
2706                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2707                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2708                 /* 82598 only has a counter in the high register */
2709                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2710                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2711                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2712         }
2713         uint64_t old_tpr = hw_stats->tpr;
2714
2715         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2716         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
2717
2718         if (crc_strip == 0)
2719                 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
2720
2721         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
2722         hw_stats->gptc += delta_gptc;
2723         hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
2724         hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
2725
2726         /*
2727          * Workaround: mprc hardware is incorrectly counting
2728          * broadcasts, so for now we subtract those.
2729          */
2730         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2731         hw_stats->bprc += bprc;
2732         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2733         if (hw->mac.type == ixgbe_mac_82598EB)
2734                 hw_stats->mprc -= bprc;
2735
2736         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2737         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2738         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2739         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2740         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2741         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2742
2743         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2744         hw_stats->lxontxc += lxon;
2745         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2746         hw_stats->lxofftxc += lxoff;
2747         total = lxon + lxoff;
2748
2749         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2750         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2751         hw_stats->gptc -= total;
2752         hw_stats->mptc -= total;
2753         hw_stats->ptc64 -= total;
2754         hw_stats->gotc -= total * ETHER_MIN_LEN;
2755
2756         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2757         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2758         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2759         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2760         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
2761         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
2762         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
2763         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2764         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2765         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2766         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2767         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
2768         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2769         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
2770         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
2771         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
2772         /* Only read FCOE on 82599 */
2773         if (hw->mac.type != ixgbe_mac_82598EB) {
2774                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
2775                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
2776                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
2777                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
2778                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
2779         }
2780
2781         /* Flow Director Stats registers */
2782         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
2783         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
2784
2785         /* MACsec Stats registers */
2786         macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
2787         macsec_stats->out_pkts_encrypted +=
2788                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
2789         macsec_stats->out_pkts_protected +=
2790                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
2791         macsec_stats->out_octets_encrypted +=
2792                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
2793         macsec_stats->out_octets_protected +=
2794                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
2795         macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
2796         macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
2797         macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
2798         macsec_stats->in_pkts_unknownsci +=
2799                 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
2800         macsec_stats->in_octets_decrypted +=
2801                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
2802         macsec_stats->in_octets_validated +=
2803                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
2804         macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
2805         macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
2806         macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
2807         for (i = 0; i < 2; i++) {
2808                 macsec_stats->in_pkts_ok +=
2809                         IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
2810                 macsec_stats->in_pkts_invalid +=
2811                         IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
2812                 macsec_stats->in_pkts_notvalid +=
2813                         IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
2814         }
2815         macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
2816         macsec_stats->in_pkts_notusingsa +=
2817                 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
2818 }
2819
2820 /*
2821  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
2822  */
2823 static void
2824 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2825 {
2826         struct ixgbe_hw *hw =
2827                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2828         struct ixgbe_hw_stats *hw_stats =
2829                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2830         struct ixgbe_macsec_stats *macsec_stats =
2831                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
2832                                 dev->data->dev_private);
2833         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2834         unsigned i;
2835
2836         total_missed_rx = 0;
2837         total_qbrc = 0;
2838         total_qprc = 0;
2839         total_qprdc = 0;
2840
2841         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
2842                         &total_qbrc, &total_qprc, &total_qprdc);
2843
2844         if (stats == NULL)
2845                 return;
2846
2847         /* Fill out the rte_eth_stats statistics structure */
2848         stats->ipackets = total_qprc;
2849         stats->ibytes = total_qbrc;
2850         stats->opackets = hw_stats->gptc;
2851         stats->obytes = hw_stats->gotc;
2852
2853         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2854                 stats->q_ipackets[i] = hw_stats->qprc[i];
2855                 stats->q_opackets[i] = hw_stats->qptc[i];
2856                 stats->q_ibytes[i] = hw_stats->qbrc[i];
2857                 stats->q_obytes[i] = hw_stats->qbtc[i];
2858                 stats->q_errors[i] = hw_stats->qprdc[i];
2859         }
2860
2861         /* Rx Errors */
2862         stats->imissed  = total_missed_rx;
2863         stats->ierrors  = hw_stats->crcerrs +
2864                           hw_stats->mspdc +
2865                           hw_stats->rlec +
2866                           hw_stats->ruc +
2867                           hw_stats->roc +
2868                           hw_stats->illerrc +
2869                           hw_stats->errbc +
2870                           hw_stats->rfc +
2871                           hw_stats->fccrc +
2872                           hw_stats->fclast;
2873
2874         /* Tx Errors */
2875         stats->oerrors  = 0;
2876 }
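
/*
 * Editorial note (not part of the driver): applications normally reach this
 * callback through rte_eth_stats_get(). A minimal sketch, assuming a valid
 * port_id:
 *
 *     struct rte_eth_stats stats;
 *
 *     rte_eth_stats_get(port_id, &stats);
 *     printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *            stats.ipackets, stats.opackets, stats.imissed);
 *     rte_eth_stats_reset(port_id);   which maps to ixgbe_dev_stats_reset()
 */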
2877
2878 static void
2879 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
2880 {
2881         struct ixgbe_hw_stats *stats =
2882                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2883
2884         /* HW registers are cleared on read */
2885         ixgbe_dev_stats_get(dev, NULL);
2886
2887         /* Reset software totals */
2888         memset(stats, 0, sizeof(*stats));
2889 }
2890
2891 /* This function calculates the number of xstats based on the current config */
2892 static unsigned
2893 ixgbe_xstats_calc_num(void) {
2894         return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
2895                 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
2896                 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
2897 }
2898
2899 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2900         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
2901 {
2902         const unsigned cnt_stats = ixgbe_xstats_calc_num();
2903         unsigned stat, i, count;
2904
2905         if (xstats_names != NULL) {
2906                 count = 0;
2907
2908                 /* Note: limit >= cnt_stats checked upstream
2909                  * in rte_eth_xstats_names()
2910                  */
2911
2912                 /* Extended stats from ixgbe_hw_stats */
2913                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
2914                         snprintf(xstats_names[count].name,
2915                                 sizeof(xstats_names[count].name),
2916                                 "%s",
2917                                 rte_ixgbe_stats_strings[i].name);
2918                         count++;
2919                 }
2920
2921                 /* MACsec Stats */
2922                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
2923                         snprintf(xstats_names[count].name,
2924                                 sizeof(xstats_names[count].name),
2925                                 "%s",
2926                                 rte_ixgbe_macsec_strings[i].name);
2927                         count++;
2928                 }
2929
2930                 /* RX Priority Stats */
2931                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
2932                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
2933                                 snprintf(xstats_names[count].name,
2934                                         sizeof(xstats_names[count].name),
2935                                         "rx_priority%u_%s", i,
2936                                         rte_ixgbe_rxq_strings[stat].name);
2937                                 count++;
2938                         }
2939                 }
2940
2941                 /* TX Priority Stats */
2942                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
2943                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
2944                                 snprintf(xstats_names[count].name,
2945                                         sizeof(xstats_names[count].name),
2946                                         "tx_priority%u_%s", i,
2947                                         rte_ixgbe_txq_strings[stat].name);
2948                                 count++;
2949                         }
2950                 }
2951         }
2952         return cnt_stats;
2953 }
2954
2955 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2956         struct rte_eth_xstat_name *xstats_names, unsigned limit)
2957 {
2958         unsigned i;
2959
2960         if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
2961                 return -ENOMEM;
2962
2963         if (xstats_names != NULL)
2964                 for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
2965                         snprintf(xstats_names[i].name,
2966                                 sizeof(xstats_names[i].name),
2967                                 "%s", rte_ixgbevf_stats_strings[i].name);
2968         return IXGBEVF_NB_XSTATS;
2969 }
2970
2971 static int
2972 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2973                                          unsigned n)
2974 {
2975         struct ixgbe_hw *hw =
2976                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2977         struct ixgbe_hw_stats *hw_stats =
2978                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2979         struct ixgbe_macsec_stats *macsec_stats =
2980                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
2981                                 dev->data->dev_private);
2982         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2983         unsigned i, stat, count = 0;
2984
2985         count = ixgbe_xstats_calc_num();
2986
2987         if (n < count)
2988                 return count;
2989
2990         total_missed_rx = 0;
2991         total_qbrc = 0;
2992         total_qprc = 0;
2993         total_qprdc = 0;
2994
2995         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
2996                         &total_qbrc, &total_qprc, &total_qprdc);
2997
2998         /* If this is a reset, xstats is NULL, and we have cleared the
2999          * registers by reading them.
3000          */
3001         if (!xstats)
3002                 return 0;
3003
3004         /* Extended stats from ixgbe_hw_stats */
3005         count = 0;
3006         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3007                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3008                                 rte_ixgbe_stats_strings[i].offset);
3009                 xstats[count].id = count;
3010                 count++;
3011         }
3012
3013         /* MACsec Stats */
3014         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3015                 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3016                                 rte_ixgbe_macsec_strings[i].offset);
3017                 xstats[count].id = count;
3018                 count++;
3019         }
3020
3021         /* RX Priority Stats */
3022         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3023                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3024                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3025                                         rte_ixgbe_rxq_strings[stat].offset +
3026                                         (sizeof(uint64_t) * i));
3027                         xstats[count].id = count;
3028                         count++;
3029                 }
3030         }
3031
3032         /* TX Priority Stats */
3033         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3034                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3035                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3036                                         rte_ixgbe_txq_strings[stat].offset +
3037                                         (sizeof(uint64_t) * i));
3038                         xstats[count].id = count;
3039                         count++;
3040                 }
3041         }
3042         return count;
3043 }
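
/*
 * Editorial note (not part of the driver): the xstats callbacks above are
 * normally driven through the generic API with the usual two-step
 * "query count, then fetch" pattern. A sketch, assuming a valid port_id and
 * that the allocations succeed:
 *
 *     int i, n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *     struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *     struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *
 *     rte_eth_xstats_get_names(port_id, names, n);
 *     rte_eth_xstats_get(port_id, vals, n);
 *     for (i = 0; i < n; i++)
 *             printf("%s: %" PRIu64 "\n", names[i].name, vals[i].value);
 */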
3044
3045 static void
3046 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3047 {
3048         struct ixgbe_hw_stats *stats =
3049                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3050         struct ixgbe_macsec_stats *macsec_stats =
3051                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3052                                 dev->data->dev_private);
3053
3054         unsigned count = ixgbe_xstats_calc_num();
3055
3056         /* HW registers are cleared on read */
3057         ixgbe_dev_xstats_get(dev, NULL, count);
3058
3059         /* Reset software totals */
3060         memset(stats, 0, sizeof(*stats));
3061         memset(macsec_stats, 0, sizeof(*macsec_stats));
3062 }
3063
3064 static void
3065 ixgbevf_update_stats(struct rte_eth_dev *dev)
3066 {
3067         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3068         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3069                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3070
3071         /* Good Rx packet, include VF loopback */
3072         UPDATE_VF_STAT(IXGBE_VFGPRC,
3073             hw_stats->last_vfgprc, hw_stats->vfgprc);
3074
3075         /* Good Rx octets, include VF loopback */
3076         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3077             hw_stats->last_vfgorc, hw_stats->vfgorc);
3078
3079         /* Good Tx packet, include VF loopback */
3080         UPDATE_VF_STAT(IXGBE_VFGPTC,
3081             hw_stats->last_vfgptc, hw_stats->vfgptc);
3082
3083         /* Good Tx octets, include VF loopback */
3084         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3085             hw_stats->last_vfgotc, hw_stats->vfgotc);
3086
3087         /* Rx Multicast Packet */
3088         UPDATE_VF_STAT(IXGBE_VFMPRC,
3089             hw_stats->last_vfmprc, hw_stats->vfmprc);
3090 }
3091
3092 static int
3093 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3094                        unsigned n)
3095 {
3096         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3097                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3098         unsigned i;
3099
3100         if (n < IXGBEVF_NB_XSTATS)
3101                 return IXGBEVF_NB_XSTATS;
3102
3103         ixgbevf_update_stats(dev);
3104
3105         if (!xstats)
3106                 return 0;
3107
3108         /* Extended stats */
3109         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3110                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3111                         rte_ixgbevf_stats_strings[i].offset);
3112         }
3113
3114         return IXGBEVF_NB_XSTATS;
3115 }
3116
3117 static void
3118 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3119 {
3120         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3121                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3122
3123         ixgbevf_update_stats(dev);
3124
3125         if (stats == NULL)
3126                 return;
3127
3128         stats->ipackets = hw_stats->vfgprc;
3129         stats->ibytes = hw_stats->vfgorc;
3130         stats->opackets = hw_stats->vfgptc;
3131         stats->obytes = hw_stats->vfgotc;
3132 }
3133
3134 static void
3135 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3136 {
3137         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3138                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3139
3140         /* Sync HW register to the last stats */
3141         ixgbevf_dev_stats_get(dev, NULL);
3142
3143         /* reset HW current stats */
3144         hw_stats->vfgprc = 0;
3145         hw_stats->vfgorc = 0;
3146         hw_stats->vfgptc = 0;
3147         hw_stats->vfgotc = 0;
3148 }
3149
3150 static int
3151 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3152 {
3153         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3154         u16 eeprom_verh, eeprom_verl;
3155         u32 etrack_id;
3156         int ret;
3157
3158         ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3159         ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3160
3161         etrack_id = (eeprom_verh << 16) | eeprom_verl;
3162         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3163
3164         ret += 1; /* add the size of '\0' */
3165         if (fw_size < (u32)ret)
3166                 return ret;
3167         else
3168                 return 0;
3169 }
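
/*
 * Editorial note (not part of the driver): this callback implements the new
 * rte_eth_dev_fw_version_get() ethdev API; for ixgbe the reported version is
 * the EEPROM etrack id. It returns 0 on success, or the required buffer size
 * (including the terminating '\0') when the supplied buffer is too small.
 * A sketch, assuming a valid port_id:
 *
 *     char fw_version[16];
 *     int ret = rte_eth_dev_fw_version_get(port_id, fw_version,
 *                                          sizeof(fw_version));
 *     if (ret == 0)
 *             printf("FW etrack id: %s\n", fw_version);
 *     else if (ret > 0)
 *             printf("need a %d byte buffer\n", ret);
 */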
3170
3171 static void
3172 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3173 {
3174         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
3175         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3176         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3177
3178         dev_info->pci_dev = pci_dev;
3179         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3180         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3181         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3182                 /*
3183                  * When DCB/VT is off, maximum number of queues changes,
3184                  * except for 82598EB, which remains constant.
3185                  */
3186                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3187                                 hw->mac.type != ixgbe_mac_82598EB)
3188                         dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3189         }
3190         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3191         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3192         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3193         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3194         dev_info->max_vfs = pci_dev->max_vfs;
3195         if (hw->mac.type == ixgbe_mac_82598EB)
3196                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3197         else
3198                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3199         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3200         dev_info->rx_offload_capa =
3201                 DEV_RX_OFFLOAD_VLAN_STRIP |
3202                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3203                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3204                 DEV_RX_OFFLOAD_TCP_CKSUM;
3205
3206         /*
3207          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
3208          * mode.
3209          */
3210         if ((hw->mac.type == ixgbe_mac_82599EB ||
3211              hw->mac.type == ixgbe_mac_X540) &&
3212             !RTE_ETH_DEV_SRIOV(dev).active)
3213                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
3214
3215         if (hw->mac.type == ixgbe_mac_82599EB ||
3216             hw->mac.type == ixgbe_mac_X540)
3217                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
3218
3219         if (hw->mac.type == ixgbe_mac_X550 ||
3220             hw->mac.type == ixgbe_mac_X550EM_x ||
3221             hw->mac.type == ixgbe_mac_X550EM_a)
3222                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
3223
3224         dev_info->tx_offload_capa =
3225                 DEV_TX_OFFLOAD_VLAN_INSERT |
3226                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3227                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3228                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3229                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3230                 DEV_TX_OFFLOAD_TCP_TSO;
3231
3232         if (hw->mac.type == ixgbe_mac_82599EB ||
3233             hw->mac.type == ixgbe_mac_X540)
3234                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
3235
3236         if (hw->mac.type == ixgbe_mac_X550 ||
3237             hw->mac.type == ixgbe_mac_X550EM_x ||
3238             hw->mac.type == ixgbe_mac_X550EM_a)
3239                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
3240
3241         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3242                 .rx_thresh = {
3243                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3244                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3245                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3246                 },
3247                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3248                 .rx_drop_en = 0,
3249         };
3250
3251         dev_info->default_txconf = (struct rte_eth_txconf) {
3252                 .tx_thresh = {
3253                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3254                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3255                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3256                 },
3257                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3258                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3259                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3260                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3261         };
3262
3263         dev_info->rx_desc_lim = rx_desc_lim;
3264         dev_info->tx_desc_lim = tx_desc_lim;
3265
3266         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3267         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3268         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3269
3270         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3271         if (hw->mac.type == ixgbe_mac_X540 ||
3272             hw->mac.type == ixgbe_mac_X540_vf ||
3273             hw->mac.type == ixgbe_mac_X550 ||
3274             hw->mac.type == ixgbe_mac_X550_vf) {
3275                 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3276         }
3277 }
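
/*
 * Editorial note (not part of the driver): the capabilities filled in above
 * are retrieved by applications via rte_eth_dev_info_get(). A sketch,
 * assuming a valid port_id:
 *
 *     struct rte_eth_dev_info info;
 *
 *     rte_eth_dev_info_get(port_id, &info);
 *     if (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
 *             printf("port %u supports TSO\n", port_id);
 */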
3278
3279 static const uint32_t *
3280 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3281 {
3282         static const uint32_t ptypes[] = {
3283                 /* For non-vec functions,
3284                  * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3285                  * for vec functions,
3286                  * refers to _recv_raw_pkts_vec().
3287                  */
3288                 RTE_PTYPE_L2_ETHER,
3289                 RTE_PTYPE_L3_IPV4,
3290                 RTE_PTYPE_L3_IPV4_EXT,
3291                 RTE_PTYPE_L3_IPV6,
3292                 RTE_PTYPE_L3_IPV6_EXT,
3293                 RTE_PTYPE_L4_SCTP,
3294                 RTE_PTYPE_L4_TCP,
3295                 RTE_PTYPE_L4_UDP,
3296                 RTE_PTYPE_TUNNEL_IP,
3297                 RTE_PTYPE_INNER_L3_IPV6,
3298                 RTE_PTYPE_INNER_L3_IPV6_EXT,
3299                 RTE_PTYPE_INNER_L4_TCP,
3300                 RTE_PTYPE_INNER_L4_UDP,
3301                 RTE_PTYPE_UNKNOWN
3302         };
3303
3304         if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3305             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3306             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3307             dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3308                 return ptypes;
3309         return NULL;
3310 }
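
/*
 * Editorial note (not part of the driver): the ptype table above is exposed
 * through rte_eth_dev_get_supported_ptypes(). A sketch, assuming a valid
 * port_id:
 *
 *     uint32_t ptypes[16];
 *     int i, n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
 *                                                 ptypes, 16);
 *     for (i = 0; i < n; i++)
 *             printf("ptype 0x%08x\n", ptypes[i]);
 */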
3311
3312 static void
3313 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3314                      struct rte_eth_dev_info *dev_info)
3315 {
3316         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
3317         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3318
3319         dev_info->pci_dev = pci_dev;
3320         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3321         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3322         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3323         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
3324         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3325         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3326         dev_info->max_vfs = pci_dev->max_vfs;
3327         if (hw->mac.type == ixgbe_mac_82598EB)
3328                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3329         else
3330                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3331         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
3332                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3333                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3334                                 DEV_RX_OFFLOAD_TCP_CKSUM;
3335         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
3336                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3337                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3338                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3339                                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3340                                 DEV_TX_OFFLOAD_TCP_TSO;
3341
3342         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3343                 .rx_thresh = {
3344                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3345                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3346                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3347                 },
3348                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3349                 .rx_drop_en = 0,
3350         };
3351
3352         dev_info->default_txconf = (struct rte_eth_txconf) {
3353                 .tx_thresh = {
3354                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3355                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3356                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3357                 },
3358                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3359                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3360                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3361                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3362         };
3363
3364         dev_info->rx_desc_lim = rx_desc_lim;
3365         dev_info->tx_desc_lim = tx_desc_lim;
3366 }
3367
3368 /* return 0 means link status changed, -1 means not changed */
3369 static int
3370 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3371 {
3372         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3373         struct rte_eth_link link, old;
3374         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3375         int link_up;
3376         int diag;
3377
3378         link.link_status = ETH_LINK_DOWN;
3379         link.link_speed = 0;
3380         link.link_duplex = ETH_LINK_HALF_DUPLEX;
3381         memset(&old, 0, sizeof(old));
3382         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
3383
3384         hw->mac.get_link_status = true;
3385
3386         /* check if it needs to wait to complete, if lsc interrupt is enabled */
3387         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
3388                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
3389         else
3390                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
3391
3392         if (diag != 0) {
3393                 link.link_speed = ETH_SPEED_NUM_100M;
3394                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3395                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3396                 if (link.link_status == old.link_status)
3397                         return -1;
3398                 return 0;
3399         }
3400
3401         if (link_up == 0) {
3402                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3403                 if (link.link_status == old.link_status)
3404                         return -1;
3405                 return 0;
3406         }
3407         link.link_status = ETH_LINK_UP;
3408         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3409
3410         switch (link_speed) {
3411         default:
3412         case IXGBE_LINK_SPEED_UNKNOWN:
3413                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3414                 link.link_speed = ETH_SPEED_NUM_100M;
3415                 break;
3416
3417         case IXGBE_LINK_SPEED_100_FULL:
3418                 link.link_speed = ETH_SPEED_NUM_100M;
3419                 break;
3420
3421         case IXGBE_LINK_SPEED_1GB_FULL:
3422                 link.link_speed = ETH_SPEED_NUM_1G;
3423                 break;
3424
3425         case IXGBE_LINK_SPEED_10GB_FULL:
3426                 link.link_speed = ETH_SPEED_NUM_10G;
3427                 break;
3428         }
3429         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3430
3431         if (link.link_status == old.link_status)
3432                 return -1;
3433
3434         return 0;
3435 }
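
/*
 * Editorial note (not part of the driver): both rte_eth_link_get() (blocking)
 * and rte_eth_link_get_nowait() end up in this callback, with wait_to_complete
 * selecting the behaviour. A sketch, assuming a valid port_id:
 *
 *     struct rte_eth_link link;
 *
 *     rte_eth_link_get_nowait(port_id, &link);
 *     if (link.link_status == ETH_LINK_UP)
 *             printf("link up at %u Mbps\n", link.link_speed);
 */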
3436
3437 static void
3438 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
3439 {
3440         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3441         uint32_t fctrl;
3442
3443         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3444         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3445         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3446 }
3447
3448 static void
3449 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
3450 {
3451         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3452         uint32_t fctrl;
3453
3454         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3455         fctrl &= (~IXGBE_FCTRL_UPE);
3456         if (dev->data->all_multicast == 1)
3457                 fctrl |= IXGBE_FCTRL_MPE;
3458         else
3459                 fctrl &= (~IXGBE_FCTRL_MPE);
3460         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3461 }
3462
3463 static void
3464 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
3465 {
3466         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3467         uint32_t fctrl;
3468
3469         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3470         fctrl |= IXGBE_FCTRL_MPE;
3471         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3472 }
3473
3474 static void
3475 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
3476 {
3477         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3478         uint32_t fctrl;
3479
3480         if (dev->data->promiscuous == 1)
3481                 return; /* must remain in all_multicast mode */
3482
3483         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3484         fctrl &= (~IXGBE_FCTRL_MPE);
3485         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3486 }
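
/*
 * Editorial note (not part of the driver): the four FCTRL helpers above back
 * rte_eth_promiscuous_enable()/disable() and
 * rte_eth_allmulticast_enable()/disable(). A sketch, assuming a valid port_id:
 *
 *     rte_eth_promiscuous_enable(port_id);
 *     rte_eth_allmulticast_disable(port_id);
 *
 * Note that MPE stays set while the port is promiscuous, as the disable
 * function above returns early in that case.
 */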
3487
3488 /**
3489  * It clears the interrupt causes and enables the interrupt.
3490  * It will be called only once during NIC initialization.
3491  *
3492  * @param dev
3493  *  Pointer to struct rte_eth_dev.
3494  *
3495  * @return
3496  *  - On success, zero.
3497  *  - On failure, a negative value.
3498  */
3499 static int
3500 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
3501 {
3502         struct ixgbe_interrupt *intr =
3503                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3504
3505         ixgbe_dev_link_status_print(dev);
3506         intr->mask |= IXGBE_EICR_LSC;
3507
3508         return 0;
3509 }
3510
3511 /**
3512  * It clears the interrupt causes and enables the interrupt.
3513  * It will be called only once during NIC initialization.
3514  *
3515  * @param dev
3516  *  Pointer to struct rte_eth_dev.
3517  *
3518  * @return
3519  *  - On success, zero.
3520  *  - On failure, a negative value.
3521  */
3522 static int
3523 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
3524 {
3525         struct ixgbe_interrupt *intr =
3526                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3527
3528         intr->mask |= IXGBE_EICR_RTX_QUEUE;
3529
3530         return 0;
3531 }
3532
3533 /**
3534  * It clears the interrupt causes and enables the interrupt.
3535  * It will be called only once during NIC initialization.
3536  *
3537  * @param dev
3538  *  Pointer to struct rte_eth_dev.
3539  *
3540  * @return
3541  *  - On success, zero.
3542  *  - On failure, a negative value.
3543  */
3544 static int
3545 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
3546 {
3547         struct ixgbe_interrupt *intr =
3548                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3549
3550         intr->mask |= IXGBE_EICR_LINKSEC;
3551
3552         return 0;
3553 }
3554
3555 /*
3556  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
3557  *
3558  * @param dev
3559  *  Pointer to struct rte_eth_dev.
3560  *
3561  * @return
3562  *  - On success, zero.
3563  *  - On failure, a negative value.
3564  */
3565 static int
3566 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
3567 {
3568         uint32_t eicr;
3569         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3570         struct ixgbe_interrupt *intr =
3571                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3572
3573         /* clear all cause mask */
3574         ixgbe_disable_intr(hw);
3575
3576         /* read-on-clear nic registers here */
3577         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3578         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
3579
3580         intr->flags = 0;
3581
3582         /* set flag for async link update */
3583         if (eicr & IXGBE_EICR_LSC)
3584                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3585
3586         if (eicr & IXGBE_EICR_MAILBOX)
3587                 intr->flags |= IXGBE_FLAG_MAILBOX;
3588
3589         if (eicr & IXGBE_EICR_LINKSEC)
3590                 intr->flags |= IXGBE_FLAG_MACSEC;
3591
3592         if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
3593             hw->phy.type == ixgbe_phy_x550em_ext_t &&
3594             (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
3595                 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
3596
3597         return 0;
3598 }
3599
3600 /**
3601  * It gets and then prints the link status.
3602  *
3603  * @param dev
3604  *  Pointer to struct rte_eth_dev.
3605  *
3606  * @return
3607  *  void
3609  */
3610 static void
3611 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
3612 {
3613         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
3614         struct rte_eth_link link;
3615
3616         memset(&link, 0, sizeof(link));
3617         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
3618         if (link.link_status) {
3619                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
3620                                         (int)(dev->data->port_id),
3621                                         (unsigned)link.link_speed,
3622                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
3623                                         "full-duplex" : "half-duplex");
3624         } else {
3625                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
3626                                 (int)(dev->data->port_id));
3627         }
3628         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
3629                                 pci_dev->addr.domain,
3630                                 pci_dev->addr.bus,
3631                                 pci_dev->addr.devid,
3632                                 pci_dev->addr.function);
3633 }
3634
3635 /*
3636  * It executes link_update after an interrupt has occurred.
3637  *
3638  * @param dev
3639  *  Pointer to struct rte_eth_dev.
3640  *
3641  * @return
3642  *  - On success, zero.
3643  *  - On failure, a negative value.
3644  */
3645 static int
3646 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
3647                            struct rte_intr_handle *intr_handle)
3648 {
3649         struct ixgbe_interrupt *intr =
3650                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3651         int64_t timeout;
3652         struct rte_eth_link link;
3653         int intr_enable_delay = false;
3654         struct ixgbe_hw *hw =
3655                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3656
3657         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
3658
3659         if (intr->flags & IXGBE_FLAG_MAILBOX) {
3660                 ixgbe_pf_mbx_process(dev);
3661                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
3662         }
3663
3664         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
3665                 ixgbe_handle_lasi(hw);
3666                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
3667         }
3668
3669         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3670                 /* get the link status before link update, for predicting later */
3671                 memset(&link, 0, sizeof(link));
3672                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
3673
3674                 ixgbe_dev_link_update(dev, 0);
3675
3676                 /* link is likely to come up */
3677                 if (!link.link_status)
3678                         /* handle it 1 sec later, waiting for it to become stable */
3679                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
3680                 /* link is likely to go down */
3681                 else
3682                         /* handle it 4 sec later, waiting for it to become stable */
3683                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
3684
3685                 ixgbe_dev_link_status_print(dev);
3686
3687                 intr_enable_delay = true;
3688         }
3689
3690         if (intr_enable_delay) {
3691                 if (rte_eal_alarm_set(timeout * 1000,
3692                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
3693                         PMD_DRV_LOG(ERR, "Error setting alarm");
3694         } else {
3695                 PMD_DRV_LOG(DEBUG, "enable intr immediately");
3696                 ixgbe_enable_intr(dev);
3697                 rte_intr_enable(intr_handle);
3698         }
3699
3700
3701         return 0;
3702 }
3703
3704 /**
3705  * Interrupt handler which shall be registered as an alarm callback for
3706  * delayed handling of a specific interrupt, waiting for the NIC state to
3707  * become stable. As the ixgbe NIC interrupt state is not stable right after
3708  * the link goes down, it needs to wait 4 seconds to get a stable status.
3709  *
3710  * @param handle
3711  *  Pointer to interrupt handle.
3712  * @param param
3713  *  The address of parameter (struct rte_eth_dev *) registered before.
3714  *
3715  * @return
3716  *  void
3717  */
3718 static void
3719 ixgbe_dev_interrupt_delayed_handler(void *param)
3720 {
3721         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3722         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
3723         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3724         struct ixgbe_interrupt *intr =
3725                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3726         struct ixgbe_hw *hw =
3727                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3728         uint32_t eicr;
3729
3730         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3731         if (eicr & IXGBE_EICR_MAILBOX)
3732                 ixgbe_pf_mbx_process(dev);
3733
3734         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
3735                 ixgbe_handle_lasi(hw);
3736                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
3737         }
3738
3739         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3740                 ixgbe_dev_link_update(dev, 0);
3741                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3742                 ixgbe_dev_link_status_print(dev);
3743                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
3744         }
3745
3746         if (intr->flags & IXGBE_FLAG_MACSEC) {
3747                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
3748                                               NULL);
3749                 intr->flags &= ~IXGBE_FLAG_MACSEC;
3750         }
3751
3752         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3753         ixgbe_enable_intr(dev);
3754         rte_intr_enable(intr_handle);
3755 }
3756
3757 /**
3758  * Interrupt handler triggered by NIC for handling
3759  * specific interrupt.
3760  *
3761  * @param handle
3762  *  Pointer to interrupt handle.
3763  * @param param
3764  *  The address of parameter (struct rte_eth_dev *) registered before.
3765  *
3766  * @return
3767  *  void
3768  */
3769 static void
3770 ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
3771                             void *param)
3772 {
3773         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3774
3775         ixgbe_dev_interrupt_get_status(dev);
3776         ixgbe_dev_interrupt_action(dev, handle);
3777 }
3778
3779 static int
3780 ixgbe_dev_led_on(struct rte_eth_dev *dev)
3781 {
3782         struct ixgbe_hw *hw;
3783
3784         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3785         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
3786 }
3787
3788 static int
3789 ixgbe_dev_led_off(struct rte_eth_dev *dev)
3790 {
3791         struct ixgbe_hw *hw;
3792
3793         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3794         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
3795 }
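
/*
 * Editorial note (not part of the driver): the LED callbacks map to
 * rte_eth_led_on()/rte_eth_led_off(), e.g. to blink a port for physical
 * identification. A sketch, assuming a valid port_id:
 *
 *     rte_eth_led_on(port_id);
 *     rte_delay_ms(500);
 *     rte_eth_led_off(port_id);
 */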
3796
3797 static int
3798 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3799 {
3800         struct ixgbe_hw *hw;
3801         uint32_t mflcn_reg;
3802         uint32_t fccfg_reg;
3803         int rx_pause;
3804         int tx_pause;
3805
3806         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3807
3808         fc_conf->pause_time = hw->fc.pause_time;
3809         fc_conf->high_water = hw->fc.high_water[0];
3810         fc_conf->low_water = hw->fc.low_water[0];
3811         fc_conf->send_xon = hw->fc.send_xon;
3812         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3813
3814         /*
3815          * Return rx_pause status according to actual setting of
3816          * MFLCN register.
3817          */
3818         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3819         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
3820                 rx_pause = 1;
3821         else
3822                 rx_pause = 0;
3823
3824         /*
3825          * Return tx_pause status according to actual setting of
3826          * FCCFG register.
3827          */
3828         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3829         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
3830                 tx_pause = 1;
3831         else
3832                 tx_pause = 0;
3833
3834         if (rx_pause && tx_pause)
3835                 fc_conf->mode = RTE_FC_FULL;
3836         else if (rx_pause)
3837                 fc_conf->mode = RTE_FC_RX_PAUSE;
3838         else if (tx_pause)
3839                 fc_conf->mode = RTE_FC_TX_PAUSE;
3840         else
3841                 fc_conf->mode = RTE_FC_NONE;
3842
3843         return 0;
3844 }
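
/*
 * Editorial illustration: a hedged, application-side sketch of reading back
 * the flow control state reported by the function above. The helper name is
 * hypothetical and the log line is only for illustration.
 */
static __rte_unused int
example_query_flow_ctrl(uint8_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	memset(&fc_conf, 0, sizeof(fc_conf));
	/* dispatches to ixgbe_flow_ctrl_get() for ixgbe ports */
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	PMD_DRV_LOG(DEBUG, "fc mode %d, high_water %u KB, low_water %u KB",
		    fc_conf.mode, fc_conf.high_water, fc_conf.low_water);
	return 0;
}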
3845
3846 static int
3847 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3848 {
3849         struct ixgbe_hw *hw;
3850         int err;
3851         uint32_t rx_buf_size;
3852         uint32_t max_high_water;
3853         uint32_t mflcn;
3854         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
3855                 ixgbe_fc_none,
3856                 ixgbe_fc_rx_pause,
3857                 ixgbe_fc_tx_pause,
3858                 ixgbe_fc_full
3859         };
3860
3861         PMD_INIT_FUNC_TRACE();
3862
3863         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3864         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
3865         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3866
3867         /*
3868          * Reserve at least one Ethernet frame for the high_water/low_water
3869          * watermarks (in kilobytes) for ixgbe
3870          */
3871         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
3872         if ((fc_conf->high_water > max_high_water) ||
3873                 (fc_conf->high_water < fc_conf->low_water)) {
3874                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3875                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3876                 return -EINVAL;
3877         }
3878
3879         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
3880         hw->fc.pause_time     = fc_conf->pause_time;
3881         hw->fc.high_water[0]  = fc_conf->high_water;
3882         hw->fc.low_water[0]   = fc_conf->low_water;
3883         hw->fc.send_xon       = fc_conf->send_xon;
3884         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3885
3886         err = ixgbe_fc_enable(hw);
3887
3888         /* Not negotiated is not an error case */
3889         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
3890
3891                 /* check if we want to forward MAC frames - driver doesn't have native
3892                  * capability to do that, so we'll write the registers ourselves */
3893
3894                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3895
3896                 /* set or clear MFLCN.PMCF bit depending on configuration */
3897                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3898                         mflcn |= IXGBE_MFLCN_PMCF;
3899                 else
3900                         mflcn &= ~IXGBE_MFLCN_PMCF;
3901
3902                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
3903                 IXGBE_WRITE_FLUSH(hw);
3904
3905                 return 0;
3906         }
3907
3908         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
3909         return -EIO;
3910 }
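
/*
 * Editorial illustration: the watermark bound computed above, worked through
 * under the assumption of the default 512 KB Rx packet buffer (the actual
 * value is read from RXPBSIZE):
 *
 *   max_high_water = (0x80000 - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT
 *                  = (524288 - 1518) >> 10
 *                  = 510 KB
 *
 * so the high_water/low_water fields of rte_eth_fc_conf (in KB) must satisfy
 * low_water <= high_water <= 510 in that configuration. Below is a hedged,
 * application-side usage sketch; the helper name is hypothetical.
 */
static __rte_unused int
example_enable_full_flow_ctrl(uint8_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	fc_conf.mode = RTE_FC_FULL;          /* generate and honour PAUSE */
	fc_conf.high_water = IXGBE_FC_HI;    /* KB, within the bound above */
	fc_conf.low_water = IXGBE_FC_LO;     /* KB */
	fc_conf.pause_time = IXGBE_FC_PAUSE;
	fc_conf.send_xon = 1;
	fc_conf.autoneg = 1;

	/* dispatches to ixgbe_flow_ctrl_set() for ixgbe ports */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}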
3911
3912 /**
3913  *  ixgbe_dcb_pfc_enable_generic - Enable flow control
3914  *  @hw: pointer to hardware structure
3915  *  @tc_num: traffic class number
3916  *  Enable flow control according to the current settings.
3917  */
3918 static int
3919 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
3920 {
3921         int ret_val = 0;
3922         uint32_t mflcn_reg, fccfg_reg;
3923         uint32_t reg;
3924         uint32_t fcrtl, fcrth;
3925         uint8_t i;
3926         uint8_t nb_rx_en;
3927
3928         /* Validate the water mark configuration */
3929         if (!hw->fc.pause_time) {
3930                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3931                 goto out;
3932         }
3933
3934         /* Low water mark of zero causes XOFF floods */
3935         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
3936                 /* High/Low water cannot be 0 */
3937                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
3938                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3939                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3940                         goto out;
3941                 }
3942
3943                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
3944                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3945                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3946                         goto out;
3947                 }
3948         }
3949         /* Negotiate the fc mode to use */
3950         ixgbe_fc_autoneg(hw);
3951
3952         /* Disable any previous flow control settings */
3953         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3954         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
3955
3956         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3957         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
3958
3959         switch (hw->fc.current_mode) {
3960         case ixgbe_fc_none:
3961                 /*
3962                  * If more than one RX Priority Flow Control is enabled,
3963                  * the TX pause cannot be disabled.
3964                  */
3965                 nb_rx_en = 0;
3966                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3967                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3968                         if (reg & IXGBE_FCRTH_FCEN)
3969                                 nb_rx_en++;
3970                 }
3971                 if (nb_rx_en > 1)
3972                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3973                 break;
3974         case ixgbe_fc_rx_pause:
3975                 /*
3976                  * Rx Flow control is enabled and Tx Flow control is
3977                  * disabled by software override. Since there really
3978                  * isn't a way to advertise that we are capable of RX
3979                  * Pause ONLY, we will advertise that we support both
3980                  * symmetric and asymmetric Rx PAUSE.  Later, we will
3981                  * disable the adapter's ability to send PAUSE frames.
3982                  */
3983                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
3984                 /*
3985                  * If the count of enabled RX Priority Flow control >1,
3986                  * If more than one RX Priority Flow Control is enabled,
3987                  * the TX pause cannot be disabled.
3988                 nb_rx_en = 0;
3989                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3990                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3991                         if (reg & IXGBE_FCRTH_FCEN)
3992                                 nb_rx_en++;
3993                 }
3994                 if (nb_rx_en > 1)
3995                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3996                 break;
3997         case ixgbe_fc_tx_pause:
3998                 /*
3999                  * Tx Flow control is enabled, and Rx Flow control is
4000                  * disabled by software override.
4001                  */
4002                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4003                 break;
4004         case ixgbe_fc_full:
4005                 /* Flow control (both Rx and Tx) is enabled by SW override. */
4006                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4007                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4008                 break;
4009         default:
4010                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4011                 ret_val = IXGBE_ERR_CONFIG;
4012                 goto out;
4013         }
4014
4015         /* Set 802.3x based flow control settings. */
4016         mflcn_reg |= IXGBE_MFLCN_DPF;
4017         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4018         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4019
4020         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
4021         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4022                 hw->fc.high_water[tc_num]) {
4023                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4024                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4025                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4026         } else {
4027                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4028                 /*
4029                  * In order to prevent Tx hangs when the internal Tx
4030                  * switch is enabled we must set the high water mark
4031                  * to the maximum FCRTH value.  This allows the Tx
4032                  * switch to function even under heavy Rx workloads.
4033                  */
4034                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4035         }
4036         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4037
4038         /* Configure pause time (2 TCs per register) */
4039         reg = hw->fc.pause_time * 0x00010001;
4040         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4041                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4042
4043         /* Configure flow control refresh threshold value */
4044         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4045
4046 out:
4047         return ret_val;
4048 }
4049
4050 static int
4051 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4052 {
4053         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4054         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4055
4056         if (hw->mac.type != ixgbe_mac_82598EB) {
4057                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4058         }
4059         return ret_val;
4060 }
4061
4062 static int
4063 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4064 {
4065         int err;
4066         uint32_t rx_buf_size;
4067         uint32_t max_high_water;
4068         uint8_t tc_num;
4069         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4070         struct ixgbe_hw *hw =
4071                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4072         struct ixgbe_dcb_config *dcb_config =
4073                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4074
4075         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4076                 ixgbe_fc_none,
4077                 ixgbe_fc_rx_pause,
4078                 ixgbe_fc_tx_pause,
4079                 ixgbe_fc_full
4080         };
4081
4082         PMD_INIT_FUNC_TRACE();
4083
4084         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4085         tc_num = map[pfc_conf->priority];
4086         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4087         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4088         /*
4089          * Reserve at least one Ethernet frame for the high_water/low_water
4090          * watermarks (in kilobytes) for ixgbe
4091          */
4092         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4093         if ((pfc_conf->fc.high_water > max_high_water) ||
4094             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4095                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4096                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4097                 return -EINVAL;
4098         }
4099
4100         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4101         hw->fc.pause_time = pfc_conf->fc.pause_time;
4102         hw->fc.send_xon = pfc_conf->fc.send_xon;
4103         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
4104         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4105
4106         err = ixgbe_dcb_pfc_enable(dev, tc_num);
4107
4108         /* Not negotiated is not an error case */
4109         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4110                 return 0;
4111
4112         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4113         return -EIO;
4114 }
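
/*
 * Editorial illustration: a hedged, application-side sketch of enabling
 * priority flow control for a single user priority, which lands in the
 * function above. The watermark values are arbitrary examples and the
 * helper name is hypothetical.
 */
static __rte_unused int
example_enable_pfc_on_priority(uint8_t port_id, uint8_t priority)
{
	struct rte_eth_pfc_conf pfc_conf;

	memset(&pfc_conf, 0, sizeof(pfc_conf));
	pfc_conf.priority = priority;           /* user priority 0..7 */
	pfc_conf.fc.mode = RTE_FC_FULL;
	pfc_conf.fc.high_water = IXGBE_FC_HI;   /* KB */
	pfc_conf.fc.low_water = IXGBE_FC_LO;    /* KB */
	pfc_conf.fc.pause_time = IXGBE_FC_PAUSE;
	pfc_conf.fc.send_xon = 1;

	/* dispatches to ixgbe_priority_flow_ctrl_set() for ixgbe ports */
	return rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
}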
4115
4116 static int
4117 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
4118                           struct rte_eth_rss_reta_entry64 *reta_conf,
4119                           uint16_t reta_size)
4120 {
4121         uint16_t i, sp_reta_size;
4122         uint8_t j, mask;
4123         uint32_t reta, r;
4124         uint16_t idx, shift;
4125         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4126         uint32_t reta_reg;
4127
4128         PMD_INIT_FUNC_TRACE();
4129
4130         if (!ixgbe_rss_update_sp(hw->mac.type)) {
4131                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
4132                         "NIC.");
4133                 return -ENOTSUP;
4134         }
4135
4136         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4137         if (reta_size != sp_reta_size) {
4138                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4139                         "(%d) doesn't match the number hardware can support "
4140                         "(%d)\n", reta_size, sp_reta_size);
4141                 return -EINVAL;
4142         }
4143
4144         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4145                 idx = i / RTE_RETA_GROUP_SIZE;
4146                 shift = i % RTE_RETA_GROUP_SIZE;
4147                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4148                                                 IXGBE_4_BIT_MASK);
4149                 if (!mask)
4150                         continue;
4151                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4152                 if (mask == IXGBE_4_BIT_MASK)
4153                         r = 0;
4154                 else
4155                         r = IXGBE_READ_REG(hw, reta_reg);
4156                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4157                         if (mask & (0x1 << j))
4158                                 reta |= reta_conf[idx].reta[shift + j] <<
4159                                                         (CHAR_BIT * j);
4160                         else
4161                                 reta |= r & (IXGBE_8_BIT_MASK <<
4162                                                 (CHAR_BIT * j));
4163                 }
4164                 IXGBE_WRITE_REG(hw, reta_reg, reta);
4165         }
4166
4167         return 0;
4168 }
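
/*
 * Editorial illustration: how a caller builds the reta_conf[] groups that
 * the update/query handlers above and below walk. Each entry64 group covers
 * RTE_RETA_GROUP_SIZE (64) table entries and only entries whose bit is set
 * in 'mask' are touched. A hedged sketch spreading the table round-robin
 * over 'nb_queues' Rx queues; the helper name is hypothetical.
 */
static __rte_unused int
example_spread_reta(uint8_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE];
	uint16_t i;

	if (nb_queues == 0 || reta_size > ETH_RSS_RETA_SIZE_512)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;        /* entry is valid */
		reta_conf[idx].reta[shift] = i % nb_queues;  /* round-robin */
	}

	/* dispatches to ixgbe_dev_rss_reta_update() for ixgbe ports */
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}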
4169
4170 static int
4171 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4172                          struct rte_eth_rss_reta_entry64 *reta_conf,
4173                          uint16_t reta_size)
4174 {
4175         uint16_t i, sp_reta_size;
4176         uint8_t j, mask;
4177         uint32_t reta;
4178         uint16_t idx, shift;
4179         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4180         uint32_t reta_reg;
4181
4182         PMD_INIT_FUNC_TRACE();
4183         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4184         if (reta_size != sp_reta_size) {
4185                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4186                         "(%d) doesn't match the number hardware can support "
4187                         "(%d)\n", reta_size, sp_reta_size);
4188                 return -EINVAL;
4189         }
4190
4191         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4192                 idx = i / RTE_RETA_GROUP_SIZE;
4193                 shift = i % RTE_RETA_GROUP_SIZE;
4194                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4195                                                 IXGBE_4_BIT_MASK);
4196                 if (!mask)
4197                         continue;
4198
4199                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4200                 reta = IXGBE_READ_REG(hw, reta_reg);
4201                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4202                         if (mask & (0x1 << j))
4203                                 reta_conf[idx].reta[shift + j] =
4204                                         ((reta >> (CHAR_BIT * j)) &
4205                                                 IXGBE_8_BIT_MASK);
4206                 }
4207         }
4208
4209         return 0;
4210 }
4211
4212 static void
4213 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4214                                 uint32_t index, uint32_t pool)
4215 {
4216         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4217         uint32_t enable_addr = 1;
4218
4219         ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
4220 }
4221
4222 static void
4223 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
4224 {
4225         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4226
4227         ixgbe_clear_rar(hw, index);
4228 }
4229
4230 static void
4231 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4232 {
4233         ixgbe_remove_rar(dev, 0);
4234
4235         ixgbe_add_rar(dev, addr, 0, 0);
4236 }
4237
4238 int
4239 rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
4240                 struct ether_addr *mac_addr)
4241 {
4242         struct ixgbe_hw *hw;
4243         struct ixgbe_vf_info *vfinfo;
4244         int rar_entry;
4245         uint8_t *new_mac = (uint8_t *)(mac_addr);
4246         struct rte_eth_dev *dev;
4247         struct rte_eth_dev_info dev_info;
4248
4249         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
4250
4251         dev = &rte_eth_devices[port];
4252         rte_eth_dev_info_get(port, &dev_info);
4253
4254         if (vf >= dev_info.max_vfs)
4255                 return -EINVAL;
4256
4257         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4258         vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
4259         rar_entry = hw->mac.num_rar_entries - (vf + 1);
4260
4261         if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
4262                 rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
4263                                 ETHER_ADDR_LEN);
4264                 return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
4265                                 IXGBE_RAH_AV);
4266         }
4267         return -EINVAL;
4268 }
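
/*
 * Editorial illustration: a hedged usage sketch of the PF-side helper above,
 * assigning a fixed MAC address to VF 0 of a port. The address is an
 * arbitrary locally-administered example and the helper name is hypothetical.
 */
static __rte_unused int
example_assign_vf0_mac(uint8_t port_id)
{
	struct ether_addr vf_mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	/* writes the RAR entry reserved for VF 0 and records it in vfinfo */
	return rte_pmd_ixgbe_set_vf_mac_addr(port_id, 0, &vf_mac);
}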
4269
4270 static int
4271 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4272 {
4273         uint32_t hlreg0;
4274         uint32_t maxfrs;
4275         struct ixgbe_hw *hw;
4276         struct rte_eth_dev_info dev_info;
4277         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4278
4279         ixgbe_dev_info_get(dev, &dev_info);
4280
4281         /* check that mtu is within the allowed range */
4282         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
4283                 return -EINVAL;
4284
4285         /* refuse mtu that requires the support of scattered packets when this
4286          * feature has not been enabled before.
4287          */
4288         if (!dev->data->scattered_rx &&
4289             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
4290              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
4291                 return -EINVAL;
4292
4293         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4294         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4295
4296         /* switch to jumbo mode if needed */
4297         if (frame_size > ETHER_MAX_LEN) {
4298                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
4299                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4300         } else {
4301                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
4302                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4303         }
4304         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4305
4306         /* update max frame size */
4307         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4308
4309         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4310         maxfrs &= 0x0000FFFF;
4311         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
4312         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4313
4314         return 0;
4315 }
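
/*
 * Editorial illustration: the frame-size arithmetic used above, worked for
 * two common MTUs (ETHER_HDR_LEN is 14 bytes and ETHER_CRC_LEN is 4 bytes):
 *
 *   mtu = 1500  ->  frame_size = 1500 + 14 + 4 = 1518 = ETHER_MAX_LEN  (no jumbo)
 *   mtu = 9000  ->  frame_size = 9000 + 14 + 4 = 9018                  (jumbo mode)
 *
 * Below is a hedged, application-side call; the helper name is hypothetical.
 */
static __rte_unused int
example_set_jumbo_mtu(uint8_t port_id)
{
	/*
	 * Dispatches to ixgbe_dev_mtu_set(); it may fail with -EINVAL if the
	 * frame would require scattered Rx and that was not enabled before.
	 */
	return rte_eth_dev_set_mtu(port_id, 9000);
}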
4316
4317 /*
4318  * Virtual Function operations
4319  */
4320 static void
4321 ixgbevf_intr_disable(struct ixgbe_hw *hw)
4322 {
4323         PMD_INIT_FUNC_TRACE();
4324
4325         /* Clear interrupt mask to stop interrupts from being generated */
4326         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
4327
4328         IXGBE_WRITE_FLUSH(hw);
4329 }
4330
4331 static void
4332 ixgbevf_intr_enable(struct ixgbe_hw *hw)
4333 {
4334         PMD_INIT_FUNC_TRACE();
4335
4336         /* VF enable interrupt autoclean */
4337         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
4338         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
4339         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
4340
4341         IXGBE_WRITE_FLUSH(hw);
4342 }
4343
4344 static int
4345 ixgbevf_dev_configure(struct rte_eth_dev *dev)
4346 {
4347         struct rte_eth_conf *conf = &dev->data->dev_conf;
4348         struct ixgbe_adapter *adapter =
4349                         (struct ixgbe_adapter *)dev->data->dev_private;
4350
4351         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
4352                      dev->data->port_id);
4353
4354         /*
4355          * VF has no ability to enable/disable HW CRC
4356          * Keep the persistent behavior the same as Host PF
4357          */
4358 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
4359         if (!conf->rxmode.hw_strip_crc) {
4360                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
4361                 conf->rxmode.hw_strip_crc = 1;
4362         }
4363 #else
4364         if (conf->rxmode.hw_strip_crc) {
4365                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
4366                 conf->rxmode.hw_strip_crc = 0;
4367         }
4368 #endif
4369
4370         /*
4371          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
4372          * allocation or vector Rx preconditions, we will reset it.
4373          */
4374         adapter->rx_bulk_alloc_allowed = true;
4375         adapter->rx_vec_allowed = true;
4376
4377         return 0;
4378 }
4379
4380 static int
4381 ixgbevf_dev_start(struct rte_eth_dev *dev)
4382 {
4383         struct ixgbe_hw *hw =
4384                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4385         uint32_t intr_vector = 0;
4386         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
4387         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4388
4389         int err, mask = 0;
4390
4391         PMD_INIT_FUNC_TRACE();
4392
4393         hw->mac.ops.reset_hw(hw);
4394         hw->mac.get_link_status = true;
4395
4396         /* negotiate mailbox API version to use with the PF. */
4397         ixgbevf_negotiate_api(hw);
4398
4399         ixgbevf_dev_tx_init(dev);
4400
4401         /* This can fail when allocating mbufs for descriptor rings */
4402         err = ixgbevf_dev_rx_init(dev);
4403         if (err) {
4404                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
4405                 ixgbe_dev_clear_queues(dev);
4406                 return err;
4407         }
4408
4409         /* Set vfta */
4410         ixgbevf_set_vfta_all(dev, 1);
4411
4412         /* Set HW strip */
4413         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
4414                 ETH_VLAN_EXTEND_MASK;
4415         ixgbevf_vlan_offload_set(dev, mask);
4416
4417         ixgbevf_dev_rxtx_start(dev);
4418
4419         /* check and configure queue intr-vector mapping */
4420         if (dev->data->dev_conf.intr_conf.rxq != 0) {
4421                 intr_vector = dev->data->nb_rx_queues;
4422                 if (rte_intr_efd_enable(intr_handle, intr_vector))
4423                         return -1;
4424         }
4425
4426         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
4427                 intr_handle->intr_vec =
4428                         rte_zmalloc("intr_vec",
4429                                     dev->data->nb_rx_queues * sizeof(int), 0);
4430                 if (intr_handle->intr_vec == NULL) {
4431                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
4432                                      " intr_vec\n", dev->data->nb_rx_queues);
4433                         return -ENOMEM;
4434                 }
4435         }
4436         ixgbevf_configure_msix(dev);
4437
4438         rte_intr_enable(intr_handle);
4439
4440         /* Re-enable interrupt for VF */
4441         ixgbevf_intr_enable(hw);
4442
4443         return 0;
4444 }
4445
4446 static void
4447 ixgbevf_dev_stop(struct rte_eth_dev *dev)
4448 {
4449         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4450         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
4451         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4452
4453         PMD_INIT_FUNC_TRACE();
4454
4455         ixgbevf_intr_disable(hw);
4456
4457         hw->adapter_stopped = 1;
4458         ixgbe_stop_adapter(hw);
4459
4460         /*
4461          * Clear what we set, but we still keep shadow_vfta to
4462          * restore it after the device starts
4463          */
4464         ixgbevf_set_vfta_all(dev, 0);
4465
4466         /* Clear stored conf */
4467         dev->data->scattered_rx = 0;
4468
4469         ixgbe_dev_clear_queues(dev);
4470
4471         /* Clean datapath event and queue/vec mapping */
4472         rte_intr_efd_disable(intr_handle);
4473         if (intr_handle->intr_vec != NULL) {
4474                 rte_free(intr_handle->intr_vec);
4475                 intr_handle->intr_vec = NULL;
4476         }
4477 }
4478
4479 static void
4480 ixgbevf_dev_close(struct rte_eth_dev *dev)
4481 {
4482         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4483
4484         PMD_INIT_FUNC_TRACE();
4485
4486         ixgbe_reset_hw(hw);
4487
4488         ixgbevf_dev_stop(dev);
4489
4490         ixgbe_dev_free_queues(dev);
4491
4492         /**
4493          * Remove the VF MAC address to ensure
4494          * that the VF traffic goes to the PF
4495          * after stop, close and detach of the VF
4496          **/
4497         ixgbevf_remove_mac_addr(dev, 0);
4498 }
4499
4500 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
4501 {
4502         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4503         struct ixgbe_vfta *shadow_vfta =
4504                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
4505         int i = 0, j = 0, vfta = 0, mask = 1;
4506
4507         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
4508                 vfta = shadow_vfta->vfta[i];
4509                 if (vfta) {
4510                         mask = 1;
4511                         for (j = 0; j < 32; j++) {
4512                                 if (vfta & mask)
4513                                         ixgbe_set_vfta(hw, (i<<5)+j, 0,
4514                                                        on, false);
4515                                 mask <<= 1;
4516                         }
4517                 }
4518         }
4519
4520 }
4521
4522 static int
4523 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
4524 {
4525         struct ixgbe_hw *hw =
4526                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4527         struct ixgbe_vfta *shadow_vfta =
4528                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
4529         uint32_t vid_idx = 0;
4530         uint32_t vid_bit = 0;
4531         int ret = 0;
4532
4533         PMD_INIT_FUNC_TRACE();
4534
4535         /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
4536         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
4537         if (ret) {
4538                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
4539                 return ret;
4540         }
4541         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
4542         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
4543
4544         /* Save what we set and restore it after device reset */
4545         if (on)
4546                 shadow_vfta->vfta[vid_idx] |= vid_bit;
4547         else
4548                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
4549
4550         return 0;
4551 }
4552
4553 static void
4554 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
4555 {
4556         struct ixgbe_hw *hw =
4557                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4558         uint32_t ctrl;
4559
4560         PMD_INIT_FUNC_TRACE();
4561
4562         if (queue >= hw->mac.max_rx_queues)
4563                 return;
4564
4565         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
4566         if (on)
4567                 ctrl |= IXGBE_RXDCTL_VME;
4568         else
4569                 ctrl &= ~IXGBE_RXDCTL_VME;
4570         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
4571
4572         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
4573 }
4574
4575 static void
4576 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4577 {
4578         struct ixgbe_hw *hw =
4579                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4580         uint16_t i;
4581         int on = 0;
4582
4583         /* VF only supports the HW VLAN strip feature; others are not supported */
4584         if (mask & ETH_VLAN_STRIP_MASK) {
4585                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
4586
4587                 for (i = 0; i < hw->mac.max_rx_queues; i++)
4588                         ixgbevf_vlan_strip_queue_set(dev, i, on);
4589         }
4590 }
4591
4592 static int
4593 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
4594 {
4595         uint32_t reg_val;
4596
4597         /* we only need to do this if VMDq is enabled */
4598         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4599         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
4600                 PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
4601                 return -1;
4602         }
4603
4604         return 0;
4605 }
4606
4607 static uint32_t
4608 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
4609 {
4610         uint32_t vector = 0;
4611
4612         switch (hw->mac.mc_filter_type) {
4613         case 0:   /* use bits [47:36] of the address */
4614                 vector = ((uc_addr->addr_bytes[4] >> 4) |
4615                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
4616                 break;
4617         case 1:   /* use bits [46:35] of the address */
4618                 vector = ((uc_addr->addr_bytes[4] >> 3) |
4619                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
4620                 break;
4621         case 2:   /* use bits [45:34] of the address */
4622                 vector = ((uc_addr->addr_bytes[4] >> 2) |
4623                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
4624                 break;
4625         case 3:   /* use bits [43:32] of the address */
4626                 vector = ((uc_addr->addr_bytes[4]) |
4627                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
4628                 break;
4629         default:  /* Invalid mc_filter_type */
4630                 break;
4631         }
4632
4633         /* vector can only be 12 bits wide or the boundary will be exceeded */
4634         vector &= 0xFFF;
4635         return vector;
4636 }
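
/*
 * Editorial illustration: the filter-type-0 case above, worked for a
 * hypothetical address whose last two bytes are addr_bytes[4] = 0xAB and
 * addr_bytes[5] = 0xCD:
 *
 *   vector = (0xAB >> 4) | (0xCD << 4)
 *          = 0x0A | 0xCD0
 *          = 0xCDA                       (already within 12 bits)
 *
 * The low 5 bits (0x1A) later select the bit within a UTA register and the
 * remaining bits (0x66) select which of the 128 UTA registers is used, as
 * done by the uta_shift/uta_idx split in ixgbe_uc_hash_table_set() below.
 */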
4637
4638 static int
4639 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4640                         uint8_t on)
4641 {
4642         uint32_t vector;
4643         uint32_t uta_idx;
4644         uint32_t reg_val;
4645         uint32_t uta_shift;
4646         uint32_t rc;
4647         const uint32_t ixgbe_uta_idx_mask = 0x7F;
4648         const uint32_t ixgbe_uta_bit_shift = 5;
4649         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
4650         const uint32_t bit1 = 0x1;
4651
4652         struct ixgbe_hw *hw =
4653                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4654         struct ixgbe_uta_info *uta_info =
4655                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
4656
4657         /* The UTA table only exists on 82599 hardware and newer */
4658         if (hw->mac.type < ixgbe_mac_82599EB)
4659                 return -ENOTSUP;
4660
4661         vector = ixgbe_uta_vector(hw, mac_addr);
4662         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
4663         uta_shift = vector & ixgbe_uta_bit_mask;
4664
4665         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
4666         if (rc == on)
4667                 return 0;
4668
4669         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
4670         if (on) {
4671                 uta_info->uta_in_use++;
4672                 reg_val |= (bit1 << uta_shift);
4673                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
4674         } else {
4675                 uta_info->uta_in_use--;
4676                 reg_val &= ~(bit1 << uta_shift);
4677                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
4678         }
4679
4680         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
4681
4682         if (uta_info->uta_in_use > 0)
4683                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
4684                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
4685         else
4686                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
4687
4688         return 0;
4689 }
4690
4691 static int
4692 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
4693 {
4694         int i;
4695         struct ixgbe_hw *hw =
4696                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4697         struct ixgbe_uta_info *uta_info =
4698                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
4699
4700         /* The UTA table only exists on 82599 hardware and newer */
4701         if (hw->mac.type < ixgbe_mac_82599EB)
4702                 return -ENOTSUP;
4703
4704         if (on) {
4705                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
4706                         uta_info->uta_shadow[i] = ~0;
4707                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
4708                 }
4709         } else {
4710                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
4711                         uta_info->uta_shadow[i] = 0;
4712                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
4713                 }
4714         }
4715         return 0;
4716
4717 }
4718
4719 uint32_t
4720 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
4721 {
4722         uint32_t new_val = orig_val;
4723
4724         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
4725                 new_val |= IXGBE_VMOLR_AUPE;
4726         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
4727                 new_val |= IXGBE_VMOLR_ROMPE;
4728         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
4729                 new_val |= IXGBE_VMOLR_ROPE;
4730         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
4731                 new_val |= IXGBE_VMOLR_BAM;
4732         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
4733                 new_val |= IXGBE_VMOLR_MPE;
4734
4735         return new_val;
4736 }
4737
4738 static int
4739 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
4740                                uint16_t rx_mask, uint8_t on)
4741 {
4742         int val = 0;
4743
4744         struct ixgbe_hw *hw =
4745                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4746         uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4747
4748         if (hw->mac.type == ixgbe_mac_82598EB) {
4749                 PMD_INIT_LOG(ERR, "setting VF receive mode should only be done"
4750                              " on 82599 hardware and newer");
4751                 return -ENOTSUP;
4752         }
4753         if (ixgbe_vmdq_mode_check(hw) < 0)
4754                 return -ENOTSUP;
4755
4756         val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
4757
4758         if (on)
4759                 vmolr |= val;
4760         else
4761                 vmolr &= ~val;
4762
4763         IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4764
4765         return 0;
4766 }
4767
4768 static int
4769 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4770 {
4771         uint32_t reg, addr;
4772         uint32_t val;
4773         const uint8_t bit1 = 0x1;
4774
4775         struct ixgbe_hw *hw =
4776                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4777
4778         if (ixgbe_vmdq_mode_check(hw) < 0)
4779                 return -ENOTSUP;
4780
4781         if (pool >= ETH_64_POOLS)
4782                 return -EINVAL;
4783
4784         /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
4785         if (pool >= 32) {
4786                 addr = IXGBE_VFRE(1);
4787                 val = bit1 << (pool - 32);
4788         } else {
4789                 addr = IXGBE_VFRE(0);
4790                 val = bit1 << pool;
4791         }
4792
4793         reg = IXGBE_READ_REG(hw, addr);
4794
4795         if (on)
4796                 reg |= val;
4797         else
4798                 reg &= ~val;
4799
4800         IXGBE_WRITE_REG(hw, addr, reg);
4801
4802         return 0;
4803 }
4804
4805 static int
4806 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4807 {
4808         uint32_t reg, addr;
4809         uint32_t val;
4810         const uint8_t bit1 = 0x1;
4811
4812         struct ixgbe_hw *hw =
4813                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4814
4815         if (ixgbe_vmdq_mode_check(hw) < 0)
4816                 return -ENOTSUP;
4817
4818         if (pool >= ETH_64_POOLS)
4819                 return -EINVAL;
4820
4821         /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
4822         if (pool >= 32) {
4823                 addr = IXGBE_VFTE(1);
4824                 val = bit1 << (pool - 32);
4825         } else {
4826                 addr = IXGBE_VFTE(0);
4827                 val = bit1 << pool;
4828         }
4829
4830         reg = IXGBE_READ_REG(hw, addr);
4831
4832         if (on)
4833                 reg |= val;
4834         else
4835                 reg &= ~val;
4836
4837         IXGBE_WRITE_REG(hw, addr, reg);
4838
4839         return 0;
4840 }
4841
4842 static int
4843 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
4844                         uint64_t pool_mask, uint8_t vlan_on)
4845 {
4846         int ret = 0;
4847         uint16_t pool_idx;
4848         struct ixgbe_hw *hw =
4849                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4850
4851         if (ixgbe_vmdq_mode_check(hw) < 0)
4852                 return -ENOTSUP;
4853         for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
4854                 if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
4855                         ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx,
4856                                                    vlan_on, false);
4857                         if (ret < 0)
4858                                 return ret;
4859                 }
4860         }
4861
4862         return ret;
4863 }
4864
4865 int
4866 rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
4867 {
4868         struct ixgbe_hw *hw;
4869         struct ixgbe_mac_info *mac;
4870         struct rte_eth_dev *dev;
4871         struct rte_eth_dev_info dev_info;
4872
4873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
4874
4875         dev = &rte_eth_devices[port];
4876         rte_eth_dev_info_get(port, &dev_info);
4877
4878         if (vf >= dev_info.max_vfs)
4879                 return -EINVAL;
4880
4881         if (on > 1)
4882                 return -EINVAL;
4883
4884         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4885         mac = &hw->mac;
4886
4887         mac->ops.set_vlan_anti_spoofing(hw, on, vf);
4888
4889         return 0;
4890 }
4891
4892 int
4893 rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
4894 {
4895         struct ixgbe_hw *hw;
4896         struct ixgbe_mac_info *mac;
4897         struct rte_eth_dev *dev;
4898         struct rte_eth_dev_info dev_info;
4899
4900         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
4901
4902         dev = &rte_eth_devices[port];
4903         rte_eth_dev_info_get(port, &dev_info);
4904
4905         if (vf >= dev_info.max_vfs)
4906                 return -EINVAL;
4907
4908         if (on > 1)
4909                 return -EINVAL;
4910
4911         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4912         mac = &hw->mac;
4913         mac->ops.set_mac_anti_spoofing(hw, on, vf);
4914
4915         return 0;
4916 }
4917
4918 int
4919 rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
4920 {
4921         struct ixgbe_hw *hw;
4922         uint32_t ctrl;
4923         struct rte_eth_dev *dev;
4924         struct rte_eth_dev_info dev_info;
4925
4926         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
4927
4928         dev = &rte_eth_devices[port];
4929         rte_eth_dev_info_get(port, &dev_info);
4930
4931         if (vf >= dev_info.max_vfs)
4932                 return -EINVAL;
4933
4934         if (vlan_id > 4095)
4935                 return -EINVAL;
4936
4937         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4938         ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
4939         if (vlan_id) {
4940                 ctrl = vlan_id;
4941                 ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
4942         } else {
4943                 ctrl = 0;
4944         }
4945
4946         IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
4947
4948         return 0;
4949 }
4950
4951 int
4952 rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
4953 {
4954         struct ixgbe_hw *hw;
4955         uint32_t ctrl;
4956         struct rte_eth_dev *dev;
4957
4958         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
4959
4960         dev = &rte_eth_devices[port];
4961
4962         if (on > 1)
4963                 return -EINVAL;
4964
4965         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4966         ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4967         /* enable or disable VMDQ loopback */
4968         if (on)
4969                 ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
4970         else
4971                 ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
4972
4973         IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
4974
4975         return 0;
4976 }
4977
4978 int
4979 rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
4980 {
4981         struct ixgbe_hw *hw;
4982         uint32_t reg_value;
4983         int i;
4984         int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
4985         struct rte_eth_dev *dev;
4986
4987         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
4988
4989         dev = &rte_eth_devices[port];
4990
4991         if (on > 1)
4992                 return -EINVAL;
4993
4994         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4995         for (i = 0; i <= num_queues; i++) {
4996                 reg_value = IXGBE_QDE_WRITE |
4997                                 (i << IXGBE_QDE_IDX_SHIFT) |
4998                                 (on & IXGBE_QDE_ENABLE);
4999                 IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
5000         }
5001
5002         return 0;
5003 }
5004
5005 int
5006 rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
5007 {
5008         struct ixgbe_hw *hw;
5009         uint32_t reg_value;
5010         struct rte_eth_dev *dev;
5011         struct rte_eth_dev_info dev_info;
5012
5013         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
5014
5015         dev = &rte_eth_devices[port];
5016         rte_eth_dev_info_get(port, &dev_info);
5017
5018         /* only VFs 0 to 63 are supported */
5019         if ((vf >= dev_info.max_vfs) || (vf > 63))
5020                 return -EINVAL;
5021
5022         if (on > 1)
5023                 return -EINVAL;
5024
5025         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5026         reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
5027         if (on)
5028                 reg_value |= IXGBE_SRRCTL_DROP_EN;
5029         else
5030                 reg_value &= ~IXGBE_SRRCTL_DROP_EN;
5031
5032         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
5033
5034         return 0;
5035 }
5036
5037 int
5038 rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
5039 {
5040         struct rte_eth_dev *dev;
5041         struct rte_eth_dev_info dev_info;
5042         uint16_t queues_per_pool;
5043         uint32_t q;
5044
5045         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
5046
5047         dev = &rte_eth_devices[port];
5048         rte_eth_dev_info_get(port, &dev_info);
5049
5050         if (vf >= dev_info.max_vfs)
5051                 return -EINVAL;
5052
5053         if (on > 1)
5054                 return -EINVAL;
5055
5056         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
5057
5058         /* The PF has 128 queue pairs and in SRIOV configuration
5059          * those queues will be assigned to VFs, so the RXDCTL
5060          * registers will be dealing with queues that are
5061          * assigned to VFs.
5062          * For example, with SRIOV configured for 31 VFs, the
5063          * first 124 queues (0-123) are allocated to the VFs and only
5064          * the last 4 queues (124-127) are assigned to the PF.
5065          */
5066
5067         queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
5068
5069         for (q = 0; q < queues_per_pool; q++)
5070                 (*dev->dev_ops->vlan_strip_queue_set)(dev,
5071                                 q + vf * queues_per_pool, on);
5072         return 0;
5073 }
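
/*
 * Editorial illustration: the queue layout described in the comment above,
 * worked for one assumed configuration of 128 VMDq queues split over 32
 * pools (31 VFs plus the PF):
 *
 *   queues_per_pool = vmdq_queue_num / max_vmdq_pools = 128 / 32 = 4
 *
 * so VF n owns queues n*4 .. n*4+3; a call with vf = 5 therefore toggles
 * VLAN stripping on queues 20-23 only.
 */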
5074
5075 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
5076 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
5077 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
5078 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
5079 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5080         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5081         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5082
5083 static int
5084 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5085                         struct rte_eth_mirror_conf *mirror_conf,
5086                         uint8_t rule_id, uint8_t on)
5087 {
5088         uint32_t mr_ctl, vlvf;
5089         uint32_t mp_lsb = 0;
5090         uint32_t mv_msb = 0;
5091         uint32_t mv_lsb = 0;
5092         uint32_t mp_msb = 0;
5093         uint8_t i = 0;
5094         int reg_index = 0;
5095         uint64_t vlan_mask = 0;
5096
5097         const uint8_t pool_mask_offset = 32;
5098         const uint8_t vlan_mask_offset = 32;
5099         const uint8_t dst_pool_offset = 8;
5100         const uint8_t rule_mr_offset  = 4;
5101         const uint8_t mirror_rule_mask = 0x0F;
5102
5103         struct ixgbe_mirror_info *mr_info =
5104                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5105         struct ixgbe_hw *hw =
5106                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5107         uint8_t mirror_type = 0;
5108
5109         if (ixgbe_vmdq_mode_check(hw) < 0)
5110                 return -ENOTSUP;
5111
5112         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5113                 return -EINVAL;
5114
5115         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5116                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5117                         mirror_conf->rule_type);
5118                 return -EINVAL;
5119         }
5120
5121         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5122                 mirror_type |= IXGBE_MRCTL_VLME;
5123                 /* Check if the vlan id is valid and find the corresponding VLAN ID index in VLVF */
5124                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5125                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5126                                 /* find the pool vlan filter index related to this vlan id */
5127                                 reg_index = ixgbe_find_vlvf_slot(hw,
5128                                                  mirror_conf->vlan.vlan_id[i],
5129                                                  false);
5130                                 if (reg_index < 0)
5131                                         return -EINVAL;
5132                                 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
5133                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
5134                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5135                                       mirror_conf->vlan.vlan_id[i]))
5136                                         vlan_mask |= (1ULL << reg_index);
5137                                 else
5138                                         return -EINVAL;
5139                         }
5140                 }
5141
5142                 if (on) {
5143                         mv_lsb = vlan_mask & 0xFFFFFFFF;
5144                         mv_msb = vlan_mask >> vlan_mask_offset;
5145
5146                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
5147                                                 mirror_conf->vlan.vlan_mask;
5148                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5149                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5150                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5151                                                 mirror_conf->vlan.vlan_id[i];
5152                         }
5153                 } else {
5154                         mv_lsb = 0;
5155                         mv_msb = 0;
5156                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5157                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5158                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5159                 }
5160         }
5161
5162         /*
5163          * If pool mirroring is enabled, write the related pool mask register;
5164          * if it is disabled, clear the PFMRVM register
5165          */
5166         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5167                 mirror_type |= IXGBE_MRCTL_VPME;
5168                 if (on) {
5169                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5170                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5171                         mr_info->mr_conf[rule_id].pool_mask =
5172                                         mirror_conf->pool_mask;
5173
5174                 } else {
5175                         mp_lsb = 0;
5176                         mp_msb = 0;
5177                         mr_info->mr_conf[rule_id].pool_mask = 0;
5178                 }
5179         }
5180         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5181                 mirror_type |= IXGBE_MRCTL_UPME;
5182         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5183                 mirror_type |= IXGBE_MRCTL_DPME;
5184
5185         /* read mirror control register and recalculate it */
5186         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5187
5188         if (on) {
5189                 mr_ctl |= mirror_type;
5190                 mr_ctl &= mirror_rule_mask;
5191                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
5192         } else
5193                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
5194
5195         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5196         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5197
5198         /* write mirror control register */
5199         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5200
5201         /* write pool mirror control register */
5202         if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
5203                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5204                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5205                                 mp_msb);
5206         }
5207         /* write VLAN mirror control register */
5208         if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
5209                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5210                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5211                                 mv_msb);
5212         }
5213
5214         return 0;
5215 }
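
/*
 * Editorial illustration: a hedged sketch of installing a pool-mirroring
 * rule through the generic ethdev API, which dispatches to the handler
 * above. The pool numbers and rule id are arbitrary examples and the helper
 * name is hypothetical.
 */
static __rte_unused int
example_mirror_pools_to_pool(uint8_t port_id)
{
	struct rte_eth_mirror_conf mirror_conf;

	memset(&mirror_conf, 0, sizeof(mirror_conf));
	mirror_conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP;
	mirror_conf.pool_mask = (1ULL << 1) | (1ULL << 2);  /* mirror pools 1 and 2 */
	mirror_conf.dst_pool = 0;                           /* ... into pool 0 */

	/* rule_id 0, enable the rule */
	return rte_eth_mirror_rule_set(port_id, &mirror_conf, 0, 1);
}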
5216
5217 static int
5218 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5219 {
5220         int mr_ctl = 0;
5221         uint32_t lsb_val = 0;
5222         uint32_t msb_val = 0;
5223         const uint8_t rule_mr_offset = 4;
5224
5225         struct ixgbe_hw *hw =
5226                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5227         struct ixgbe_mirror_info *mr_info =
5228                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5229
5230         if (ixgbe_vmdq_mode_check(hw) < 0)
5231                 return -ENOTSUP;
5232
5233         memset(&mr_info->mr_conf[rule_id], 0,
5234                 sizeof(struct rte_eth_mirror_conf));
5235
5236         /* clear PFVMCTL register */
5237         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5238
5239         /* clear pool mask register */
5240         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5241         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5242
5243         /* clear vlan mask register */
5244         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5245         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
5246
5247         return 0;
5248 }
5249
5250 static int
5251 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5252 {
5253         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
5254         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5255         uint32_t mask;
5256         struct ixgbe_hw *hw =
5257                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5258
5259         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5260         mask |= (1 << IXGBE_MISC_VEC_ID);
5261         RTE_SET_USED(queue_id);
5262         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5263
5264         rte_intr_enable(intr_handle);
5265
5266         return 0;
5267 }
5268
5269 static int
5270 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5271 {
5272         uint32_t mask;
5273         struct ixgbe_hw *hw =
5274                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5275
5276         mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5277         mask &= ~(1 << IXGBE_MISC_VEC_ID);
5278         RTE_SET_USED(queue_id);
5279         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5280
5281         return 0;
5282 }
5283
5284 static int
5285 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5286 {
5287         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
5288         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5289         uint32_t mask;
5290         struct ixgbe_hw *hw =
5291                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5292         struct ixgbe_interrupt *intr =
5293                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5294
5295         if (queue_id < 16) {
5296                 ixgbe_disable_intr(hw);
5297                 intr->mask |= (1 << queue_id);
5298                 ixgbe_enable_intr(dev);
5299         } else if (queue_id < 32) {
5300                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5301                 mask &= (1 << queue_id);
5302                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5303         } else if (queue_id < 64) {
5304                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5305                 mask &= (1 << (queue_id - 32));
5306                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5307         }
5308         rte_intr_enable(intr_handle);
5309
5310         return 0;
5311 }
5312
5313 static int
5314 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5315 {
5316         uint32_t mask;
5317         struct ixgbe_hw *hw =
5318                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5319         struct ixgbe_interrupt *intr =
5320                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5321
5322         if (queue_id < 16) {
5323                 ixgbe_disable_intr(hw);
5324                 intr->mask &= ~(1 << queue_id);
5325                 ixgbe_enable_intr(dev);
5326         } else if (queue_id < 32) {
5327                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5328                 mask &= ~(1 << queue_id);
5329                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5330         } else if (queue_id < 64) {
5331                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5332                 mask &= ~(1 << (queue_id - 32));
5333                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5334         }
5335
5336         return 0;
5337 }
5338
5339 static void
5340 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5341                      uint8_t queue, uint8_t msix_vector)
5342 {
5343         uint32_t tmp, idx;
5344
5345         if (direction == -1) {
5346                 /* other causes */
5347                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5348                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
5349                 tmp &= ~0xFF;
5350                 tmp |= msix_vector;
5351                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
5352         } else {
5353                 /* rx or tx cause */
5354                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5355                 idx = ((16 * (queue & 1)) + (8 * direction));
5356                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
5357                 tmp &= ~(0xFF << idx);
5358                 tmp |= (msix_vector << idx);
5359                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
5360         }
5361 }
5362
5363 /**
5364  * set the IVAR registers, mapping interrupt causes to vectors
5365  * @hw
5366  *  pointer to ixgbe_hw struct
5367  * @direction
5368  *  0 for Rx, 1 for Tx, -1 for other causes
5369  * @queue
5370  *  queue to map the corresponding interrupt to
5371  * @msix_vector
5372  *  the vector to map to the corresponding queue
5373  */
5374 static void
5375 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5376                    uint8_t queue, uint8_t msix_vector)
5377 {
5378         uint32_t tmp, idx;
5379
5380         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5381         if (hw->mac.type == ixgbe_mac_82598EB) {
5382                 if (direction == -1)
5383                         direction = 0;
5384                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
5385                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
5386                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
5387                 tmp |= (msix_vector << (8 * (queue & 0x3)));
5388                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
5389         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
5390                         (hw->mac.type == ixgbe_mac_X540)) {
5391                 if (direction == -1) {
5392                         /* other causes */
5393                         idx = ((queue & 1) * 8);
5394                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5395                         tmp &= ~(0xFF << idx);
5396                         tmp |= (msix_vector << idx);
5397                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
5398                 } else {
5399                         /* rx or tx causes */
5400                         idx = ((16 * (queue & 1)) + (8 * direction));
5401                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
5402                         tmp &= ~(0xFF << idx);
5403                         tmp |= (msix_vector << idx);
5404                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
5405                 }
5406         }
5407 }
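
/*
 * Worked example for the 82599/X540 branch above (hypothetical values):
 * mapping Rx queue 5 (direction 0) to MSI-X vector 3 gives
 * idx = (16 * (5 & 1)) + (8 * 0) = 16, so the vector, OR'ed with
 * IXGBE_IVAR_ALLOC_VAL, is written into bits 23:16 of IVAR(5 >> 1),
 * i.e. IVAR(2).
 */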
5408
5409 static void
5410 ixgbevf_configure_msix(struct rte_eth_dev *dev)
5411 {
5412         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
5413         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5414         struct ixgbe_hw *hw =
5415                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5416         uint32_t q_idx;
5417         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
5418
5419         /* Configure VF other cause ivar */
5420         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
5421
5422         /* won't configure msix register if no mapping is done
5423          * between intr vector and event fd.
5424          */
5425         if (!rte_intr_dp_is_en(intr_handle))
5426                 return;
5427
5428         /* Configure all RX queues of VF */
5429         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
5430                 /* Force all queues to use vector 0,
5431                  * as IXGBE_VF_MAXMSIVECTOR = 1
5432                  */
5433                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
5434                 intr_handle->intr_vec[q_idx] = vector_idx;
5435         }
5436 }
5437
5438 /**
5439  * Sets up the hardware to properly generate MSI-X interrupts
5440  * @hw
5441  *  board private structure
5442  */
5443 static void
5444 ixgbe_configure_msix(struct rte_eth_dev *dev)
5445 {
5446         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
5447         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5448         struct ixgbe_hw *hw =
5449                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5450         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
5451         uint32_t vec = IXGBE_MISC_VEC_ID;
5452         uint32_t mask;
5453         uint32_t gpie;
5454
5455         /* won't configure msix register if no mapping is done
5456          * between intr vector and event fd
5457          */
5458         if (!rte_intr_dp_is_en(intr_handle))
5459                 return;
5460
5461         if (rte_intr_allow_others(intr_handle))
5462                 vec = base = IXGBE_RX_VEC_START;
5463
5464         /* setup GPIE for MSI-x mode */
5465         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5466         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5467                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
5468         /* auto clearing and auto setting corresponding bits in EIMS
5469          * when MSI-X interrupt is triggered
5470          */
5471         if (hw->mac.type == ixgbe_mac_82598EB) {
5472                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5473         } else {
5474                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5475                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5476         }
5477         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5478
5479         /* Populate the IVAR table and set the ITR values to the
5480          * corresponding register.
5481          */
5482         for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
5483              queue_id++) {
5484                 /* by default, 1:1 mapping */
5485                 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
5486                 intr_handle->intr_vec[queue_id] = vec;
5487                 if (vec < base + intr_handle->nb_efd - 1)
5488                         vec++;
5489         }
5490
5491         switch (hw->mac.type) {
5492         case ixgbe_mac_82598EB:
5493                 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
5494                                    IXGBE_MISC_VEC_ID);
5495                 break;
5496         case ixgbe_mac_82599EB:
5497         case ixgbe_mac_X540:
5498                 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
5499                 break;
5500         default:
5501                 break;
5502         }
5503         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
5504                         IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
5505
5506         /* set up to autoclear timer, and the vectors */
5507         mask = IXGBE_EIMS_ENABLE_MASK;
5508         mask &= ~(IXGBE_EIMS_OTHER |
5509                   IXGBE_EIMS_MAILBOX |
5510                   IXGBE_EIMS_LSC);
5511
5512         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5513 }
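
/*
 * Example of the queue-to-vector assignment performed above, assuming
 * rte_intr_allow_others() is true and intr_handle->nb_efd = 2: queue 0 maps
 * to vector IXGBE_RX_VEC_START, queue 1 to IXGBE_RX_VEC_START + 1, and any
 * further Rx queues share that last vector, since vec stops incrementing at
 * base + nb_efd - 1.
 */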
5514
5515 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
5516         uint16_t queue_idx, uint16_t tx_rate)
5517 {
5518         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5519         uint32_t rf_dec, rf_int;
5520         uint32_t bcnrc_val;
5521         uint16_t link_speed = dev->data->dev_link.link_speed;
5522
5523         if (queue_idx >= hw->mac.max_tx_queues)
5524                 return -EINVAL;
5525
5526         if (tx_rate != 0) {
5527                 /* Calculate the rate factor values to set */
5528                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
5529                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
5530                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
5531
5532                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
5533                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
5534                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
5535                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
5536         } else {
5537                 bcnrc_val = 0;
5538         }
5539
5540         /*
5541          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
5542          * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
5543          * set as 0x4.
5544          */
5545         if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
5546                 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
5547                                 IXGBE_MAX_JUMBO_FRAME_SIZE))
5548                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5549                         IXGBE_MMW_SIZE_JUMBO_FRAME);
5550         else
5551                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5552                         IXGBE_MMW_SIZE_DEFAULT);
5553
5554         /* Set RTTBCNRC of queue X */
5555         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
5556         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
5557         IXGBE_WRITE_FLUSH(hw);
5558
5559         return 0;
5560 }
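
/*
 * Worked example of the rate-factor arithmetic above (hypothetical values):
 * with link_speed = 10000 (Mb/s) and tx_rate = 3000, rf_int = 10000 / 3000
 * = 3 and rf_dec = ((10000 % 3000) << IXGBE_RTTBCNRC_RF_INT_SHIFT) / 3000,
 * i.e. the fractional part 1000/3000 scaled by 2^IXGBE_RTTBCNRC_RF_INT_SHIFT.
 * The resulting rate factor of ~3.33 shapes the queue to roughly
 * 10000 / 3.33 = 3000 Mb/s.
 */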
5561
5562 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
5563         uint16_t tx_rate, uint64_t q_msk)
5564 {
5565         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
5566         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5567         struct ixgbe_vf_info *vfinfo =
5568                 *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
5569         uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
5570         uint32_t queue_stride =
5571                 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
5572         uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
5573         uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
5574         uint16_t total_rate = 0;
5575
5576         if (queue_end >= hw->mac.max_tx_queues)
5577                 return -EINVAL;
5578
5579         if (vfinfo != NULL) {
5580                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
5581                         if (vf_idx == vf)
5582                                 continue;
5583                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
5584                                 idx++)
5585                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
5586                 }
5587         } else
5588                 return -EINVAL;
5589
5590         /* Store tx_rate for this vf. */
5591         for (idx = 0; idx < nb_q_per_pool; idx++) {
5592                 if (((uint64_t)0x1 << idx) & q_msk) {
5593                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
5594                                 vfinfo[vf].tx_rate[idx] = tx_rate;
5595                         total_rate += tx_rate;
5596                 }
5597         }
5598
5599         if (total_rate > dev->data->dev_link.link_speed) {
5600                 /*
5601                  * Reset the stored TX rates of this VF if the total
5602                  * would exceed the link speed.
5603                  */
5604                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
5605                 return -EINVAL;
5606         }
5607
5608         /* Set RTTBCNRC of each queue/pool for vf X  */
5609         for (; queue_idx <= queue_end; queue_idx++) {
5610                 if (0x1 & q_msk)
5611                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
5612                 q_msk = q_msk >> 1;
5613         }
5614
5615         return 0;
5616 }
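
/*
 * Example of the q_msk semantics above, assuming an SR-IOV layout with
 * 2 queues per pool (so queue_stride = 2): for vf = 3, queue_idx starts at
 * 6; q_msk = 0x3 applies tx_rate to queues 6 and 7, while q_msk = 0x1
 * limits only queue 6.
 */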
5617
5618 static void
5619 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5620                      __attribute__((unused)) uint32_t index,
5621                      __attribute__((unused)) uint32_t pool)
5622 {
5623         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5624         int diag;
5625
5626         /*
5627          * On a 82599 VF, adding again the same MAC addr is not an idempotent
5628          * operation. Trap this case to avoid exhausting the [very limited]
5629          * set of PF resources used to store VF MAC addresses.
5630          */
5631         if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5632                 return;
5633         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5634         if (diag == 0)
5635                 return;
5636         PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
5637 }
5638
5639 static void
5640 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
5641 {
5642         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5643         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
5644         struct ether_addr *mac_addr;
5645         uint32_t i;
5646         int diag;
5647
5648         /*
5649          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
5650          * not support the deletion of a given MAC address.
5651          * Instead, it imposes to delete all MAC addresses, then to add again
5652          * all MAC addresses with the exception of the one to be deleted.
5653          */
5654         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
5655
5656         /*
5657          * Add again all MAC addresses, with the exception of the deleted one
5658          * and of the permanent MAC address.
5659          */
5660         for (i = 0, mac_addr = dev->data->mac_addrs;
5661              i < hw->mac.num_rar_entries; i++, mac_addr++) {
5662                 /* Skip the deleted MAC address */
5663                 if (i == index)
5664                         continue;
5665                 /* Skip NULL MAC addresses */
5666                 if (is_zero_ether_addr(mac_addr))
5667                         continue;
5668                 /* Skip the permanent MAC address */
5669                 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5670                         continue;
5671                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5672                 if (diag != 0)
5673                         PMD_DRV_LOG(ERR,
5674                                     "Adding again MAC address "
5675                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
5676                                     "diag=%d",
5677                                     mac_addr->addr_bytes[0],
5678                                     mac_addr->addr_bytes[1],
5679                                     mac_addr->addr_bytes[2],
5680                                     mac_addr->addr_bytes[3],
5681                                     mac_addr->addr_bytes[4],
5682                                     mac_addr->addr_bytes[5],
5683                                     diag);
5684         }
5685 }
5686
5687 static void
5688 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
5689 {
5690         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5691
5692         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
5693 }
5694
5695 #define MAC_TYPE_FILTER_SUP(type)    do {\
5696         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
5697                 (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
5698                 (type) != ixgbe_mac_X550EM_a)\
5699                 return -ENOTSUP;\
5700 } while (0)
5701
5702 static int
5703 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
5704                         struct rte_eth_syn_filter *filter,
5705                         bool add)
5706 {
5707         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5708         uint32_t synqf;
5709
5710         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
5711                 return -EINVAL;
5712
5713         synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
5714
5715         if (add) {
5716                 if (synqf & IXGBE_SYN_FILTER_ENABLE)
5717                         return -EINVAL;
5718                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
5719                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
5720
5721                 if (filter->hig_pri)
5722                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
5723                 else
5724                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
5725         } else {
5726                 if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
5727                         return -ENOENT;
5728                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
5729         }
5730         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
5731         IXGBE_WRITE_FLUSH(hw);
5732         return 0;
5733 }
5734
5735 static int
5736 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
5737                         struct rte_eth_syn_filter *filter)
5738 {
5739         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5740         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
5741
5742         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
5743                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
5744                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
5745                 return 0;
5746         }
5747         return -ENOENT;
5748 }
5749
5750 static int
5751 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
5752                         enum rte_filter_op filter_op,
5753                         void *arg)
5754 {
5755         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5756         int ret;
5757
5758         MAC_TYPE_FILTER_SUP(hw->mac.type);
5759
5760         if (filter_op == RTE_ETH_FILTER_NOP)
5761                 return 0;
5762
5763         if (arg == NULL) {
5764                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
5765                             filter_op);
5766                 return -EINVAL;
5767         }
5768
5769         switch (filter_op) {
5770         case RTE_ETH_FILTER_ADD:
5771                 ret = ixgbe_syn_filter_set(dev,
5772                                 (struct rte_eth_syn_filter *)arg,
5773                                 TRUE);
5774                 break;
5775         case RTE_ETH_FILTER_DELETE:
5776                 ret = ixgbe_syn_filter_set(dev,
5777                                 (struct rte_eth_syn_filter *)arg,
5778                                 FALSE);
5779                 break;
5780         case RTE_ETH_FILTER_GET:
5781                 ret = ixgbe_syn_filter_get(dev,
5782                                 (struct rte_eth_syn_filter *)arg);
5783                 break;
5784         default:
5785                 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
5786                 ret = -EINVAL;
5787                 break;
5788         }
5789
5790         return ret;
5791 }
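
/*
 * A minimal application-side sketch of driving the SYN filter path above
 * (hypothetical queue id); ethdev routes the call to this driver through
 * ixgbe_dev_filter_ctrl():
 *
 *   struct rte_eth_syn_filter syn = { .hig_pri = 1, .queue = 4 };
 *
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
 *                           RTE_ETH_FILTER_ADD, &syn);
 */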
5792
5793
5794 static inline enum ixgbe_5tuple_protocol
5795 convert_protocol_type(uint8_t protocol_value)
5796 {
5797         if (protocol_value == IPPROTO_TCP)
5798                 return IXGBE_FILTER_PROTOCOL_TCP;
5799         else if (protocol_value == IPPROTO_UDP)
5800                 return IXGBE_FILTER_PROTOCOL_UDP;
5801         else if (protocol_value == IPPROTO_SCTP)
5802                 return IXGBE_FILTER_PROTOCOL_SCTP;
5803         else
5804                 return IXGBE_FILTER_PROTOCOL_NONE;
5805 }
5806
5807 /*
5808  * add a 5tuple filter
5809  *
5810  * @param
5811  * dev: Pointer to struct rte_eth_dev.
5812  * filter: pointer to the filter that will be added; a free filter index
5813  *   is chosen here and stored in filter->index, and the Rx queue id is
5814  *   taken from filter->queue.
5815  *
5816  * @return
5817  *    - On success, zero.
5818  *    - On failure, a negative value.
5819  */
5820 static int
5821 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
5822                         struct ixgbe_5tuple_filter *filter)
5823 {
5824         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5825         struct ixgbe_filter_info *filter_info =
5826                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5827         int i, idx, shift;
5828         uint32_t ftqf, sdpqf;
5829         uint32_t l34timir = 0;
5830         uint8_t mask = 0xff;
5831
5832         /*
5833          * look for an unused 5tuple filter index,
5834          * and insert the filter to list.
5835          */
5836         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
5837                 idx = i / (sizeof(uint32_t) * NBBY);
5838                 shift = i % (sizeof(uint32_t) * NBBY);
5839                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
5840                         filter_info->fivetuple_mask[idx] |= 1 << shift;
5841                         filter->index = i;
5842                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
5843                                           filter,
5844                                           entries);
5845                         break;
5846                 }
5847         }
5848         if (i >= IXGBE_MAX_FTQF_FILTERS) {
5849                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
5850                 return -ENOSYS;
5851         }
5852
5853         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
5854                                 IXGBE_SDPQF_DSTPORT_SHIFT);
5855         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
5856
5857         ftqf = (uint32_t)(filter->filter_info.proto &
5858                 IXGBE_FTQF_PROTOCOL_MASK);
5859         ftqf |= (uint32_t)((filter->filter_info.priority &
5860                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
5861         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
5862                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
5863         if (filter->filter_info.dst_ip_mask == 0)
5864                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
5865         if (filter->filter_info.src_port_mask == 0)
5866                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
5867         if (filter->filter_info.dst_port_mask == 0)
5868                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
5869         if (filter->filter_info.proto_mask == 0)
5870                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
5871         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
5872         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
5873         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
5874
5875         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
5876         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
5877         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
5878         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
5879
5880         l34timir |= IXGBE_L34T_IMIR_RESERVE;
5881         l34timir |= (uint32_t)(filter->queue <<
5882                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
5883         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
5884         return 0;
5885 }
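
/*
 * Example of the free-slot bookkeeping above: for filter index i = 37,
 * idx = 37 / 32 = 1 and shift = 37 % 32 = 5, so bit 5 of
 * fivetuple_mask[1] marks that slot as in use.
 */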
5886
5887 /*
5888  * remove a 5tuple filter
5889  *
5890  * @param
5891  * dev: Pointer to struct rte_eth_dev.
5892  * filter: pointer to the filter to be removed.
5893  */
5894 static void
5895 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
5896                         struct ixgbe_5tuple_filter *filter)
5897 {
5898         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5899         struct ixgbe_filter_info *filter_info =
5900                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5901         uint16_t index = filter->index;
5902
5903         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
5904                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
5905         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
5906         rte_free(filter);
5907
5908         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
5909         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
5910         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
5911         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
5912         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
5913 }
5914
5915 static int
5916 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
5917 {
5918         struct ixgbe_hw *hw;
5919         uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
5920
5921         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5922
5923         if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
5924                 return -EINVAL;
5925
5926         /* refuse mtu that requires the support of scattered packets when this
5927          * feature has not been enabled before.
5928          */
5929         if (!dev->data->scattered_rx &&
5930             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
5931              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
5932                 return -EINVAL;
5933
5934         /*
5935          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
5936          * request of the version 2.0 of the mailbox API.
5937          * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
5938          * of the mailbox API.
5939  * This IXGBE_VF_SET_LPE request won't work with ixgbe pf drivers
5940          * prior to 3.11.33 which contains the following change:
5941          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
5942          */
5943         ixgbevf_rlpml_set_vf(hw, max_frame);
5944
5945         /* update max frame size */
5946         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
5947         return 0;
5948 }
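
/*
 * Worked example: mtu = 9000 gives max_frame = 9000 + ETHER_HDR_LEN (14)
 * + ETHER_CRC_LEN (4) = 9018 bytes, which must also fit in the mbuf data
 * room (minus VLAN tags and headroom) unless scattered Rx is enabled.
 */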
5949
5950 #define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
5951         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
5952                 return -ENOTSUP;\
5953 } while (0)
5954
5955 static inline struct ixgbe_5tuple_filter *
5956 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
5957                         struct ixgbe_5tuple_filter_info *key)
5958 {
5959         struct ixgbe_5tuple_filter *it;
5960
5961         TAILQ_FOREACH(it, filter_list, entries) {
5962                 if (memcmp(key, &it->filter_info,
5963                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
5964                         return it;
5965                 }
5966         }
5967         return NULL;
5968 }
5969
5970 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
5971 static inline int
5972 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
5973                         struct ixgbe_5tuple_filter_info *filter_info)
5974 {
5975         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
5976                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
5977                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
5978                 return -EINVAL;
5979
5980         switch (filter->dst_ip_mask) {
5981         case UINT32_MAX:
5982                 filter_info->dst_ip_mask = 0;
5983                 filter_info->dst_ip = filter->dst_ip;
5984                 break;
5985         case 0:
5986                 filter_info->dst_ip_mask = 1;
5987                 break;
5988         default:
5989                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
5990                 return -EINVAL;
5991         }
5992
5993         switch (filter->src_ip_mask) {
5994         case UINT32_MAX:
5995                 filter_info->src_ip_mask = 0;
5996                 filter_info->src_ip = filter->src_ip;
5997                 break;
5998         case 0:
5999                 filter_info->src_ip_mask = 1;
6000                 break;
6001         default:
6002                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
6003                 return -EINVAL;
6004         }
6005
6006         switch (filter->dst_port_mask) {
6007         case UINT16_MAX:
6008                 filter_info->dst_port_mask = 0;
6009                 filter_info->dst_port = filter->dst_port;
6010                 break;
6011         case 0:
6012                 filter_info->dst_port_mask = 1;
6013                 break;
6014         default:
6015                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
6016                 return -EINVAL;
6017         }
6018
6019         switch (filter->src_port_mask) {
6020         case UINT16_MAX:
6021                 filter_info->src_port_mask = 0;
6022                 filter_info->src_port = filter->src_port;
6023                 break;
6024         case 0:
6025                 filter_info->src_port_mask = 1;
6026                 break;
6027         default:
6028                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
6029                 return -EINVAL;
6030         }
6031
6032         switch (filter->proto_mask) {
6033         case UINT8_MAX:
6034                 filter_info->proto_mask = 0;
6035                 filter_info->proto =
6036                         convert_protocol_type(filter->proto);
6037                 break;
6038         case 0:
6039                 filter_info->proto_mask = 1;
6040                 break;
6041         default:
6042                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
6043                 return -EINVAL;
6044         }
6045
6046         filter_info->priority = (uint8_t)filter->priority;
6047         return 0;
6048 }
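
/*
 * Illustrative filter setup (hypothetical values) showing the mask
 * convention handled above: an all-ones mask means "compare this field",
 * 0 means "ignore it".
 *
 *   struct rte_eth_ntuple_filter nt = { 0 };
 *
 *   nt.flags = RTE_5TUPLE_FLAGS;
 *   nt.proto = IPPROTO_TCP;
 *   nt.proto_mask = UINT8_MAX;        (match the protocol)
 *   nt.dst_port = rte_cpu_to_be_16(80);
 *   nt.dst_port_mask = UINT16_MAX;    (match the destination port)
 *   nt.priority = 1;                  (IXGBE_5TUPLE_MIN_PRI..MAX_PRI)
 *   nt.queue = 2;                     (all other fields stay masked out)
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                           RTE_ETH_FILTER_ADD, &nt);
 */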
6049
6050 /*
6051  * add or delete a ntuple filter
6052  *
6053  * @param
6054  * dev: Pointer to struct rte_eth_dev.
6055  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6056  * add: if true, add filter, if false, remove filter
6057  *
6058  * @return
6059  *    - On success, zero.
6060  *    - On failure, a negative value.
6061  */
6062 static int
6063 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
6064                         struct rte_eth_ntuple_filter *ntuple_filter,
6065                         bool add)
6066 {
6067         struct ixgbe_filter_info *filter_info =
6068                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6069         struct ixgbe_5tuple_filter_info filter_5tuple;
6070         struct ixgbe_5tuple_filter *filter;
6071         int ret;
6072
6073         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6074                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6075                 return -EINVAL;
6076         }
6077
6078         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6079         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6080         if (ret < 0)
6081                 return ret;
6082
6083         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6084                                          &filter_5tuple);
6085         if (filter != NULL && add) {
6086                 PMD_DRV_LOG(ERR, "filter exists.");
6087                 return -EEXIST;
6088         }
6089         if (filter == NULL && !add) {
6090                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6091                 return -ENOENT;
6092         }
6093
6094         if (add) {
6095                 filter = rte_zmalloc("ixgbe_5tuple_filter",
6096                                 sizeof(struct ixgbe_5tuple_filter), 0);
6097                 if (filter == NULL)
6098                         return -ENOMEM;
6099                 (void)rte_memcpy(&filter->filter_info,
6100                                  &filter_5tuple,
6101                                  sizeof(struct ixgbe_5tuple_filter_info));
6102                 filter->queue = ntuple_filter->queue;
6103                 ret = ixgbe_add_5tuple_filter(dev, filter);
6104                 if (ret < 0) {
6105                         rte_free(filter);
6106                         return ret;
6107                 }
6108         } else
6109                 ixgbe_remove_5tuple_filter(dev, filter);
6110
6111         return 0;
6112 }
6113
6114 /*
6115  * get a ntuple filter
6116  *
6117  * @param
6118  * dev: Pointer to struct rte_eth_dev.
6119  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6120  *
6121  * @return
6122  *    - On success, zero.
6123  *    - On failure, a negative value.
6124  */
6125 static int
6126 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
6127                         struct rte_eth_ntuple_filter *ntuple_filter)
6128 {
6129         struct ixgbe_filter_info *filter_info =
6130                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6131         struct ixgbe_5tuple_filter_info filter_5tuple;
6132         struct ixgbe_5tuple_filter *filter;
6133         int ret;
6134
6135         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6136                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6137                 return -EINVAL;
6138         }
6139
6140         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6141         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6142         if (ret < 0)
6143                 return ret;
6144
6145         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6146                                          &filter_5tuple);
6147         if (filter == NULL) {
6148                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6149                 return -ENOENT;
6150         }
6151         ntuple_filter->queue = filter->queue;
6152         return 0;
6153 }
6154
6155 /*
6156  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
6157  * @dev: pointer to rte_eth_dev structure
6158  * @filter_op: operation to be taken.
6159  * @arg: a pointer to specific structure corresponding to the filter_op
6160  *
6161  * @return
6162  *    - On success, zero.
6163  *    - On failure, a negative value.
6164  */
6165 static int
6166 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
6167                                 enum rte_filter_op filter_op,
6168                                 void *arg)
6169 {
6170         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6171         int ret;
6172
6173         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
6174
6175         if (filter_op == RTE_ETH_FILTER_NOP)
6176                 return 0;
6177
6178         if (arg == NULL) {
6179                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6180                             filter_op);
6181                 return -EINVAL;
6182         }
6183
6184         switch (filter_op) {
6185         case RTE_ETH_FILTER_ADD:
6186                 ret = ixgbe_add_del_ntuple_filter(dev,
6187                         (struct rte_eth_ntuple_filter *)arg,
6188                         TRUE);
6189                 break;
6190         case RTE_ETH_FILTER_DELETE:
6191                 ret = ixgbe_add_del_ntuple_filter(dev,
6192                         (struct rte_eth_ntuple_filter *)arg,
6193                         FALSE);
6194                 break;
6195         case RTE_ETH_FILTER_GET:
6196                 ret = ixgbe_get_ntuple_filter(dev,
6197                         (struct rte_eth_ntuple_filter *)arg);
6198                 break;
6199         default:
6200                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6201                 ret = -EINVAL;
6202                 break;
6203         }
6204         return ret;
6205 }
6206
6207 static inline int
6208 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
6209                         uint16_t ethertype)
6210 {
6211         int i;
6212
6213         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
6214                 if (filter_info->ethertype_filters[i] == ethertype &&
6215                     (filter_info->ethertype_mask & (1 << i)))
6216                         return i;
6217         }
6218         return -1;
6219 }
6220
6221 static inline int
6222 ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
6223                         uint16_t ethertype)
6224 {
6225         int i;
6226
6227         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
6228                 if (!(filter_info->ethertype_mask & (1 << i))) {
6229                         filter_info->ethertype_mask |= 1 << i;
6230                         filter_info->ethertype_filters[i] = ethertype;
6231                         return i;
6232                 }
6233         }
6234         return -1;
6235 }
6236
6237 static inline int
6238 ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
6239                         uint8_t idx)
6240 {
6241         if (idx >= IXGBE_MAX_ETQF_FILTERS)
6242                 return -1;
6243         filter_info->ethertype_mask &= ~(1 << idx);
6244         filter_info->ethertype_filters[idx] = 0;
6245         return idx;
6246 }
6247
6248 static int
6249 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6250                         struct rte_eth_ethertype_filter *filter,
6251                         bool add)
6252 {
6253         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6254         struct ixgbe_filter_info *filter_info =
6255                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6256         uint32_t etqf = 0;
6257         uint32_t etqs = 0;
6258         int ret;
6259
6260         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6261                 return -EINVAL;
6262
6263         if (filter->ether_type == ETHER_TYPE_IPv4 ||
6264                 filter->ether_type == ETHER_TYPE_IPv6) {
6265                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
6266                         " ethertype filter.", filter->ether_type);
6267                 return -EINVAL;
6268         }
6269
6270         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6271                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6272                 return -EINVAL;
6273         }
6274         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6275                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
6276                 return -EINVAL;
6277         }
6278
6279         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6280         if (ret >= 0 && add) {
6281                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6282                             filter->ether_type);
6283                 return -EEXIST;
6284         }
6285         if (ret < 0 && !add) {
6286                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6287                             filter->ether_type);
6288                 return -ENOENT;
6289         }
6290
6291         if (add) {
6292                 ret = ixgbe_ethertype_filter_insert(filter_info,
6293                         filter->ether_type);
6294                 if (ret < 0) {
6295                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
6296                         return -ENOSYS;
6297                 }
6298                 etqf = IXGBE_ETQF_FILTER_EN;
6299                 etqf |= (uint32_t)filter->ether_type;
6300                 etqs |= (uint32_t)((filter->queue <<
6301                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
6302                                     IXGBE_ETQS_RX_QUEUE);
6303                 etqs |= IXGBE_ETQS_QUEUE_EN;
6304         } else {
6305                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6306                 if (ret < 0)
6307                         return -ENOSYS;
6308         }
6309         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6310         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6311         IXGBE_WRITE_FLUSH(hw);
6312
6313         return 0;
6314 }
6315
6316 static int
6317 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
6318                         struct rte_eth_ethertype_filter *filter)
6319 {
6320         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6321         struct ixgbe_filter_info *filter_info =
6322                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6323         uint32_t etqf, etqs;
6324         int ret;
6325
6326         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6327         if (ret < 0) {
6328                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6329                             filter->ether_type);
6330                 return -ENOENT;
6331         }
6332
6333         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
6334         if (etqf & IXGBE_ETQF_FILTER_EN) {
6335                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
6336                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
6337                 filter->flags = 0;
6338                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
6339                                IXGBE_ETQS_RX_QUEUE_SHIFT;
6340                 return 0;
6341         }
6342         return -ENOENT;
6343 }
6344
6345 /*
6346  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
6347  * @dev: pointer to rte_eth_dev structure
6348  * @filter_op: operation to be taken.
6349  * @arg: a pointer to specific structure corresponding to the filter_op
6350  */
6351 static int
6352 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
6353                                 enum rte_filter_op filter_op,
6354                                 void *arg)
6355 {
6356         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6357         int ret;
6358
6359         MAC_TYPE_FILTER_SUP(hw->mac.type);
6360
6361         if (filter_op == RTE_ETH_FILTER_NOP)
6362                 return 0;
6363
6364         if (arg == NULL) {
6365                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6366                             filter_op);
6367                 return -EINVAL;
6368         }
6369
6370         switch (filter_op) {
6371         case RTE_ETH_FILTER_ADD:
6372                 ret = ixgbe_add_del_ethertype_filter(dev,
6373                         (struct rte_eth_ethertype_filter *)arg,
6374                         TRUE);
6375                 break;
6376         case RTE_ETH_FILTER_DELETE:
6377                 ret = ixgbe_add_del_ethertype_filter(dev,
6378                         (struct rte_eth_ethertype_filter *)arg,
6379                         FALSE);
6380                 break;
6381         case RTE_ETH_FILTER_GET:
6382                 ret = ixgbe_get_ethertype_filter(dev,
6383                         (struct rte_eth_ethertype_filter *)arg);
6384                 break;
6385         default:
6386                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6387                 ret = -EINVAL;
6388                 break;
6389         }
6390         return ret;
6391 }
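
/*
 * A comparable application-side sketch for the EtherType path above
 * (hypothetical EtherType and queue; IPv4/IPv6 EtherTypes and the MAC/DROP
 * flags are rejected by this driver):
 *
 *   struct rte_eth_ethertype_filter ef = {
 *           .ether_type = 0x88F7,     (PTP over Ethernet, as an example)
 *           .queue = 1,
 *   };
 *
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                           RTE_ETH_FILTER_ADD, &ef);
 */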
6392
6393 static int
6394 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
6395                      enum rte_filter_type filter_type,
6396                      enum rte_filter_op filter_op,
6397                      void *arg)
6398 {
6399         int ret = -EINVAL;
6400
6401         switch (filter_type) {
6402         case RTE_ETH_FILTER_NTUPLE:
6403                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
6404                 break;
6405         case RTE_ETH_FILTER_ETHERTYPE:
6406                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
6407                 break;
6408         case RTE_ETH_FILTER_SYN:
6409                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
6410                 break;
6411         case RTE_ETH_FILTER_FDIR:
6412                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
6413                 break;
6414         case RTE_ETH_FILTER_L2_TUNNEL:
6415                 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
6416                 break;
6417         default:
6418                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
6419                                                         filter_type);
6420                 break;
6421         }
6422
6423         return ret;
6424 }
6425
6426 static u8 *
6427 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
6428                         u8 **mc_addr_ptr, u32 *vmdq)
6429 {
6430         u8 *mc_addr;
6431
6432         *vmdq = 0;
6433         mc_addr = *mc_addr_ptr;
6434         *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
6435         return mc_addr;
6436 }
6437
6438 static int
6439 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
6440                           struct ether_addr *mc_addr_set,
6441                           uint32_t nb_mc_addr)
6442 {
6443         struct ixgbe_hw *hw;
6444         u8 *mc_addr_list;
6445
6446         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6447         mc_addr_list = (u8 *)mc_addr_set;
6448         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
6449                                          ixgbe_dev_addr_list_itr, TRUE);
6450 }
6451
6452 static uint64_t
6453 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
6454 {
6455         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6456         uint64_t systime_cycles;
6457
6458         switch (hw->mac.type) {
6459         case ixgbe_mac_X550:
6460         case ixgbe_mac_X550EM_x:
6461         case ixgbe_mac_X550EM_a:
6462                 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
6463                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6464                 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6465                                 * NSEC_PER_SEC;
6466                 break;
6467         default:
6468                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6469                 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6470                                 << 32;
6471         }
6472
6473         return systime_cycles;
6474 }
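
/*
 * Worked example of the two register layouts handled above: on X550,
 * SYSTIMH = 2 and SYSTIML = 500000000 decode to
 * 2 * NSEC_PER_SEC + 500000000 = 2500000000 ns; on 82599/X540 the same
 * registers form a single 64-bit cycle count, (SYSTIMH << 32) | SYSTIML,
 * which the timecounter later converts to nanoseconds.
 */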
6475
6476 static uint64_t
6477 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6478 {
6479         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6480         uint64_t rx_tstamp_cycles;
6481
6482         switch (hw->mac.type) {
6483         case ixgbe_mac_X550:
6484         case ixgbe_mac_X550EM_x:
6485         case ixgbe_mac_X550EM_a:
6486                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
6487                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6488                 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6489                                 * NSEC_PER_SEC;
6490                 break;
6491         default:
6492                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
6493                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6494                 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6495                                 << 32;
6496         }
6497
6498         return rx_tstamp_cycles;
6499 }
6500
6501 static uint64_t
6502 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6503 {
6504         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6505         uint64_t tx_tstamp_cycles;
6506
6507         switch (hw->mac.type) {
6508         case ixgbe_mac_X550:
6509         case ixgbe_mac_X550EM_x:
6510         case ixgbe_mac_X550EM_a:
6511                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
6512                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6513                 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6514                                 * NSEC_PER_SEC;
6515                 break;
6516         default:
6517                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
6518                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6519                 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6520                                 << 32;
6521         }
6522
6523         return tx_tstamp_cycles;
6524 }
6525
6526 static void
6527 ixgbe_start_timecounters(struct rte_eth_dev *dev)
6528 {
6529         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6530         struct ixgbe_adapter *adapter =
6531                 (struct ixgbe_adapter *)dev->data->dev_private;
6532         struct rte_eth_link link;
6533         uint32_t incval = 0;
6534         uint32_t shift = 0;
6535
6536         /* Get current link speed. */
6537         memset(&link, 0, sizeof(link));
6538         ixgbe_dev_link_update(dev, 1);
6539         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
6540
6541         switch (link.link_speed) {
6542         case ETH_SPEED_NUM_100M:
6543                 incval = IXGBE_INCVAL_100;
6544                 shift = IXGBE_INCVAL_SHIFT_100;
6545                 break;
6546         case ETH_SPEED_NUM_1G:
6547                 incval = IXGBE_INCVAL_1GB;
6548                 shift = IXGBE_INCVAL_SHIFT_1GB;
6549                 break;
6550         case ETH_SPEED_NUM_10G:
6551         default:
6552                 incval = IXGBE_INCVAL_10GB;
6553                 shift = IXGBE_INCVAL_SHIFT_10GB;
6554                 break;
6555         }
6556
6557         switch (hw->mac.type) {
6558         case ixgbe_mac_X550:
6559         case ixgbe_mac_X550EM_x:
6560         case ixgbe_mac_X550EM_a:
6561                 /* Independent of link speed. */
6562                 incval = 1;
6563                 /* Cycles read will be interpreted as ns. */
6564                 shift = 0;
6565                 /* Fall-through */
6566         case ixgbe_mac_X540:
6567                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
6568                 break;
6569         case ixgbe_mac_82599EB:
6570                 incval >>= IXGBE_INCVAL_SHIFT_82599;
6571                 shift -= IXGBE_INCVAL_SHIFT_82599;
6572                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
6573                                 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
6574                 break;
6575         default:
6576                 /* Not supported. */
6577                 return;
6578         }
6579
6580         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
6581         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6582         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6583
6584         adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6585         adapter->systime_tc.cc_shift = shift;
6586         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
6587
6588         adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6589         adapter->rx_tstamp_tc.cc_shift = shift;
6590         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6591
6592         adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6593         adapter->tx_tstamp_tc.cc_shift = shift;
6594         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6595 }
6596
6597 static int
6598 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
6599 {
6600         struct ixgbe_adapter *adapter =
6601                         (struct ixgbe_adapter *)dev->data->dev_private;
6602
6603         adapter->systime_tc.nsec += delta;
6604         adapter->rx_tstamp_tc.nsec += delta;
6605         adapter->tx_tstamp_tc.nsec += delta;
6606
6607         return 0;
6608 }
6609
6610 static int
6611 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
6612 {
6613         uint64_t ns;
6614         struct ixgbe_adapter *adapter =
6615                         (struct ixgbe_adapter *)dev->data->dev_private;
6616
6617         ns = rte_timespec_to_ns(ts);
6618         /* Set the timecounters to a new value. */
6619         adapter->systime_tc.nsec = ns;
6620         adapter->rx_tstamp_tc.nsec = ns;
6621         adapter->tx_tstamp_tc.nsec = ns;
6622
6623         return 0;
6624 }
6625
6626 static int
6627 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
6628 {
6629         uint64_t ns, systime_cycles;
6630         struct ixgbe_adapter *adapter =
6631                         (struct ixgbe_adapter *)dev->data->dev_private;
6632
6633         systime_cycles = ixgbe_read_systime_cyclecounter(dev);
6634         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
6635         *ts = rte_ns_to_timespec(ns);
6636
6637         return 0;
6638 }
6639
6640 static int
6641 ixgbe_timesync_enable(struct rte_eth_dev *dev)
6642 {
6643         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6644         uint32_t tsync_ctl;
6645         uint32_t tsauxc;
6646
6647         /* Stop the timesync system time. */
6648         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
6649         /* Reset the timesync system time value. */
6650         IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
6651         IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
6652
6653         /* Enable system time for platforms where it isn't on by default. */
6654         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
6655         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
6656         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
6657
6658         ixgbe_start_timecounters(dev);
6659
6660         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6661         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
6662                         (ETHER_TYPE_1588 |
6663                          IXGBE_ETQF_FILTER_EN |
6664                          IXGBE_ETQF_1588));
6665
6666         /* Enable timestamping of received PTP packets. */
6667         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6668         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
6669         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6670
6671         /* Enable timestamping of transmitted PTP packets. */
6672         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6673         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
6674         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6675
6676         IXGBE_WRITE_FLUSH(hw);
6677
6678         return 0;
6679 }
6680
6681 static int
6682 ixgbe_timesync_disable(struct rte_eth_dev *dev)
6683 {
6684         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6685         uint32_t tsync_ctl;
6686
6687         /* Disable timestamping of transmitted PTP packets. */
6688         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6689         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
6690         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
6691
6692         /* Disable timestamping of received PTP packets. */
6693         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6694         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
6695         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
6696
6697         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
6698         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
6699
6700         /* Stop incrementing the System Time registers. */
6701         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
6702
6703         return 0;
6704 }
6705
6706 static int
6707 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
6708                                  struct timespec *timestamp,
6709                                  uint32_t flags __rte_unused)
6710 {
6711         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6712         struct ixgbe_adapter *adapter =
6713                 (struct ixgbe_adapter *)dev->data->dev_private;
6714         uint32_t tsync_rxctl;
6715         uint64_t rx_tstamp_cycles;
6716         uint64_t ns;
6717
6718         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
6719         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
6720                 return -EINVAL;
6721
6722         rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
6723         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
6724         *timestamp = rte_ns_to_timespec(ns);
6725
6726         return  0;
6727 }
6728
6729 static int
6730 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
6731                                  struct timespec *timestamp)
6732 {
6733         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6734         struct ixgbe_adapter *adapter =
6735                 (struct ixgbe_adapter *)dev->data->dev_private;
6736         uint32_t tsync_txctl;
6737         uint64_t tx_tstamp_cycles;
6738         uint64_t ns;
6739
6740         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
6741         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
6742                 return -EINVAL;
6743
6744         tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
6745         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
6746         *timestamp = rte_ns_to_timespec(ns);
6747
6748         return 0;
6749 }
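
/*
 * Illustrative usage sketch (not part of the driver): applications reach the
 * timesync callbacks above through the generic ethdev API. port_id and the
 * error handling below are assumptions for the example only.
 *
 *     struct timespec ts;
 *
 *     rte_eth_timesync_enable(port_id);
 *     ...
 *     if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *             printf("Rx PTP timestamp: %ld.%09ld\n",
 *                    (long)ts.tv_sec, ts.tv_nsec);
 *     if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0)
 *             printf("Tx PTP timestamp: %ld.%09ld\n",
 *                    (long)ts.tv_sec, ts.tv_nsec);
 */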
6750
6751 static int
6752 ixgbe_get_reg_length(struct rte_eth_dev *dev)
6753 {
6754         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6755         int count = 0;
6756         int g_ind = 0;
6757         const struct reg_info *reg_group;
6758         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
6759                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
6760
6761         while ((reg_group = reg_set[g_ind++]))
6762                 count += ixgbe_regs_group_count(reg_group);
6763
6764         return count;
6765 }
6766
6767 static int
6768 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
6769 {
6770         int count = 0;
6771         int g_ind = 0;
6772         const struct reg_info *reg_group;
6773
6774         while ((reg_group = ixgbevf_regs[g_ind++]))
6775                 count += ixgbe_regs_group_count(reg_group);
6776
6777         return count;
6778 }
6779
6780 static int
6781 ixgbe_get_regs(struct rte_eth_dev *dev,
6782               struct rte_dev_reg_info *regs)
6783 {
6784         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6785         uint32_t *data = regs->data;
6786         int g_ind = 0;
6787         int count = 0;
6788         const struct reg_info *reg_group;
6789         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
6790                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
6791
6792         if (data == NULL) {
6793                 regs->length = ixgbe_get_reg_length(dev);
6794                 regs->width = sizeof(uint32_t);
6795                 return 0;
6796         }
6797
6798         /* Support only full register dump */
6799         if ((regs->length == 0) ||
6800             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
6801                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
6802                         hw->device_id;
6803                 while ((reg_group = reg_set[g_ind++]))
6804                         count += ixgbe_read_regs_group(dev, &data[count],
6805                                 reg_group);
6806                 return 0;
6807         }
6808
6809         return -ENOTSUP;
6810 }
6811
6812 static int
6813 ixgbevf_get_regs(struct rte_eth_dev *dev,
6814                 struct rte_dev_reg_info *regs)
6815 {
6816         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6817         uint32_t *data = regs->data;
6818         int g_ind = 0;
6819         int count = 0;
6820         const struct reg_info *reg_group;
6821
6822         if (data == NULL) {
6823                 regs->length = ixgbevf_get_reg_length(dev);
6824                 regs->width = sizeof(uint32_t);
6825                 return 0;
6826         }
6827
6828         /* Support only full register dump */
6829         if ((regs->length == 0) ||
6830             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
6831                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
6832                         hw->device_id;
6833                 while ((reg_group = ixgbevf_regs[g_ind++]))
6834                         count += ixgbe_read_regs_group(dev, &data[count],
6835                                                       reg_group);
6836                 return 0;
6837         }
6838
6839         return -ENOTSUP;
6840 }
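
/*
 * Illustrative usage sketch (not part of the driver): the register dump
 * callbacks above implement the two-step contract of
 * rte_eth_dev_get_reg_info(): a first call with data == NULL reports the
 * length and width, a second call with a buffer performs the dump. port_id
 * and the malloc() handling are assumptions for the example only.
 *
 *     struct rte_dev_reg_info info = { .data = NULL };
 *
 *     if (rte_eth_dev_get_reg_info(port_id, &info) == 0) {
 *             info.data = malloc(info.length * info.width);
 *             info.length = 0;
 *             rte_eth_dev_get_reg_info(port_id, &info);
 *     }
 *
 * Leaving info.length at 0 on the second call requests the full dump, which
 * is the only mode the callbacks above support.
 */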
6841
6842 static int
6843 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
6844 {
6845         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6846
6847         /* Return unit is byte count */
6848         return hw->eeprom.word_size * 2;
6849 }
6850
6851 static int
6852 ixgbe_get_eeprom(struct rte_eth_dev *dev,
6853                 struct rte_dev_eeprom_info *in_eeprom)
6854 {
6855         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6856         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
6857         uint16_t *data = in_eeprom->data;
6858         int first, length;
6859
6860         first = in_eeprom->offset >> 1;
6861         length = in_eeprom->length >> 1;
6862         if ((first > hw->eeprom.word_size) ||
6863             ((first + length) > hw->eeprom.word_size))
6864                 return -EINVAL;
6865
6866         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
6867
6868         return eeprom->ops.read_buffer(hw, first, length, data);
6869 }
6870
6871 static int
6872 ixgbe_set_eeprom(struct rte_eth_dev *dev,
6873                 struct rte_dev_eeprom_info *in_eeprom)
6874 {
6875         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6876         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
6877         uint16_t *data = in_eeprom->data;
6878         int first, length;
6879
6880         first = in_eeprom->offset >> 1;
6881         length = in_eeprom->length >> 1;
6882         if ((first > hw->eeprom.word_size) ||
6883             ((first + length) > hw->eeprom.word_size))
6884                 return -EINVAL;
6885
6886         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
6887
6888         return eeprom->ops.write_buffer(hw, first, length, data);
6889 }
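
/*
 * Illustrative usage sketch (not part of the driver): the EEPROM callbacks
 * above operate on 16-bit words, so offsets and lengths passed through the
 * ethdev API should be even byte counts. port_id and the malloc() handling
 * are assumptions for the example only.
 *
 *     int len = rte_eth_dev_get_eeprom_length(port_id);
 *     struct rte_dev_eeprom_info ee = {
 *             .data = malloc(len),
 *             .offset = 0,
 *             .length = len,
 *     };
 *
 *     if (len > 0)
 *             rte_eth_dev_get_eeprom(port_id, &ee);
 */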
6890
6891 uint16_t
6892 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
6893         switch (mac_type) {
6894         case ixgbe_mac_X550:
6895         case ixgbe_mac_X550EM_x:
6896         case ixgbe_mac_X550EM_a:
6897                 return ETH_RSS_RETA_SIZE_512;
6898         case ixgbe_mac_X550_vf:
6899         case ixgbe_mac_X550EM_x_vf:
6900         case ixgbe_mac_X550EM_a_vf:
6901                 return ETH_RSS_RETA_SIZE_64;
6902         default:
6903                 return ETH_RSS_RETA_SIZE_128;
6904         }
6905 }
6906
6907 uint32_t
6908 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
6909         switch (mac_type) {
6910         case ixgbe_mac_X550:
6911         case ixgbe_mac_X550EM_x:
6912         case ixgbe_mac_X550EM_a:
6913                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
6914                         return IXGBE_RETA(reta_idx >> 2);
6915                 else
6916                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
6917         case ixgbe_mac_X550_vf:
6918         case ixgbe_mac_X550EM_x_vf:
6919         case ixgbe_mac_X550EM_a_vf:
6920                 return IXGBE_VFRETA(reta_idx >> 2);
6921         default:
6922                 return IXGBE_RETA(reta_idx >> 2);
6923         }
6924 }
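
/*
 * Worked example (a sketch derived from the helpers above): on an X550 PF
 * the redirection table spans 512 entries and four entries share one 32-bit
 * register, so ixgbe_reta_reg_get(ixgbe_mac_X550, 20) yields IXGBE_RETA(5),
 * while entry 130 falls past the first 128 entries and yields
 * IXGBE_ERETA((130 - 128) >> 2) == IXGBE_ERETA(0).
 */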
6925
6926 uint32_t
6927 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
6928         switch (mac_type) {
6929         case ixgbe_mac_X550_vf:
6930         case ixgbe_mac_X550EM_x_vf:
6931         case ixgbe_mac_X550EM_a_vf:
6932                 return IXGBE_VFMRQC;
6933         default:
6934                 return IXGBE_MRQC;
6935         }
6936 }
6937
6938 uint32_t
6939 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
6940         switch (mac_type) {
6941         case ixgbe_mac_X550_vf:
6942         case ixgbe_mac_X550EM_x_vf:
6943         case ixgbe_mac_X550EM_a_vf:
6944                 return IXGBE_VFRSSRK(i);
6945         default:
6946                 return IXGBE_RSSRK(i);
6947         }
6948 }
6949
6950 bool
6951 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
6952         switch (mac_type) {
6953         case ixgbe_mac_82599_vf:
6954         case ixgbe_mac_X540_vf:
6955                 return 0;
6956         default:
6957                 return 1;
6958         }
6959 }
6960
6961 static int
6962 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
6963                         struct rte_eth_dcb_info *dcb_info)
6964 {
6965         struct ixgbe_dcb_config *dcb_config =
6966                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
6967         struct ixgbe_dcb_tc_config *tc;
6968         uint8_t i, j;
6969
6970         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
6971                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
6972         else
6973                 dcb_info->nb_tcs = 1;
6974
6975         if (dcb_config->vt_mode) { /* vt is enabled*/
6976                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
6977                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
6978                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
6979                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
6980                 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
6981                         for (j = 0; j < dcb_info->nb_tcs; j++) {
6982                                 dcb_info->tc_queue.tc_rxq[i][j].base =
6983                                                 i * dcb_info->nb_tcs + j;
6984                                 dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
6985                                 dcb_info->tc_queue.tc_txq[i][j].base =
6986                                                 i * dcb_info->nb_tcs + j;
6987                                 dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
6988                         }
6989                 }
6990         } else { /* vt is disabled*/
6991                 struct rte_eth_dcb_rx_conf *rx_conf =
6992                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
6993                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
6994                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
6995                 if (dcb_info->nb_tcs == ETH_4_TCS) {
6996                         for (i = 0; i < dcb_info->nb_tcs; i++) {
6997                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
6998                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
6999                         }
7000                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7001                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
7002                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
7003                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
7004                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
7005                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7006                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7007                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7008                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
7009                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7010                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
7011                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7012                         }
7013                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7014                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
7015                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
7016                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
7017                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
7018                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
7019                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
7020                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
7021                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
7022                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7023                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7024                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7025                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
7026                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
7027                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
7028                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
7029                 }
7030         }
7031         for (i = 0; i < dcb_info->nb_tcs; i++) {
7032                 tc = &dcb_config->tc_config[i];
7033                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
7034         }
7035         return 0;
7036 }
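
/*
 * Illustrative usage sketch (not part of the driver): this callback is
 * reached through rte_eth_dev_get_dcb_info(). port_id and the printf()
 * formatting are assumptions for the example only.
 *
 *     struct rte_eth_dcb_info dcb_info;
 *
 *     if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) == 0)
 *             printf("%u TCs, TC0 Rx base queue %u (%u queues)\n",
 *                    dcb_info.nb_tcs,
 *                    dcb_info.tc_queue.tc_rxq[0][0].base,
 *                    dcb_info.tc_queue.tc_rxq[0][0].nb_queue);
 */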
7037
7038 /* Update e-tag ether type */
7039 static int
7040 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
7041                             uint16_t ether_type)
7042 {
7043         uint32_t etag_etype;
7044
7045         if (hw->mac.type != ixgbe_mac_X550 &&
7046             hw->mac.type != ixgbe_mac_X550EM_x &&
7047             hw->mac.type != ixgbe_mac_X550EM_a) {
7048                 return -ENOTSUP;
7049         }
7050
7051         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7052         etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
7053         etag_etype |= ether_type;
7054         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7055         IXGBE_WRITE_FLUSH(hw);
7056
7057         return 0;
7058 }
7059
7060 /* Config l2 tunnel ether type */
7061 static int
7062 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
7063                                   struct rte_eth_l2_tunnel_conf *l2_tunnel)
7064 {
7065         int ret = 0;
7066         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7067
7068         if (l2_tunnel == NULL)
7069                 return -EINVAL;
7070
7071         switch (l2_tunnel->l2_tunnel_type) {
7072         case RTE_L2_TUNNEL_TYPE_E_TAG:
7073                 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
7074                 break;
7075         default:
7076                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7077                 ret = -EINVAL;
7078                 break;
7079         }
7080
7081         return ret;
7082 }
7083
7084 /* Enable e-tag tunnel */
7085 static int
7086 ixgbe_e_tag_enable(struct ixgbe_hw *hw)
7087 {
7088         uint32_t etag_etype;
7089
7090         if (hw->mac.type != ixgbe_mac_X550 &&
7091             hw->mac.type != ixgbe_mac_X550EM_x &&
7092             hw->mac.type != ixgbe_mac_X550EM_a) {
7093                 return -ENOTSUP;
7094         }
7095
7096         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7097         etag_etype |= IXGBE_ETAG_ETYPE_VALID;
7098         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7099         IXGBE_WRITE_FLUSH(hw);
7100
7101         return 0;
7102 }
7103
7104 /* Enable l2 tunnel */
7105 static int
7106 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
7107                            enum rte_eth_tunnel_type l2_tunnel_type)
7108 {
7109         int ret = 0;
7110         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7111
7112         switch (l2_tunnel_type) {
7113         case RTE_L2_TUNNEL_TYPE_E_TAG:
7114                 ret = ixgbe_e_tag_enable(hw);
7115                 break;
7116         default:
7117                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7118                 ret = -EINVAL;
7119                 break;
7120         }
7121
7122         return ret;
7123 }
7124
7125 /* Disable e-tag tunnel */
7126 static int
7127 ixgbe_e_tag_disable(struct ixgbe_hw *hw)
7128 {
7129         uint32_t etag_etype;
7130
7131         if (hw->mac.type != ixgbe_mac_X550 &&
7132             hw->mac.type != ixgbe_mac_X550EM_x &&
7133             hw->mac.type != ixgbe_mac_X550EM_a) {
7134                 return -ENOTSUP;
7135         }
7136
7137         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7138         etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
7139         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7140         IXGBE_WRITE_FLUSH(hw);
7141
7142         return 0;
7143 }
7144
7145 /* Disable l2 tunnel */
7146 static int
7147 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
7148                             enum rte_eth_tunnel_type l2_tunnel_type)
7149 {
7150         int ret = 0;
7151         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7152
7153         switch (l2_tunnel_type) {
7154         case RTE_L2_TUNNEL_TYPE_E_TAG:
7155                 ret = ixgbe_e_tag_disable(hw);
7156                 break;
7157         default:
7158                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7159                 ret = -EINVAL;
7160                 break;
7161         }
7162
7163         return ret;
7164 }
7165
7166 static int
7167 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
7168                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7169 {
7170         int ret = 0;
7171         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7172         uint32_t i, rar_entries;
7173         uint32_t rar_low, rar_high;
7174
7175         if (hw->mac.type != ixgbe_mac_X550 &&
7176             hw->mac.type != ixgbe_mac_X550EM_x &&
7177             hw->mac.type != ixgbe_mac_X550EM_a) {
7178                 return -ENOTSUP;
7179         }
7180
7181         rar_entries = ixgbe_get_num_rx_addrs(hw);
7182
7183         for (i = 1; i < rar_entries; i++) {
7184                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7185                 rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
7186                 if ((rar_high & IXGBE_RAH_AV) &&
7187                     (rar_high & IXGBE_RAH_ADTYPE) &&
7188                     ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
7189                      l2_tunnel->tunnel_id)) {
7190                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
7191                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
7192
7193                         ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
7194
7195                         return ret;
7196                 }
7197         }
7198
7199         return ret;
7200 }
7201
7202 static int
7203 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
7204                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7205 {
7206         int ret = 0;
7207         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7208         uint32_t i, rar_entries;
7209         uint32_t rar_low, rar_high;
7210
7211         if (hw->mac.type != ixgbe_mac_X550 &&
7212             hw->mac.type != ixgbe_mac_X550EM_x &&
7213             hw->mac.type != ixgbe_mac_X550EM_a) {
7214                 return -ENOTSUP;
7215         }
7216
7217         /* One entry per tunnel; remove any existing entry for it first. */
7218         ixgbe_e_tag_filter_del(dev, l2_tunnel);
7219
7220         rar_entries = ixgbe_get_num_rx_addrs(hw);
7221
7222         for (i = 1; i < rar_entries; i++) {
7223                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7224                 if (rar_high & IXGBE_RAH_AV) {
7225                         continue;
7226                 } else {
7227                         ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
7228                         rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
7229                         rar_low = l2_tunnel->tunnel_id;
7230
7231                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
7232                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
7233
7234                         return ret;
7235                 }
7236         }
7237
7238         PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rules is full."
7239                      " Please remove a rule before adding a new one.");
7240         return -EINVAL;
7241 }
7242
7243 /* Add l2 tunnel filter */
7244 static int
7245 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
7246                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
7247 {
7248         int ret = 0;
7249
7250         switch (l2_tunnel->l2_tunnel_type) {
7251         case RTE_L2_TUNNEL_TYPE_E_TAG:
7252                 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
7253                 break;
7254         default:
7255                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7256                 ret = -EINVAL;
7257                 break;
7258         }
7259
7260         return ret;
7261 }
7262
7263 /* Delete l2 tunnel filter */
7264 static int
7265 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
7266                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
7267 {
7268         int ret = 0;
7269
7270         switch (l2_tunnel->l2_tunnel_type) {
7271         case RTE_L2_TUNNEL_TYPE_E_TAG:
7272                 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
7273                 break;
7274         default:
7275                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7276                 ret = -EINVAL;
7277                 break;
7278         }
7279
7280         return ret;
7281 }
7282
7283 /**
7284  * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
7285  * @dev: pointer to rte_eth_dev structure
7286  * @filter_op: operation to be performed.
7287  * @arg: a pointer to the specific structure corresponding to filter_op
7288  */
7289 static int
7290 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
7291                                   enum rte_filter_op filter_op,
7292                                   void *arg)
7293 {
7294         int ret = 0;
7295
7296         if (filter_op == RTE_ETH_FILTER_NOP)
7297                 return 0;
7298
7299         if (arg == NULL) {
7300                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
7301                             filter_op);
7302                 return -EINVAL;
7303         }
7304
7305         switch (filter_op) {
7306         case RTE_ETH_FILTER_ADD:
7307                 ret = ixgbe_dev_l2_tunnel_filter_add
7308                         (dev,
7309                          (struct rte_eth_l2_tunnel_conf *)arg);
7310                 break;
7311         case RTE_ETH_FILTER_DELETE:
7312                 ret = ixgbe_dev_l2_tunnel_filter_del
7313                         (dev,
7314                          (struct rte_eth_l2_tunnel_conf *)arg);
7315                 break;
7316         default:
7317                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
7318                 ret = -EINVAL;
7319                 break;
7320         }
7321         return ret;
7322 }
7323
7324 static int
7325 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
7326 {
7327         int ret = 0;
7328         uint32_t ctrl;
7329         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7330
7331         if (hw->mac.type != ixgbe_mac_X550 &&
7332             hw->mac.type != ixgbe_mac_X550EM_x &&
7333             hw->mac.type != ixgbe_mac_X550EM_a) {
7334                 return -ENOTSUP;
7335         }
7336
7337         ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
7338         ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
7339         if (en)
7340                 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
7341         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
7342
7343         return ret;
7344 }
7345
7346 /* Enable l2 tunnel forwarding */
7347 static int
7348 ixgbe_dev_l2_tunnel_forwarding_enable
7349         (struct rte_eth_dev *dev,
7350          enum rte_eth_tunnel_type l2_tunnel_type)
7351 {
7352         int ret = 0;
7353
7354         switch (l2_tunnel_type) {
7355         case RTE_L2_TUNNEL_TYPE_E_TAG:
7356                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
7357                 break;
7358         default:
7359                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7360                 ret = -EINVAL;
7361                 break;
7362         }
7363
7364         return ret;
7365 }
7366
7367 /* Disable l2 tunnel forwarding */
7368 static int
7369 ixgbe_dev_l2_tunnel_forwarding_disable
7370         (struct rte_eth_dev *dev,
7371          enum rte_eth_tunnel_type l2_tunnel_type)
7372 {
7373         int ret = 0;
7374
7375         switch (l2_tunnel_type) {
7376         case RTE_L2_TUNNEL_TYPE_E_TAG:
7377                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
7378                 break;
7379         default:
7380                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7381                 ret = -EINVAL;
7382                 break;
7383         }
7384
7385         return ret;
7386 }
7387
7388 static int
7389 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
7390                              struct rte_eth_l2_tunnel_conf *l2_tunnel,
7391                              bool en)
7392 {
7393         struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
7394         int ret = 0;
7395         uint32_t vmtir, vmvir;
7396         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7397
7398         if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
7399                 PMD_DRV_LOG(ERR,
7400                             "VF id %u should be less than %u",
7401                             l2_tunnel->vf_id,
7402                             pci_dev->max_vfs);
7403                 return -EINVAL;
7404         }
7405
7406         if (hw->mac.type != ixgbe_mac_X550 &&
7407             hw->mac.type != ixgbe_mac_X550EM_x &&
7408             hw->mac.type != ixgbe_mac_X550EM_a) {
7409                 return -ENOTSUP;
7410         }
7411
7412         if (en)
7413                 vmtir = l2_tunnel->tunnel_id;
7414         else
7415                 vmtir = 0;
7416
7417         IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
7418
7419         vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
7420         vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
7421         if (en)
7422                 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
7423         IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
7424
7425         return ret;
7426 }
7427
7428 /* Enable l2 tunnel tag insertion */
7429 static int
7430 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
7431                                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
7432 {
7433         int ret = 0;
7434
7435         switch (l2_tunnel->l2_tunnel_type) {
7436         case RTE_L2_TUNNEL_TYPE_E_TAG:
7437                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
7438                 break;
7439         default:
7440                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7441                 ret = -EINVAL;
7442                 break;
7443         }
7444
7445         return ret;
7446 }
7447
7448 /* Disable l2 tunnel tag insertion */
7449 static int
7450 ixgbe_dev_l2_tunnel_insertion_disable
7451         (struct rte_eth_dev *dev,
7452          struct rte_eth_l2_tunnel_conf *l2_tunnel)
7453 {
7454         int ret = 0;
7455
7456         switch (l2_tunnel->l2_tunnel_type) {
7457         case RTE_L2_TUNNEL_TYPE_E_TAG:
7458                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
7459                 break;
7460         default:
7461                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7462                 ret = -EINVAL;
7463                 break;
7464         }
7465
7466         return ret;
7467 }
7468
7469 static int
7470 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
7471                              bool en)
7472 {
7473         int ret = 0;
7474         uint32_t qde;
7475         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7476
7477         if (hw->mac.type != ixgbe_mac_X550 &&
7478             hw->mac.type != ixgbe_mac_X550EM_x &&
7479             hw->mac.type != ixgbe_mac_X550EM_a) {
7480                 return -ENOTSUP;
7481         }
7482
7483         qde = IXGBE_READ_REG(hw, IXGBE_QDE);
7484         if (en)
7485                 qde |= IXGBE_QDE_STRIP_TAG;
7486         else
7487                 qde &= ~IXGBE_QDE_STRIP_TAG;
7488         qde &= ~IXGBE_QDE_READ;
7489         qde |= IXGBE_QDE_WRITE;
7490         IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
7491
7492         return ret;
7493 }
7494
7495 /* Enable l2 tunnel tag stripping */
7496 static int
7497 ixgbe_dev_l2_tunnel_stripping_enable
7498         (struct rte_eth_dev *dev,
7499          enum rte_eth_tunnel_type l2_tunnel_type)
7500 {
7501         int ret = 0;
7502
7503         switch (l2_tunnel_type) {
7504         case RTE_L2_TUNNEL_TYPE_E_TAG:
7505                 ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
7506                 break;
7507         default:
7508                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7509                 ret = -EINVAL;
7510                 break;
7511         }
7512
7513         return ret;
7514 }
7515
7516 /* Disable l2 tunnel tag stripping */
7517 static int
7518 ixgbe_dev_l2_tunnel_stripping_disable
7519         (struct rte_eth_dev *dev,
7520          enum rte_eth_tunnel_type l2_tunnel_type)
7521 {
7522         int ret = 0;
7523
7524         switch (l2_tunnel_type) {
7525         case RTE_L2_TUNNEL_TYPE_E_TAG:
7526                 ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
7527                 break;
7528         default:
7529                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7530                 ret = -EINVAL;
7531                 break;
7532         }
7533
7534         return ret;
7535 }
7536
7537 /* Enable/disable l2 tunnel offload functions */
7538 static int
7539 ixgbe_dev_l2_tunnel_offload_set
7540         (struct rte_eth_dev *dev,
7541          struct rte_eth_l2_tunnel_conf *l2_tunnel,
7542          uint32_t mask,
7543          uint8_t en)
7544 {
7545         int ret = 0;
7546
7547         if (l2_tunnel == NULL)
7548                 return -EINVAL;
7549
7550         ret = -EINVAL;
7551         if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
7552                 if (en)
7553                         ret = ixgbe_dev_l2_tunnel_enable(
7554                                 dev,
7555                                 l2_tunnel->l2_tunnel_type);
7556                 else
7557                         ret = ixgbe_dev_l2_tunnel_disable(
7558                                 dev,
7559                                 l2_tunnel->l2_tunnel_type);
7560         }
7561
7562         if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
7563                 if (en)
7564                         ret = ixgbe_dev_l2_tunnel_insertion_enable(
7565                                 dev,
7566                                 l2_tunnel);
7567                 else
7568                         ret = ixgbe_dev_l2_tunnel_insertion_disable(
7569                                 dev,
7570                                 l2_tunnel);
7571         }
7572
7573         if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
7574                 if (en)
7575                         ret = ixgbe_dev_l2_tunnel_stripping_enable(
7576                                 dev,
7577                                 l2_tunnel->l2_tunnel_type);
7578                 else
7579                         ret = ixgbe_dev_l2_tunnel_stripping_disable(
7580                                 dev,
7581                                 l2_tunnel->l2_tunnel_type);
7582         }
7583
7584         if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
7585                 if (en)
7586                         ret = ixgbe_dev_l2_tunnel_forwarding_enable(
7587                                 dev,
7588                                 l2_tunnel->l2_tunnel_type);
7589                 else
7590                         ret = ixgbe_dev_l2_tunnel_forwarding_disable(
7591                                 dev,
7592                                 l2_tunnel->l2_tunnel_type);
7593         }
7594
7595         return ret;
7596 }
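
/*
 * Illustrative usage sketch (not part of the driver): applications are
 * expected to drive this callback through the generic
 * rte_eth_dev_l2_tunnel_offload_set() wrapper, e.g. to enable E-tag support
 * and forwarding in one call. port_id is an assumption for the example only.
 *
 *     struct rte_eth_l2_tunnel_conf conf = {
 *             .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *     };
 *
 *     rte_eth_dev_l2_tunnel_offload_set(port_id, &conf,
 *             ETH_L2_TUNNEL_ENABLE_MASK | ETH_L2_TUNNEL_FORWARDING_MASK,
 *             1);
 */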
7597
7598 static int
7599 ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
7600                         uint16_t port)
7601 {
7602         IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
7603         IXGBE_WRITE_FLUSH(hw);
7604
7605         return 0;
7606 }
7607
7608 /* There is only one register for the VxLAN UDP port, so several ports
7609  * cannot coexist; adding a new port simply overwrites the current value.
7610  */
7611 static int
7612 ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
7613                      uint16_t port)
7614 {
7615         if (port == 0) {
7616                 PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");
7617                 return -EINVAL;
7618         }
7619
7620         return ixgbe_update_vxlan_port(hw, port);
7621 }
7622
7623 /* The VxLAN port cannot really be deleted: the VXLANCTRL register always
7624  * holds some value, so "deleting" a port means resetting the register to
7625  * its original value of 0.
7626  */
7627 static int
7628 ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
7629                      uint16_t port)
7630 {
7631         uint16_t cur_port;
7632
7633         cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
7634
7635         if (cur_port != port) {
7636                 PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
7637                 return -EINVAL;
7638         }
7639
7640         return ixgbe_update_vxlan_port(hw, 0);
7641 }
7642
7643 /* Add UDP tunneling port */
7644 static int
7645 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
7646                               struct rte_eth_udp_tunnel *udp_tunnel)
7647 {
7648         int ret = 0;
7649         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7650
7651         if (hw->mac.type != ixgbe_mac_X550 &&
7652             hw->mac.type != ixgbe_mac_X550EM_x &&
7653             hw->mac.type != ixgbe_mac_X550EM_a) {
7654                 return -ENOTSUP;
7655         }
7656
7657         if (udp_tunnel == NULL)
7658                 return -EINVAL;
7659
7660         switch (udp_tunnel->prot_type) {
7661         case RTE_TUNNEL_TYPE_VXLAN:
7662                 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
7663                 break;
7664
7665         case RTE_TUNNEL_TYPE_GENEVE:
7666         case RTE_TUNNEL_TYPE_TEREDO:
7667                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
7668                 ret = -EINVAL;
7669                 break;
7670
7671         default:
7672                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7673                 ret = -EINVAL;
7674                 break;
7675         }
7676
7677         return ret;
7678 }
7679
7680 /* Remove UDP tunneling port */
7681 static int
7682 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
7683                               struct rte_eth_udp_tunnel *udp_tunnel)
7684 {
7685         int ret = 0;
7686         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7687
7688         if (hw->mac.type != ixgbe_mac_X550 &&
7689             hw->mac.type != ixgbe_mac_X550EM_x &&
7690             hw->mac.type != ixgbe_mac_X550EM_a) {
7691                 return -ENOTSUP;
7692         }
7693
7694         if (udp_tunnel == NULL)
7695                 return -EINVAL;
7696
7697         switch (udp_tunnel->prot_type) {
7698         case RTE_TUNNEL_TYPE_VXLAN:
7699                 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
7700                 break;
7701         case RTE_TUNNEL_TYPE_GENEVE:
7702         case RTE_TUNNEL_TYPE_TEREDO:
7703                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
7704                 ret = -EINVAL;
7705                 break;
7706         default:
7707                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7708                 ret = -EINVAL;
7709                 break;
7710         }
7711
7712         return ret;
7713 }
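
/*
 * Illustrative usage sketch (not part of the driver): the VxLAN port is
 * normally programmed through the generic UDP tunnel API; since the hardware
 * has a single VXLANCTRL register, adding a second port simply replaces the
 * first. port_id and the port number 4789 (the IANA VxLAN port) are
 * assumptions for the example only.
 *
 *     struct rte_eth_udp_tunnel tunnel = {
 *             .udp_port = 4789,
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *
 *     rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *     ...
 *     rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
 */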
7714
7715 static void
7716 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
7717 {
7718         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7719
7720         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
7721 }
7722
7723 static void
7724 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
7725 {
7726         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7727
7728         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
7729 }
7730
7731 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
7732 {
7733         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7734         u32 in_msg = 0;
7735
7736         if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
7737                 return;
7738
7739         /* PF reset VF event */
7740         if (in_msg == IXGBE_PF_CONTROL_MSG)
7741                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
7742 }
7743
7744 static int
7745 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
7746 {
7747         uint32_t eicr;
7748         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7749         struct ixgbe_interrupt *intr =
7750                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
7751         ixgbevf_intr_disable(hw);
7752
7753         /* read-on-clear nic registers here */
7754         eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
7755         intr->flags = 0;
7756
7757         /* only one misc vector supported - mailbox */
7758         eicr &= IXGBE_VTEICR_MASK;
7759         if (eicr == IXGBE_MISC_VEC_ID)
7760                 intr->flags |= IXGBE_FLAG_MAILBOX;
7761
7762         return 0;
7763 }
7764
7765 static int
7766 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
7767 {
7768         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7769         struct ixgbe_interrupt *intr =
7770                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
7771
7772         if (intr->flags & IXGBE_FLAG_MAILBOX) {
7773                 ixgbevf_mbx_process(dev);
7774                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
7775         }
7776
7777         ixgbevf_intr_enable(hw);
7778
7779         return 0;
7780 }
7781
7782 static void
7783 ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
7784                               void *param)
7785 {
7786         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
7787
7788         ixgbevf_dev_interrupt_get_status(dev);
7789         ixgbevf_dev_interrupt_action(dev);
7790 }
7791
7792 /**
7793  *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
7794  *  @hw: pointer to hardware structure
7795  *
7796  *  Stops the transmit data path and waits for the HW to internally empty
7797  *  the Tx security block
7798  **/
7799 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
7800 {
7801 #define IXGBE_MAX_SECTX_POLL 40
7802
7803         int i;
7804         int sectxreg;
7805
7806         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
7807         sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
7808         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
7809         for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
7810                 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
7811                 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
7812                         break;
7813                 /* Use interrupt-safe sleep just in case */
7814                 usec_delay(1000);
7815         }
7816
7817         /* For informational purposes only */
7818         if (i >= IXGBE_MAX_SECTX_POLL)
7819                 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
7820                          "path fully disabled.  Continuing with init.\n");
7821
7822         return IXGBE_SUCCESS;
7823 }
7824
7825 /**
7826  *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
7827  *  @hw: pointer to hardware structure
7828  *
7829  *  Enables the transmit data path.
7830  **/
7831 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
7832 {
7833         uint32_t sectxreg;
7834
7835         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
7836         sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
7837         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
7838         IXGBE_WRITE_FLUSH(hw);
7839
7840         return IXGBE_SUCCESS;
7841 }
7842
7843 int
7844 rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
7845 {
7846         struct ixgbe_hw *hw;
7847         struct rte_eth_dev *dev;
7848         uint32_t ctrl;
7849
7850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
7851
7852         dev = &rte_eth_devices[port];
7853         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7854
7855         /* Stop the data paths */
7856         if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
7857                 return -ENOTSUP;
7858         /*
7859          * Workaround:
7860          * Since no Tx equivalent of ixgbe_disable_sec_rx_path()
7861          * is implemented in the base code, and the base code must
7862          * not be modified in DPDK, call the hand-written helper
7863          * directly for now.
7864          * The hardware support has been checked by
7865          * ixgbe_disable_sec_rx_path().
7866          */
7867         ixgbe_disable_sec_tx_path_generic(hw);
7868
7869         /* Enable Ethernet CRC (required by MACsec offload) */
7870         ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
7871         ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
7872         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
7873
7874         /* Enable the TX and RX crypto engines */
7875         ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
7876         ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
7877         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
7878
7879         ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
7880         ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
7881         IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
7882
7883         ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
7884         ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
7885         ctrl |= 0x3;
7886         IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
7887
7888         /* Enable SA lookup */
7889         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
7890         ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
7891         ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
7892                      IXGBE_LSECTXCTRL_AUTH;
7893         ctrl |= IXGBE_LSECTXCTRL_AISCI;
7894         ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
7895         ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
7896         IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
7897
7898         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
7899         ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
7900         ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
7901         ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
7902         if (rp)
7903                 ctrl |= IXGBE_LSECRXCTRL_RP;
7904         else
7905                 ctrl &= ~IXGBE_LSECRXCTRL_RP;
7906         IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
7907
7908         /* Start the data paths */
7909         ixgbe_enable_sec_rx_path(hw);
7910         /*
7911          * Workaround:
7912          * Since no Tx equivalent of ixgbe_enable_sec_rx_path()
7913          * is implemented in the base code, and the base code must
7914          * not be modified in DPDK, call the hand-written helper
7915          * directly for now.
7916          */
7917         ixgbe_enable_sec_tx_path_generic(hw);
7918
7919         return 0;
7920 }
7921
7922 int
7923 rte_pmd_ixgbe_macsec_disable(uint8_t port)
7924 {
7925         struct ixgbe_hw *hw;
7926         struct rte_eth_dev *dev;
7927         uint32_t ctrl;
7928
7929         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
7930
7931         dev = &rte_eth_devices[port];
7932         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7933
7934         /* Stop the data paths */
7935         if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
7936                 return -ENOTSUP;
7937         /*
7938          * Workaround:
7939          * Since no Tx equivalent of ixgbe_disable_sec_rx_path()
7940          * is implemented in the base code, and the base code must
7941          * not be modified in DPDK, call the hand-written helper
7942          * directly for now.
7943          * The hardware support has been checked by
7944          * ixgbe_disable_sec_rx_path().
7945          */
7946         ixgbe_disable_sec_tx_path_generic(hw);
7947
7948         /* Disable the TX and RX crypto engines */
7949         ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
7950         ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
7951         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
7952
7953         ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
7954         ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
7955         IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
7956
7957         /* Disable SA lookup */
7958         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
7959         ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
7960         ctrl |= IXGBE_LSECTXCTRL_DISABLE;
7961         IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
7962
7963         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
7964         ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
7965         ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
7966         IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
7967
7968         /* Start the data paths */
7969         ixgbe_enable_sec_rx_path(hw);
7970         /*
7971          * Workaround:
7972          * Since no Tx equivalent of ixgbe_enable_sec_rx_path()
7973          * is implemented in the base code, and the base code must
7974          * not be modified in DPDK, call the hand-written helper
7975          * directly for now.
7976          */
7977         ixgbe_enable_sec_tx_path_generic(hw);
7978
7979         return 0;
7980 }
7981
7982 int
7983 rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
7984 {
7985         struct ixgbe_hw *hw;
7986         struct rte_eth_dev *dev;
7987         uint32_t ctrl;
7988
7989         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
7990
7991         dev = &rte_eth_devices[port];
7992         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7993
7994         ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
7995         IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
7996
7997         ctrl = mac[4] | (mac[5] << 8);
7998         IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
7999
8000         return 0;
8001 }
8002
8003 int
8004 rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
8005 {
8006         struct ixgbe_hw *hw;
8007         struct rte_eth_dev *dev;
8008         uint32_t ctrl;
8009
8010         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
8011
8012         dev = &rte_eth_devices[port];
8013         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8014
8015         ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
8016         IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
8017
8018         pi = rte_cpu_to_be_16(pi);
8019         ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
8020         IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
8021
8022         return 0;
8023 }
8024
8025 int
8026 rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
8027                                  uint32_t pn, uint8_t *key)
8028 {
8029         struct ixgbe_hw *hw;
8030         struct rte_eth_dev *dev;
8031         uint32_t ctrl, i;
8032
8033         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
8034
8035         dev = &rte_eth_devices[port];
8036         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8037
8038         if (idx != 0 && idx != 1)
8039                 return -EINVAL;
8040
8041         if (an >= 4)
8042                 return -EINVAL;
8043
8046         /* Set the PN and key */
8047         pn = rte_cpu_to_be_32(pn);
8048         if (idx == 0) {
8049                 IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
8050
8051                 for (i = 0; i < 4; i++) {
8052                         ctrl = (key[i * 4 + 0] <<  0) |
8053                                (key[i * 4 + 1] <<  8) |
8054                                (key[i * 4 + 2] << 16) |
8055                                (key[i * 4 + 3] << 24);
8056                         IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
8057                 }
8058         } else {
8059                 IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
8060
8061                 for (i = 0; i < 4; i++) {
8062                         ctrl = (key[i * 4 + 0] <<  0) |
8063                                (key[i * 4 + 1] <<  8) |
8064                                (key[i * 4 + 2] << 16) |
8065                                (key[i * 4 + 3] << 24);
8066                         IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
8067                 }
8068         }
8069
8070         /* Set AN and select the SA */
8071         ctrl = (an << idx * 2) | (idx << 4);
8072         IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
8073
8074         return 0;
8075 }
8076
8077 int
8078 rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
8079                                  uint32_t pn, uint8_t *key)
8080 {
8081         struct ixgbe_hw *hw;
8082         struct rte_eth_dev *dev;
8083         uint32_t ctrl, i;
8084
8085         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
8086
8087         dev = &rte_eth_devices[port];
8088         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8089
8090         if (idx != 0 && idx != 1)
8091                 return -EINVAL;
8092
8093         if (an >= 4)
8094                 return -EINVAL;
8095
8096         /* Set the PN */
8097         pn = rte_cpu_to_be_32(pn);
8098         IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
8099
8100         /* Set the key */
8101         for (i = 0; i < 4; i++) {
8102                 ctrl = (key[i * 4 + 0] <<  0) |
8103                        (key[i * 4 + 1] <<  8) |
8104                        (key[i * 4 + 2] << 16) |
8105                        (key[i * 4 + 3] << 24);
8106                 IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
8107         }
8108
8109         /* Set the AN and validate the SA */
8110         ctrl = an | (1 << 2);
8111         IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
8112
8113         return 0;
8114 }
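
/*
 * Illustrative usage sketch (not part of the driver): the rte_pmd_ixgbe
 * MACsec helpers above are meant to be called roughly in this order, after
 * the port has been configured and started. The MAC addresses, the 128-bit
 * key, the SCI port identifier and port_id below are placeholders.
 *
 *     uint8_t local_mac[6] = { 0 }, peer_mac[6] = { 0 };
 *     uint8_t key[16] = { 0 };
 *
 *     rte_pmd_ixgbe_macsec_enable(port_id, 1, 1);
 *     rte_pmd_ixgbe_macsec_config_txsc(port_id, local_mac);
 *     rte_pmd_ixgbe_macsec_config_rxsc(port_id, peer_mac, 1);
 *     rte_pmd_ixgbe_macsec_select_txsa(port_id, 0, 0, 1, key);
 *     rte_pmd_ixgbe_macsec_select_rxsa(port_id, 0, 0, 1, key);
 *
 * The second and third arguments of rte_pmd_ixgbe_macsec_enable() turn on
 * encryption and replay protection respectively, matching the 'en' and 'rp'
 * parameters above.
 */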
8115
8116 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
8117 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
8118 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
8119 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
8120 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
8121 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio");